{"text":"package modules\n\nimport \"net\/http\"\n\ntype StaticModule struct {\n\tBaseModule\n\trootPath string\n\tfileServer http.Handler\n}\n\nfunc NewStaticModule(node Node) *StaticModule {\n\tm := &StaticModule{}\n\tm.Init(node)\n\treturn m\n}\n\nfunc (m *StaticModule) Init(node Node) {\n\tm.rootPath = node.(Scalar).String()\n\tm.fileServer = http.FileServer(http.Dir(m.rootPath))\n}\n\nfunc (m *StaticModule) Process(req *Req, res *Res) bool {\n\tm.fileServer.ServeHTTP(res.writer, req.request)\n\treturn true\n}\nadd log for static, fix returnpackage modules\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n)\n\ntype StaticModule struct {\n\tBaseModule\n\trootPath string\n\tfileServer http.Handler\n}\n\nfunc NewStaticModule(node Node) *StaticModule {\n\tm := &StaticModule{}\n\tm.Init(node)\n\treturn m\n}\n\nfunc (m *StaticModule) Init(node Node) {\n\tm.rootPath = node.(Scalar).String()\n\tm.fileServer = http.FileServer(http.Dir(m.rootPath))\n}\n\nfunc (m *StaticModule) Process(req *Req, res *Res) bool {\n\tlog.Println(\"Static process:\", req.GetPath())\n\tm.fileServer.ServeHTTP(res, req.request)\n\tif res.statusCode >= 400 && res.statusCode < 500 {\n\t\treturn false\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"\/*\n User : https:\/\/api.github.com\/users\/Omie\n returns a dict\n has repos_url : https:\/\/api.github.com\/users\/Omie\/repos\n Repos : https:\/\/api.github.com\/users\/Omie\/repos\n returns a list of dict\n has collaborators_url : https:\/\/api.github.com\/repos\/Omie\/configfiles\/collaborators\n Collaborators : https:\/\/api.github.com\/repos\/Omie\/configfiles\/collaborators\n returns a list of dict\n has repos_url for each user\n*\/\n\npackage main\n\nimport (\n \"os\"\n \"fmt\"\n \"encoding\/json\"\n \"io\/ioutil\"\n \"net\/http\"\n \"strings\"\n \"log\"\n \"errors\"\n \"github.com\/omie\/ghlib\"\n)\n\nvar visited = make(map[string]string)\n\nvar requestsLeft int = 60\n\nvar username, password string\n\n\/\/because Math.Min is for float64\nfunc min(a, b int) int {\n if a <= b {\n return a\n }\n return b\n}\n\nfunc getData(url string) ([]byte, error) {\n log.Println(\"--- reached getData for \", url)\n\n requestsLeft--\n if requestsLeft < 0 {\n log.Println(\"--- LIMIT REACHED \")\n return nil, errors.New(\"limit reached\")\n }\n\n client := &http.Client{}\n\n \/* Authenticate *\/\n req, err := http.NewRequest(\"GET\", url, nil)\n req.SetBasicAuth(username, password)\n resp, err := client.Do(req)\n if err != nil {\n return nil, err\n }\n defer resp.Body.Close()\n\n body, err := ioutil.ReadAll(resp.Body)\n if err != nil {\n return nil, err\n }\n\n return body, nil\n}\n\nfunc getApiLimit() (int, error) {\n jsonData, err := getData(\"https:\/\/api.github.com\/rate_limit\")\n if err != nil {\n return 0, err\n }\n\n var limitData ghlib.GhLimit\n if err := json.Unmarshal(jsonData, &limitData); err != nil {\n return 0, err\n }\n return limitData.Rate.Remaining, nil\n}\n\nfunc getReposURL(username string) (string, error) {\n log.Println(\"--- reached getReposURL for \", username)\n\n userJsonData, err := getData(\"https:\/\/api.github.com\/users\/\" + username)\n if err != nil {\n return \"\", err\n }\n\n var user ghlib.GhUser\n if err := json.Unmarshal(userJsonData, &user); err != nil {\n return \"\", err\n }\n return user.ReposUrl, nil\n}\n\nfunc processCollaborators(collabURL string) {\n log.Println(\"--- reached processCollaborators for \", collabURL)\n if _, exists := visited[collabURL]; exists {\n log.Println(\"--- skipped \", collabURL)\n return\n }\n visited[collabURL] = 
collabURL\n\n jsonData, err := getData(collabURL)\n if err != nil {\n return\n }\n\n var collaborators []*ghlib.GhUser\n err = json.Unmarshal(jsonData, &collaborators)\n if err != nil {\n log.Println(\"Error while parsing collaborators: \", err)\n return\n }\n \/\/for each collaborator\n for _, collaborator := range collaborators {\n \/\/handle user if not previously listed\n tempUser := collaborator.Login\n if _, exists := visited[tempUser]; exists {\n continue\n }\n \/\/We found new user in network\n fmt.Println(\"User : \", tempUser)\n visited[tempUser] = tempUser\n tempRepoURL := collaborator.ReposUrl\n\n \/\/make a call to processRepo(tempRepoURL)\n processRepos(tempRepoURL)\n } \/\/end for\n}\n\nfunc processRepos(repoURL string) {\n log.Println(\"--- reached processRepos for \", repoURL)\n if _, exists := visited[repoURL]; exists {\n log.Println(\"--- skipped \", repoURL)\n return\n }\n visited[repoURL] = repoURL\n\n repoData, err := getData(repoURL) \/\/get a list of repositories\n if err != nil {\n log.Println(\"err while getting data\", err)\n return\n }\n\n var repoList []*ghlib.GhRepository\n err = json.Unmarshal(repoData, &repoList)\n if err != nil {\n log.Println(\"Error while parsing repo list: \", err)\n return\n }\n\n \/\/m := min(len(repoList), 2)\n \/\/repoList = repoList[:m] \/\/limit to only 2 entries for time being\n\n for _, repo := range repoList {\n tempCollabsURL := repo.CollaboratorsUrl\n log.Println(tempCollabsURL)\n idx := strings.Index(tempCollabsURL, \"{\")\n \/\/use bytes package for serious string manipulation. much faster\n collabURL := tempCollabsURL[:idx]\n processCollaborators(collabURL)\n }\n\n} \/\/end processRepos\n\nfunc main() {\n f, err := os.OpenFile(\"\/tmp\/linkedhub.log\", os.O_WRONLY | os.O_CREATE | os.O_APPEND, 0666)\n if err != nil {\n fmt.Println(\"Could not open file for logging\")\n return\n }\n defer f.Close()\n\n log.SetOutput(f)\n\n fmt.Println(\"Enter github credentials\")\n fmt.Print(\"username: \")\n fmt.Scanln(&username)\n fmt.Print(\"password: \")\n fmt.Scanln(&password)\n\n \/\/find out current API limit\n limit, err := getApiLimit()\n if err != nil {\n fmt.Println(\"error while getting limit: \", err)\n return\n }\n if limit <= 10 {\n fmt.Println(\"Too few API calls left. 
Not worth it.\")\n return\n }\n requestsLeft = limit\n fmt.Println(\"requestsLeft: \", requestsLeft)\n\n \/\/get username from command line\n var u string\n fmt.Println(\"Enter github username: \")\n fmt.Scanln(&u)\n\n repoURL, err := getReposURL(u)\n if err != nil {\n log.Println(\"error while getting repo url for: \", u)\n return\n }\n\n processRepos(repoURL)\n}\n\nadded maxDepth limitation\/*\n User : https:\/\/api.github.com\/users\/Omie\n returns a dict\n has repos_url : https:\/\/api.github.com\/users\/Omie\/repos\n Repos : https:\/\/api.github.com\/users\/Omie\/repos\n returns a list of dict\n has collaborators_url : https:\/\/api.github.com\/repos\/Omie\/configfiles\/collaborators\n Collaborators : https:\/\/api.github.com\/repos\/Omie\/configfiles\/collaborators\n returns a list of dict\n has repos_url for each user\n*\/\n\npackage main\n\nimport (\n \"os\"\n \"fmt\"\n \"encoding\/json\"\n \"io\/ioutil\"\n \"net\/http\"\n \"strings\"\n \"log\"\n \"errors\"\n \"github.com\/omie\/ghlib\"\n)\n\nvar visited = make(map[string]string)\n\nvar requestsLeft int = 60\n\nvar username, password string\n\nconst maxDepth int = 3\n\n\/\/because Math.Min is for float64\nfunc min(a, b int) int {\n if a <= b {\n return a\n }\n return b\n}\n\nfunc getData(url string) ([]byte, error) {\n log.Println(\"--- reached getData for \", url)\n\n requestsLeft--\n if requestsLeft < 0 {\n log.Println(\"--- LIMIT REACHED \")\n return nil, errors.New(\"limit reached\")\n }\n\n client := &http.Client{}\n\n \/* Authenticate *\/\n req, err := http.NewRequest(\"GET\", url, nil)\n req.SetBasicAuth(username, password)\n resp, err := client.Do(req)\n if err != nil {\n return nil, err\n }\n defer resp.Body.Close()\n\n body, err := ioutil.ReadAll(resp.Body)\n if err != nil {\n return nil, err\n }\n\n return body, nil\n}\n\nfunc getApiLimit() (int, error) {\n jsonData, err := getData(\"https:\/\/api.github.com\/rate_limit\")\n if err != nil {\n return 0, err\n }\n\n var limitData ghlib.GhLimit\n if err := json.Unmarshal(jsonData, &limitData); err != nil {\n return 0, err\n }\n return limitData.Rate.Remaining, nil\n}\n\nfunc getReposURL(username string) (string, error) {\n log.Println(\"--- reached getReposURL for \", username)\n\n userJsonData, err := getData(\"https:\/\/api.github.com\/users\/\" + username)\n if err != nil {\n return \"\", err\n }\n\n var user ghlib.GhUser\n if err := json.Unmarshal(userJsonData, &user); err != nil {\n return \"\", err\n }\n return user.ReposUrl, nil\n}\n\nfunc processCollaborators(collabURL string, currentDepth int) {\n log.Println(\"--- reached processCollaborators for \", collabURL)\n if _, exists := visited[collabURL]; exists {\n log.Println(\"--- skipped \", collabURL)\n return\n }\n visited[collabURL] = collabURL\n\n jsonData, err := getData(collabURL)\n if err != nil {\n return\n }\n\n var collaborators []*ghlib.GhUser\n err = json.Unmarshal(jsonData, &collaborators)\n if err != nil {\n log.Println(\"Error while parsing collaborators: \", err)\n return\n }\n \/\/for each collaborator\n for _, collaborator := range collaborators {\n \/\/handle user if not previously listed\n tempUser := collaborator.Login\n if _, exists := visited[tempUser]; exists {\n continue\n }\n \/\/We found new user in network\n fmt.Println(\"User : \", tempUser)\n visited[tempUser] = tempUser\n tempRepoURL := collaborator.ReposUrl\n\n \/\/make a call to processRepo(tempRepoURL)\n processRepos(tempRepoURL, currentDepth+1)\n } \/\/end for\n}\n\nfunc processRepos(repoURL string, currentDepth int) {\n 
log.Println(\"--- reached processRepos for \", repoURL)\n if currentDepth > maxDepth {\n log.Println(\"maxDepth reached\")\n return\n }\n\n if _, exists := visited[repoURL]; exists {\n log.Println(\"--- skipped \", repoURL)\n return\n }\n visited[repoURL] = repoURL\n\n repoData, err := getData(repoURL) \/\/get a list of repositories\n if err != nil {\n log.Println(\"err while getting data\", err)\n return\n }\n\n var repoList []*ghlib.GhRepository\n err = json.Unmarshal(repoData, &repoList)\n if err != nil {\n log.Println(\"Error while parsing repo list: \", err)\n return\n }\n\n \/\/m := min(len(repoList), 2)\n \/\/repoList = repoList[:m] \/\/limit to only 2 entries for time being\n\n for _, repo := range repoList {\n tempCollabsURL := repo.CollaboratorsUrl\n log.Println(tempCollabsURL)\n idx := strings.Index(tempCollabsURL, \"{\")\n \/\/use bytes package for serious string manipulation. much faster\n collabURL := tempCollabsURL[:idx]\n processCollaborators(collabURL, currentDepth)\n }\n\n} \/\/end processRepos\n\nfunc main() {\n f, err := os.OpenFile(\"\/tmp\/linkedhub.log\", os.O_WRONLY | os.O_CREATE | os.O_APPEND, 0666)\n if err != nil {\n fmt.Println(\"Could not open file for logging\")\n return\n }\n defer f.Close()\n\n log.SetOutput(f)\n\n fmt.Println(\"Enter github credentials\")\n fmt.Print(\"username: \")\n fmt.Scanln(&username)\n fmt.Print(\"password: \")\n fmt.Scanln(&password)\n\n \/\/find out current API limit\n limit, err := getApiLimit()\n if err != nil {\n fmt.Println(\"error while getting limit: \", err)\n return\n }\n if limit <= 10 {\n fmt.Println(\"Too few of API calls left. Not worth it.\")\n return\n }\n requestsLeft = limit\n fmt.Println(\"requestsLeft: \", requestsLeft)\n\n \/\/get username from command line\n var u string\n fmt.Println(\"Enter github username: \")\n fmt.Scanln(&u)\n\n repoURL, err := getReposURL(u)\n if err != nil {\n log.Println(\"error while getting repo url for: \", u)\n return\n }\n\n processRepos(repoURL, 0)\n}\n\n<|endoftext|>"} {"text":"package mssql\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/vault\/builtin\/logical\/database\/dbplugin\"\n\t\"github.com\/hashicorp\/vault\/helper\/strutil\"\n\t\"github.com\/hashicorp\/vault\/plugins\/helper\/database\/connutil\"\n\t\"github.com\/hashicorp\/vault\/plugins\/helper\/database\/credsutil\"\n\t\"github.com\/hashicorp\/vault\/plugins\/helper\/database\/dbutil\"\n)\n\nconst msSQLTypeName = \"mssql\"\n\n\/\/ MSSQL is an implementation of Database interface\ntype MSSQL struct {\n\tconnutil.ConnectionProducer\n\tcredsutil.CredentialsProducer\n}\n\nfunc New() (interface{}, error) {\n\tconnProducer := &connutil.SQLConnectionProducer{}\n\tconnProducer.Type = msSQLTypeName\n\n\tcredsProducer := &credsutil.SQLCredentialsProducer{\n\t\tDisplayNameLen: 4,\n\t\tUsernameLen: 16,\n\t}\n\n\tdbType := &MSSQL{\n\t\tConnectionProducer: connProducer,\n\t\tCredentialsProducer: credsProducer,\n\t}\n\n\treturn dbType, nil\n}\n\n\/\/ Run instantiates a MSSQL object, and runs the RPC server for the plugin\nfunc Run() error {\n\tdbType, err := New()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdbplugin.NewPluginServer(dbType.(*MSSQL))\n\n\treturn nil\n}\n\n\/\/ Type returns the TypeName for this backend\nfunc (m *MSSQL) Type() (string, error) {\n\treturn msSQLTypeName, nil\n}\n\nfunc (m *MSSQL) getConnection() (*sql.DB, error) {\n\tdb, err := m.Connection()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db.(*sql.DB), nil\n}\n\n\/\/ CreateUser generates the 
username\/password on the underlying MSSQL secret backend as instructed by\n\/\/ the CreationStatement provided.\nfunc (m *MSSQL) CreateUser(statements dbplugin.Statements, usernamePrefix string, expiration time.Time) (username string, password string, err error) {\n\t\/\/ Grab the lock\n\tm.Lock()\n\tdefer m.Unlock()\n\n\t\/\/ Get the connection\n\tdb, err := m.getConnection()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tif statements.CreationStatements == \"\" {\n\t\treturn \"\", \"\", dbutil.ErrEmptyCreationStatement\n\t}\n\n\tusername, err = m.GenerateUsername(usernamePrefix)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tpassword, err = m.GeneratePassword()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\texpirationStr, err := m.GenerateExpiration(expiration)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t\/\/ Start a transaction\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tdefer tx.Rollback()\n\n\t\/\/ Execute each query\n\tfor _, query := range strutil.ParseArbitraryStringSlice(statements.CreationStatements, \";\") {\n\t\tquery = strings.TrimSpace(query)\n\t\tif len(query) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tstmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{\n\t\t\t\"name\": username,\n\t\t\t\"password\": password,\n\t\t\t\"expiration\": expirationStr,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\tdefer stmt.Close()\n\t\tif _, err := stmt.Exec(); err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t}\n\n\t\/\/ Commit the transaction\n\tif err := tx.Commit(); err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\treturn username, password, nil\n}\n\n\/\/ RenewUser is not supported on MSSQL, so this is a no-op.\nfunc (m *MSSQL) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {\n\t\/\/ NOOP\n\treturn nil\n}\n\n\/\/ RevokeUser attempts to drop the specified user. It will first attempt to disable login,\n\/\/ then kill pending connections from that user, and finally drop the user and login from the\n\/\/ database instance.\nfunc (m *MSSQL) RevokeUser(statements dbplugin.Statements, username string) error {\n\t\/\/ Get connection\n\tdb, err := m.getConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ First disable server login\n\tdisableStmt, err := db.Prepare(fmt.Sprintf(\"ALTER LOGIN [%s] DISABLE;\", username))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer disableStmt.Close()\n\tif _, err := disableStmt.Exec(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Query for sessions for the login so that we can kill any outstanding\n\t\/\/ sessions. 
There cannot be any active sessions before we drop the logins\n\t\/\/ This isn't done in a transaction because even if we fail along the way,\n\t\/\/ we want to remove as much access as possible\n\tsessionStmt, err := db.Prepare(fmt.Sprintf(\n\t\t\"SELECT session_id FROM sys.dm_exec_sessions WHERE login_name = '%s';\", username))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer sessionStmt.Close()\n\n\tsessionRows, err := sessionStmt.Query()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer sessionRows.Close()\n\n\tvar revokeStmts []string\n\tfor sessionRows.Next() {\n\t\tvar sessionID int\n\t\terr = sessionRows.Scan(&sessionID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trevokeStmts = append(revokeStmts, fmt.Sprintf(\"KILL %d;\", sessionID))\n\t}\n\n\t\/\/ Query for database users using undocumented stored procedure for now since\n\t\/\/ it is the easiest way to get this information;\n\t\/\/ we need to drop the database users before we can drop the login and the role\n\t\/\/ This isn't done in a transaction because even if we fail along the way,\n\t\/\/ we want to remove as much access as possible\n\tstmt, err := db.Prepare(fmt.Sprintf(\"EXEC master.dbo.sp_msloginmappings '%s';\", username))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stmt.Close()\n\n\trows, err := stmt.Query()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tvar loginName, dbName, qUsername string\n\t\tvar aliasName sql.NullString\n\t\terr = rows.Scan(&loginName, &dbName, &qUsername, &aliasName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trevokeStmts = append(revokeStmts, fmt.Sprintf(dropUserSQL, dbName, username, username))\n\t}\n\n\t\/\/ we do not stop on error, as we want to remove as\n\t\/\/ many permissions as possible right now\n\tvar lastStmtError error\n\tfor _, query := range revokeStmts {\n\t\tstmt, err := db.Prepare(query)\n\t\tif err != nil {\n\t\t\tlastStmtError = err\n\t\t\tcontinue\n\t\t}\n\t\tdefer stmt.Close()\n\t\t_, err = stmt.Exec()\n\t\tif err != nil {\n\t\t\tlastStmtError = err\n\t\t}\n\t}\n\n\t\/\/ can't drop if not all database users are dropped\n\tif rows.Err() != nil {\n\t\treturn fmt.Errorf(\"could not generate sql statements for all rows: %s\", rows.Err())\n\t}\n\tif lastStmtError != nil {\n\t\treturn fmt.Errorf(\"could not perform all sql statements: %s\", lastStmtError)\n\t}\n\n\t\/\/ Drop this login\n\tstmt, err = db.Prepare(fmt.Sprintf(dropLoginSQL, username, username))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stmt.Close()\n\tif _, err := stmt.Exec(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nconst dropUserSQL = `\nUSE [%s]\nIF EXISTS\n (SELECT name\n FROM sys.database_principals\n WHERE name = N'%s')\nBEGIN\n DROP USER [%s]\nEND\n`\n\nconst dropLoginSQL = `\nIF EXISTS\n (SELECT name\n FROM master.sys.server_principals\n WHERE name = N'%s')\nBEGIN\n DROP LOGIN [%s]\nEND\n`\nIf user provides a revocation statement for MSSQL plugin honor itpackage mssql\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/vault\/builtin\/logical\/database\/dbplugin\"\n\t\"github.com\/hashicorp\/vault\/helper\/strutil\"\n\t\"github.com\/hashicorp\/vault\/plugins\/helper\/database\/connutil\"\n\t\"github.com\/hashicorp\/vault\/plugins\/helper\/database\/credsutil\"\n\t\"github.com\/hashicorp\/vault\/plugins\/helper\/database\/dbutil\"\n)\n\nconst msSQLTypeName = \"mssql\"\n\n\/\/ MSSQL is an implementation of Database interface\ntype MSSQL struct 
{\n\tconnutil.ConnectionProducer\n\tcredsutil.CredentialsProducer\n}\n\nfunc New() (interface{}, error) {\n\tconnProducer := &connutil.SQLConnectionProducer{}\n\tconnProducer.Type = msSQLTypeName\n\n\tcredsProducer := &credsutil.SQLCredentialsProducer{\n\t\tDisplayNameLen: 4,\n\t\tUsernameLen: 16,\n\t}\n\n\tdbType := &MSSQL{\n\t\tConnectionProducer: connProducer,\n\t\tCredentialsProducer: credsProducer,\n\t}\n\n\treturn dbType, nil\n}\n\n\/\/ Run instantiates a MSSQL object, and runs the RPC server for the plugin\nfunc Run() error {\n\tdbType, err := New()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdbplugin.NewPluginServer(dbType.(*MSSQL))\n\n\treturn nil\n}\n\n\/\/ Type returns the TypeName for this backend\nfunc (m *MSSQL) Type() (string, error) {\n\treturn msSQLTypeName, nil\n}\n\nfunc (m *MSSQL) getConnection() (*sql.DB, error) {\n\tdb, err := m.Connection()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db.(*sql.DB), nil\n}\n\n\/\/ CreateUser generates the username\/password on the underlying MSSQL secret backend as instructed by\n\/\/ the CreationStatement provided.\nfunc (m *MSSQL) CreateUser(statements dbplugin.Statements, usernamePrefix string, expiration time.Time) (username string, password string, err error) {\n\t\/\/ Grab the lock\n\tm.Lock()\n\tdefer m.Unlock()\n\n\t\/\/ Get the connection\n\tdb, err := m.getConnection()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tif statements.CreationStatements == \"\" {\n\t\treturn \"\", \"\", dbutil.ErrEmptyCreationStatement\n\t}\n\n\tusername, err = m.GenerateUsername(usernamePrefix)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tpassword, err = m.GeneratePassword()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\texpirationStr, err := m.GenerateExpiration(expiration)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t\/\/ Start a transaction\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tdefer tx.Rollback()\n\n\t\/\/ Execute each query\n\tfor _, query := range strutil.ParseArbitraryStringSlice(statements.CreationStatements, \";\") {\n\t\tquery = strings.TrimSpace(query)\n\t\tif len(query) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tstmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{\n\t\t\t\"name\": username,\n\t\t\t\"password\": password,\n\t\t\t\"expiration\": expirationStr,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\tdefer stmt.Close()\n\t\tif _, err := stmt.Exec(); err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t}\n\n\t\/\/ Commit the transaction\n\tif err := tx.Commit(); err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\treturn username, password, nil\n}\n\n\/\/ RenewUser is not supported on MSSQL, so this is a no-op.\nfunc (m *MSSQL) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {\n\t\/\/ NOOP\n\treturn nil\n}\n\n\/\/ RevokeUser attempts to drop the specified user. 
It will first attempt to disable login,\n\/\/ then kill pending connections from that user, and finally drop the user and login from the\n\/\/ database instance.\nfunc (m *MSSQL) RevokeUser(statements dbplugin.Statements, username string) error {\n\tif statements.RevocationStatements == \"\" {\n\t\treturn m.revokeUserDefault(username)\n\t}\n\n\t\/\/ Get connection\n\tdb, err := m.getConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Start a transaction\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Rollback()\n\n\t\/\/ Execute each query\n\tfor _, query := range strutil.ParseArbitraryStringSlice(statements.RevocationStatements, \";\") {\n\t\tquery = strings.TrimSpace(query)\n\t\tif len(query) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tstmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{\n\t\t\t\"name\": username,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer stmt.Close()\n\t\tif _, err := stmt.Exec(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Commit the transaction\n\tif err := tx.Commit(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (m *MSSQL) revokeUserDefault(username string) error {\n\t\/\/ Get connection\n\tdb, err := m.getConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ First disable server login\n\tdisableStmt, err := db.Prepare(fmt.Sprintf(\"ALTER LOGIN [%s] DISABLE;\", username))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer disableStmt.Close()\n\tif _, err := disableStmt.Exec(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Query for sessions for the login so that we can kill any outstanding\n\t\/\/ sessions. There cannot be any active sessions before we drop the logins\n\t\/\/ This isn't done in a transaction because even if we fail along the way,\n\t\/\/ we want to remove as much access as possible\n\tsessionStmt, err := db.Prepare(fmt.Sprintf(\n\t\t\"SELECT session_id FROM sys.dm_exec_sessions WHERE login_name = '%s';\", username))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer sessionStmt.Close()\n\n\tsessionRows, err := sessionStmt.Query()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer sessionRows.Close()\n\n\tvar revokeStmts []string\n\tfor sessionRows.Next() {\n\t\tvar sessionID int\n\t\terr = sessionRows.Scan(&sessionID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trevokeStmts = append(revokeStmts, fmt.Sprintf(\"KILL %d;\", sessionID))\n\t}\n\n\t\/\/ Query for database users using undocumented stored procedure for now since\n\t\/\/ it is the easiest way to get this information;\n\t\/\/ we need to drop the database users before we can drop the login and the role\n\t\/\/ This isn't done in a transaction because even if we fail along the way,\n\t\/\/ we want to remove as much access as possible\n\tstmt, err := db.Prepare(fmt.Sprintf(\"EXEC master.dbo.sp_msloginmappings '%s';\", username))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stmt.Close()\n\n\trows, err := stmt.Query()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tvar loginName, dbName, qUsername string\n\t\tvar aliasName sql.NullString\n\t\terr = rows.Scan(&loginName, &dbName, &qUsername, &aliasName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trevokeStmts = append(revokeStmts, fmt.Sprintf(dropUserSQL, dbName, username, username))\n\t}\n\n\t\/\/ we do not stop on error, as we want to remove as\n\t\/\/ many permissions as possible right now\n\tvar lastStmtError error\n\tfor _, query := range revokeStmts {\n\t\tstmt, err := 
db.Prepare(query)\n\t\tif err != nil {\n\t\t\tlastStmtError = err\n\t\t\tcontinue\n\t\t}\n\t\tdefer stmt.Close()\n\t\t_, err = stmt.Exec()\n\t\tif err != nil {\n\t\t\tlastStmtError = err\n\t\t}\n\t}\n\n\t\/\/ can't drop if not all database users are dropped\n\tif rows.Err() != nil {\n\t\treturn fmt.Errorf(\"could not generate sql statements for all rows: %s\", rows.Err())\n\t}\n\tif lastStmtError != nil {\n\t\treturn fmt.Errorf(\"could not perform all sql statements: %s\", lastStmtError)\n\t}\n\n\t\/\/ Drop this login\n\tstmt, err = db.Prepare(fmt.Sprintf(dropLoginSQL, username, username))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stmt.Close()\n\tif _, err := stmt.Exec(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nconst dropUserSQL = `\nUSE [%s]\nIF EXISTS\n (SELECT name\n FROM sys.database_principals\n WHERE name = N'%s')\nBEGIN\n DROP USER [%s]\nEND\n`\n\nconst dropLoginSQL = `\nIF EXISTS\n (SELECT name\n FROM master.sys.server_principals\n WHERE name = N'%s')\nBEGIN\n DROP LOGIN [%s]\nEND\n`\n<|endoftext|>"} {"text":"package network\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"io\"\n\t\"math\"\n\t\"sync\"\n\t\"time\"\n\n\t\"collectd.org\/api\"\n)\n\nconst (\n\tdsTypeGauge = 1\n\tdsTypeDerive = 2\n)\n\nconst (\n\ttypeHost = 0x0000\n\ttypeTime = 0x0001\n\ttypeTimeHR = 0x0008\n\ttypePlugin = 0x0002\n\ttypePluginInstance = 0x0003\n\ttypeType = 0x0004\n\ttypeTypeInstance = 0x0005\n\ttypeValues = 0x0006\n\ttypeInterval = 0x0007\n\ttypeIntervalHR = 0x0009\n)\n\nconst DefaultBufferSize = 1452\n\n\/\/ Buffer contains the binary representation of multiple ValueLists and state\n\/\/ optimally write the next ValueList.\ntype Buffer struct {\n\tlock *sync.Mutex\n\tbuffer *bytes.Buffer\n\toutput io.Writer\n\tstate api.ValueList\n\tsize int\n}\n\n\/\/ NewBuffer initializes a new Buffer.\nfunc NewBuffer(w io.Writer) *Buffer {\n\treturn &Buffer{\n\t\tlock: new(sync.Mutex),\n\t\tbuffer: new(bytes.Buffer),\n\t\toutput: w,\n\t\tsize: DefaultBufferSize,\n\t}\n}\n\nfunc (b *Buffer) flush(n int) error {\n\tif n == 0 {\n\t\treturn nil\n\t}\n\n\tbuf := make([]byte, n)\n\n\tif _, err := b.buffer.Read(buf); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := b.output.Write(buf); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (b *Buffer) Flush() error {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\treturn b.flush(b.buffer.Len())\n}\n\n\/\/ WriteValueList adds a ValueList to the network buffer.\nfunc (b *Buffer) WriteValueList(vl api.ValueList) (n int, err error) {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\torigLen := b.buffer.Len()\n\n\tb.writeIdentifier(vl.Identifier)\n\tb.writeTime(vl.Time)\n\tb.writeInterval(vl.Interval)\n\tb.writeValues(vl.Values)\n\n\tn = b.buffer.Len() - origLen\n\terr = nil\n\tif b.buffer.Len() >= b.size {\n\t\terr = b.flush(origLen)\n\t}\n\n\treturn\n}\n\nfunc (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {\n\tn, err = b.buffer.WriteTo(w)\n\n\tb.buffer.Reset()\n\tb.state = api.ValueList{}\n\treturn\n}\n\nfunc (b *Buffer) writeIdentifier(id api.Identifier) {\n\tif id.Host != b.state.Host {\n\t\tb.writeString(typeHost, id.Host)\n\t\tb.state.Host = id.Host\n\t}\n\tif id.Plugin != b.state.Plugin {\n\t\tb.writeString(typePlugin, id.Plugin)\n\t\tb.state.Plugin = id.Plugin\n\t}\n\tif id.PluginInstance != b.state.PluginInstance {\n\t\tb.writeString(typePluginInstance, id.PluginInstance)\n\t\tb.state.PluginInstance = id.PluginInstance\n\t}\n\tif id.Type != b.state.Type {\n\t\tb.writeString(typeType, id.Type)\n\t\tb.state.Type = 
id.Type\n\t}\n\tif id.TypeInstance != b.state.TypeInstance {\n\t\tb.writeString(typeTypeInstance, id.TypeInstance)\n\t\tb.state.TypeInstance = id.TypeInstance\n\t}\n}\n\nfunc (b *Buffer) writeTime(t time.Time) error {\n\tif b.state.Time == t {\n\t\treturn nil\n\t}\n\tb.state.Time = t\n\n\treturn b.writeInt(typeTimeHR, api.Cdtime(t))\n}\n\nfunc (b *Buffer) writeInterval(d time.Duration) error {\n\tif b.state.Interval == d {\n\t\treturn nil\n\t}\n\tb.state.Interval = d\n\n\treturn b.writeInt(typeIntervalHR, api.CdtimeDuration(d))\n}\n\nfunc (b *Buffer) writeValues(values []api.Value) error {\n\tsize := uint16(6 + 9*len(values))\n\n\tbinary.Write(b.buffer, binary.BigEndian, uint16(typeValues))\n\tbinary.Write(b.buffer, binary.BigEndian, uint16(size))\n\tbinary.Write(b.buffer, binary.BigEndian, uint16(len(values)))\n\n\tfor _, v := range values {\n\t\tswitch v.(type) {\n\t\tcase api.Gauge:\n\t\t\tbinary.Write(b.buffer, binary.BigEndian, uint8(dsTypeGauge))\n\t\tcase api.Derive:\n\t\t\tbinary.Write(b.buffer, binary.BigEndian, uint8(dsTypeDerive))\n\t\tdefault:\n\t\t\tpanic(\"unexpected type\")\n\t\t}\n\t}\n\n\tfor _, v := range values {\n\t\tswitch v := v.(type) {\n\t\tcase api.Gauge:\n\t\t\tif math.IsNaN(float64(v)) {\n\t\t\t\tb.buffer.Write([]byte{0, 0, 0, 0, 0, 0, 0xf8, 0x7f})\n\t\t\t} else {\n\t\t\t\t\/\/ sic: floats are encoded in little endian.\n\t\t\t\tbinary.Write(b.buffer, binary.LittleEndian, float64(v))\n\t\t\t}\n\t\tcase api.Derive:\n\t\t\tbinary.Write(b.buffer, binary.BigEndian, int64(v))\n\t\tdefault:\n\t\t\tpanic(\"unexpected type\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (b *Buffer) writeString(typ uint16, s string) error {\n\tencoded := bytes.NewBufferString(s)\n\tencoded.Write([]byte{0})\n\n\t\/\/ Because s is a Unicode string, encoded.Len() may be larger than\n\t\/\/ len(s).\n\tsize := uint16(4 + encoded.Len())\n\n\tbinary.Write(b.buffer, binary.BigEndian, typ)\n\tbinary.Write(b.buffer, binary.BigEndian, size)\n\tb.buffer.Write(encoded.Bytes())\n\n\treturn nil\n}\n\nfunc (b *Buffer) writeInt(typ uint16, n uint64) error {\n\tbinary.Write(b.buffer, binary.BigEndian, typ)\n\tbinary.Write(b.buffer, binary.BigEndian, uint16(12))\n\tbinary.Write(b.buffer, binary.BigEndian, n)\n\n\treturn nil\n}\nnetwork: Change WriteValueList() to return an error only.package network\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"io\"\n\t\"math\"\n\t\"sync\"\n\t\"time\"\n\n\t\"collectd.org\/api\"\n)\n\nconst (\n\tdsTypeGauge = 1\n\tdsTypeDerive = 2\n)\n\nconst (\n\ttypeHost = 0x0000\n\ttypeTime = 0x0001\n\ttypeTimeHR = 0x0008\n\ttypePlugin = 0x0002\n\ttypePluginInstance = 0x0003\n\ttypeType = 0x0004\n\ttypeTypeInstance = 0x0005\n\ttypeValues = 0x0006\n\ttypeInterval = 0x0007\n\ttypeIntervalHR = 0x0009\n)\n\nconst DefaultBufferSize = 1452\n\n\/\/ Buffer contains the binary representation of multiple ValueLists and state\n\/\/ optimally write the next ValueList.\ntype Buffer struct {\n\tlock *sync.Mutex\n\tbuffer *bytes.Buffer\n\toutput io.Writer\n\tstate api.ValueList\n\tsize int\n}\n\n\/\/ NewBuffer initializes a new Buffer.\nfunc NewBuffer(w io.Writer) *Buffer {\n\treturn &Buffer{\n\t\tlock: new(sync.Mutex),\n\t\tbuffer: new(bytes.Buffer),\n\t\toutput: w,\n\t\tsize: DefaultBufferSize,\n\t}\n}\n\nfunc (b *Buffer) flush(n int) error {\n\tif n == 0 {\n\t\treturn nil\n\t}\n\n\tbuf := make([]byte, n)\n\n\tif _, err := b.buffer.Read(buf); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := b.output.Write(buf); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (b *Buffer) Flush() error 
{\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\treturn b.flush(b.buffer.Len())\n}\n\n\/\/ WriteValueList adds a ValueList to the network buffer.\nfunc (b *Buffer) WriteValueList(vl api.ValueList) error {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\torigLen := b.buffer.Len()\n\n\tb.writeIdentifier(vl.Identifier)\n\tb.writeTime(vl.Time)\n\tb.writeInterval(vl.Interval)\n\tb.writeValues(vl.Values)\n\n\tif b.buffer.Len() >= b.size {\n\t\tif err := b.flush(origLen); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {\n\tn, err = b.buffer.WriteTo(w)\n\n\tb.buffer.Reset()\n\tb.state = api.ValueList{}\n\treturn\n}\n\nfunc (b *Buffer) writeIdentifier(id api.Identifier) {\n\tif id.Host != b.state.Host {\n\t\tb.writeString(typeHost, id.Host)\n\t\tb.state.Host = id.Host\n\t}\n\tif id.Plugin != b.state.Plugin {\n\t\tb.writeString(typePlugin, id.Plugin)\n\t\tb.state.Plugin = id.Plugin\n\t}\n\tif id.PluginInstance != b.state.PluginInstance {\n\t\tb.writeString(typePluginInstance, id.PluginInstance)\n\t\tb.state.PluginInstance = id.PluginInstance\n\t}\n\tif id.Type != b.state.Type {\n\t\tb.writeString(typeType, id.Type)\n\t\tb.state.Type = id.Type\n\t}\n\tif id.TypeInstance != b.state.TypeInstance {\n\t\tb.writeString(typeTypeInstance, id.TypeInstance)\n\t\tb.state.TypeInstance = id.TypeInstance\n\t}\n}\n\nfunc (b *Buffer) writeTime(t time.Time) error {\n\tif b.state.Time == t {\n\t\treturn nil\n\t}\n\tb.state.Time = t\n\n\treturn b.writeInt(typeTimeHR, api.Cdtime(t))\n}\n\nfunc (b *Buffer) writeInterval(d time.Duration) error {\n\tif b.state.Interval == d {\n\t\treturn nil\n\t}\n\tb.state.Interval = d\n\n\treturn b.writeInt(typeIntervalHR, api.CdtimeDuration(d))\n}\n\nfunc (b *Buffer) writeValues(values []api.Value) error {\n\tsize := uint16(6 + 9*len(values))\n\n\tbinary.Write(b.buffer, binary.BigEndian, uint16(typeValues))\n\tbinary.Write(b.buffer, binary.BigEndian, uint16(size))\n\tbinary.Write(b.buffer, binary.BigEndian, uint16(len(values)))\n\n\tfor _, v := range values {\n\t\tswitch v.(type) {\n\t\tcase api.Gauge:\n\t\t\tbinary.Write(b.buffer, binary.BigEndian, uint8(dsTypeGauge))\n\t\tcase api.Derive:\n\t\t\tbinary.Write(b.buffer, binary.BigEndian, uint8(dsTypeDerive))\n\t\tdefault:\n\t\t\tpanic(\"unexpected type\")\n\t\t}\n\t}\n\n\tfor _, v := range values {\n\t\tswitch v := v.(type) {\n\t\tcase api.Gauge:\n\t\t\tif math.IsNaN(float64(v)) {\n\t\t\t\tb.buffer.Write([]byte{0, 0, 0, 0, 0, 0, 0xf8, 0x7f})\n\t\t\t} else {\n\t\t\t\t\/\/ sic: floats are encoded in little endian.\n\t\t\t\tbinary.Write(b.buffer, binary.LittleEndian, float64(v))\n\t\t\t}\n\t\tcase api.Derive:\n\t\t\tbinary.Write(b.buffer, binary.BigEndian, int64(v))\n\t\tdefault:\n\t\t\tpanic(\"unexpected type\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (b *Buffer) writeString(typ uint16, s string) error {\n\tencoded := bytes.NewBufferString(s)\n\tencoded.Write([]byte{0})\n\n\t\/\/ Because s is a Unicode string, encoded.Len() may be larger than\n\t\/\/ len(s).\n\tsize := uint16(4 + encoded.Len())\n\n\tbinary.Write(b.buffer, binary.BigEndian, typ)\n\tbinary.Write(b.buffer, binary.BigEndian, size)\n\tb.buffer.Write(encoded.Bytes())\n\n\treturn nil\n}\n\nfunc (b *Buffer) writeInt(typ uint16, n uint64) error {\n\tbinary.Write(b.buffer, binary.BigEndian, typ)\n\tbinary.Write(b.buffer, binary.BigEndian, uint16(12))\n\tbinary.Write(b.buffer, binary.BigEndian, n)\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"package kubernetes\n\nimport 
(\n\t\"fmt\"\n\n\t\"github.com\/weaveworks\/scope\/report\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\tapiv1 \"k8s.io\/client-go\/pkg\/api\/v1\"\n\tapiv1beta1 \"k8s.io\/client-go\/pkg\/apis\/extensions\/v1beta1\"\n)\n\n\/\/ These constants are keys used in node metadata\nconst (\n\tFullyLabeledReplicas = \"kubernetes_fully_labeled_replicas\"\n)\n\n\/\/ ReplicaSet represents a Kubernetes replica set\ntype ReplicaSet interface {\n\tMeta\n\tSelector() (labels.Selector, error)\n\tAddParent(topology, id string)\n\tGetNode(probeID string) report.Node\n}\n\ntype replicaSet struct {\n\t*apiv1beta1.ReplicaSet\n\tMeta\n\tparents report.Sets\n\tNode *apiv1.Node\n}\n\n\/\/ NewReplicaSet creates a new ReplicaSet\nfunc NewReplicaSet(r *apiv1beta1.ReplicaSet) ReplicaSet {\n\treturn &replicaSet{\n\t\tReplicaSet: r,\n\t\tMeta: meta{r.ObjectMeta},\n\t\tparents: report.MakeSets(),\n\t}\n}\n\nfunc (r *replicaSet) Selector() (labels.Selector, error) {\n\tselector, err := metav1.LabelSelectorAsSelector(r.Spec.Selector)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn selector, nil\n}\n\nfunc (r *replicaSet) AddParent(topology, id string) {\n\tr.parents = r.parents.Add(topology, report.MakeStringSet(id))\n}\n\nfunc (r *replicaSet) GetNode(probeID string) report.Node {\n\treturn r.MetaNode(report.MakeReplicaSetNodeID(r.UID())).WithLatests(map[string]string{\n\t\tObservedGeneration: fmt.Sprint(r.Status.ObservedGeneration),\n\t\tReplicas: fmt.Sprint(r.Status.Replicas),\n\t\tDesiredReplicas: fmt.Sprint(r.Spec.Replicas),\n\t\tFullyLabeledReplicas: fmt.Sprint(r.Status.FullyLabeledReplicas),\n\t\treport.ControlProbeID: probeID,\n\t}).WithParents(r.parents).WithLatestActiveControls(ScaleUp, ScaleDown)\n}\nfix incorrect reporting of replicaset DesiredReplicaspackage kubernetes\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/weaveworks\/scope\/report\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\tapiv1 \"k8s.io\/client-go\/pkg\/api\/v1\"\n\tapiv1beta1 \"k8s.io\/client-go\/pkg\/apis\/extensions\/v1beta1\"\n)\n\n\/\/ These constants are keys used in node metadata\nconst (\n\tFullyLabeledReplicas = \"kubernetes_fully_labeled_replicas\"\n)\n\n\/\/ ReplicaSet represents a Kubernetes replica set\ntype ReplicaSet interface {\n\tMeta\n\tSelector() (labels.Selector, error)\n\tAddParent(topology, id string)\n\tGetNode(probeID string) report.Node\n}\n\ntype replicaSet struct {\n\t*apiv1beta1.ReplicaSet\n\tMeta\n\tparents report.Sets\n\tNode *apiv1.Node\n}\n\n\/\/ NewReplicaSet creates a new ReplicaSet\nfunc NewReplicaSet(r *apiv1beta1.ReplicaSet) ReplicaSet {\n\treturn &replicaSet{\n\t\tReplicaSet: r,\n\t\tMeta: meta{r.ObjectMeta},\n\t\tparents: report.MakeSets(),\n\t}\n}\n\nfunc (r *replicaSet) Selector() (labels.Selector, error) {\n\tselector, err := metav1.LabelSelectorAsSelector(r.Spec.Selector)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn selector, nil\n}\n\nfunc (r *replicaSet) AddParent(topology, id string) {\n\tr.parents = r.parents.Add(topology, report.MakeStringSet(id))\n}\n\nfunc (r *replicaSet) GetNode(probeID string) report.Node {\n\t\/\/ Spec.Replicas can be omitted, and the pointer will be nil. 
It defaults to 1.\n\tdesiredReplicas := 1\n\tif r.Spec.Replicas != nil {\n\t\tdesiredReplicas = int(*r.Spec.Replicas)\n\t}\n\treturn r.MetaNode(report.MakeReplicaSetNodeID(r.UID())).WithLatests(map[string]string{\n\t\tObservedGeneration: fmt.Sprint(r.Status.ObservedGeneration),\n\t\tReplicas: fmt.Sprint(r.Status.Replicas),\n\t\tDesiredReplicas: fmt.Sprint(desiredReplicas),\n\t\tFullyLabeledReplicas: fmt.Sprint(r.Status.FullyLabeledReplicas),\n\t\treport.ControlProbeID: probeID,\n\t}).WithParents(r.parents).WithLatestActiveControls(ScaleUp, ScaleDown)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage api\n\nimport (\n\t\"github.com\/juju\/errors\"\n\n\t\"github.com\/juju\/juju\/process\"\n)\n\ntype Client struct {\n}\n\nfunc NewClient() (*Client, error) {\n\t\/\/ TODO(ericsnow) finish\n\treturn &Client{}, errors.Errorf(\"not finished\")\n}\n\nfunc (c *Client) List() ([]string, error) {\n\t\/\/ TODO(ericsnow) finish\n\treturn nil, errors.Errorf(\"not finished\")\n}\n\nfunc (c *Client) Get(ids ...string) ([]*process.Info, error) {\n\t\/\/ TODO(ericsnow) finish\n\treturn nil, errors.Errorf(\"not finished\")\n}\n\nfunc (c *Client) Set(procs ...*process.Info) error {\n\t\/\/ TODO(ericsnow) finish\n\treturn errors.Errorf(\"not finished\")\n}\nAdd missing doc comments.\/\/ Copyright 2015 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage api\n\nimport (\n\t\"github.com\/juju\/errors\"\n\n\t\"github.com\/juju\/juju\/process\"\n)\n\n\/\/ Client provides methods for interacting with the Juju API relative\n\/\/ to workload processes.\ntype Client struct {\n}\n\n\/\/ NewClient builds a new workload process API client.\nfunc NewClient() (*Client, error) {\n\t\/\/ TODO(ericsnow) finish\n\treturn &Client{}, errors.Errorf(\"not finished\")\n}\n\n\/\/ List gets the list of defined workload processes from Juju\n\/\/ via the API.\nfunc (c *Client) List() ([]string, error) {\n\t\/\/ TODO(ericsnow) finish\n\treturn nil, errors.Errorf(\"not finished\")\n}\n\n\/\/ Get gets the info for the specified workload processes via the API.\nfunc (c *Client) Get(ids ...string) ([]*process.Info, error) {\n\t\/\/ TODO(ericsnow) finish\n\treturn nil, errors.Errorf(\"not finished\")\n}\n\n\/\/ Set pushes the provided process info up to Juju state via the API.\nfunc (c *Client) Set(procs ...*process.Info) error {\n\t\/\/ TODO(ericsnow) finish\n\treturn errors.Errorf(\"not finished\")\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\/\/\"fmt\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. 
\"github.com\/onsi\/gomega\"\n\t\"time\"\n)\n\nvar _ = Describe(\"Configuration\", func() {\n\n\tvar configuration *Configuration\n\tvar args []string\n\tdefaultWaitTime := time.Duration(0)\n\tdefaultDuration := time.Duration(0)\n\n\tBeforeEach(func() {\n\t\targs = []string{\"file-path.yml\"}\n\t\tconfiguration, _ = ParseConfiguration(args)\n\t})\n\n\tDescribe(\"When no config file is found and no command line args are provided\", func() {\n\t\tDescribe(\"Loading a default configuration\", func() {\n\t\t\tIt(\"sets duration (--duration)\", func() {\n\t\t\t\tExpect(configuration.WaitTime).To(Equal(defaultDuration))\n\t\t\t})\n\t\t\tIt(\"sets random (--random)\", func() {\n\t\t\t\tExpect(configuration.Random).To(Equal(false))\n\t\t\t})\n\t\t\tIt(\"sets summary (--summary)\", func() {\n\t\t\t\tExpect(configuration.Summary).To(Equal(false))\n\t\t\t})\n\t\t\tIt(\"sets workers (--workers)\", func() {\n\t\t\t\tExpect(configuration.Workers).To(Equal(1))\n\t\t\t})\n\t\t\tIt(\"sets wait-time (--wait-time)\", func() {\n\t\t\t\tExpect(configuration.WaitTime).To(Equal(defaultWaitTime))\n\t\t\t})\n\t\t})\n\t})\n\n\tDescribe(\"When config file is not found in pwd\", func() {\n\t\tDescribe(\"and config file is found in user home\", func() {\n\t\t})\n\t})\n\n\tDescribe(\"When config file is found in pwd\", func() {\n\t\tDescribe(\"and config file is found in user home\", func() {\n\t\t})\n\t})\n\n\tDescribe(\"When commandline args are provided\", func() {\n\t\tDescribe(\"overriding the default configuration\", func() {\n\t\t\tDescribe(\"for duration (--duration)\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\targs = []string{\"--duration\", \"3s\", \".\/path\/to\/file\"}\n\t\t\t\t\tconfiguration, _ = ParseConfiguration(args)\n\t\t\t\t})\n\t\t\t\tIt(\"applies the override\", func() {\n\t\t\t\t\tduration, _ := time.ParseDuration(\"3s\")\n\t\t\t\t\tExpect(configuration.Duration).To(Equal(duration))\n\t\t\t\t})\n\n\t\t\t\tDescribe(\"leaves the default for\", func() {\n\t\t\t\t\tIt(\"random\", func() {\n\t\t\t\t\t\tExpect(configuration.Random).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"summary\", func() {\n\t\t\t\t\t\tExpect(configuration.Summary).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"workers\", func() {\n\t\t\t\t\t\tExpect(configuration.Workers).To(Equal(1))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"wait-time\", func() {\n\t\t\t\t\t\tExpect(configuration.WaitTime).To(Equal(defaultWaitTime))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"for file\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\targs = []string{\".\/path\/to\/file\"}\n\t\t\t\t\tconfiguration, _ = ParseConfiguration(args)\n\t\t\t\t})\n\t\t\t\tIt(\"applies the override\", func() {\n\t\t\t\t\tExpect(configuration.FilePath).To(Equal(\".\/path\/to\/file\"))\n\t\t\t\t})\n\n\t\t\t\tDescribe(\"leaves the default for\", func() {\n\t\t\t\t\tIt(\"duration\", func() {\n\t\t\t\t\t\tExpect(configuration.Duration).To(Equal(defaultDuration))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"random\", func() {\n\t\t\t\t\t\tExpect(configuration.Random).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"summary\", func() {\n\t\t\t\t\t\tExpect(configuration.Summary).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"workers\", func() {\n\t\t\t\t\t\tExpect(configuration.Workers).To(Equal(1))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"wait-time\", func() {\n\t\t\t\t\t\tExpect(configuration.WaitTime).To(Equal(defaultWaitTime))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"for random (--random)\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\targs = []string{\"--random\", 
\".\/path\/to\/file\"}\n\t\t\t\t\tconfiguration, _ = ParseConfiguration(args)\n\t\t\t\t})\n\t\t\t\tIt(\"applies the override\", func() {\n\t\t\t\t\tExpect(configuration.Random).To(Equal(true))\n\t\t\t\t})\n\n\t\t\t\tDescribe(\"leaves the default for\", func() {\n\t\t\t\t\tIt(\"duration\", func() {\n\t\t\t\t\t\tExpect(configuration.Duration).To(Equal(defaultDuration))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"summary\", func() {\n\t\t\t\t\t\tExpect(configuration.Summary).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"workers\", func() {\n\t\t\t\t\t\tExpect(configuration.Workers).To(Equal(1))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"wait-time\", func() {\n\t\t\t\t\t\tExpect(configuration.WaitTime).To(Equal(defaultWaitTime))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"for summary (--summary)\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\targs = []string{\"--summary\", \".\/path\/to\/file\"}\n\t\t\t\t\tconfiguration, _ = ParseConfiguration(args)\n\t\t\t\t})\n\t\t\t\tIt(\"applies the override\", func() {\n\t\t\t\t\tExpect(configuration.Summary).To(Equal(true))\n\t\t\t\t})\n\n\t\t\t\tDescribe(\"leaves the default for\", func() {\n\t\t\t\t\tIt(\"duration\", func() {\n\t\t\t\t\t\tExpect(configuration.Duration).To(Equal(defaultDuration))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"random\", func() {\n\t\t\t\t\t\tExpect(configuration.Random).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"workers\", func() {\n\t\t\t\t\t\tExpect(configuration.Workers).To(Equal(1))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"wait-time\", func() {\n\t\t\t\t\t\tExpect(configuration.WaitTime).To(Equal(defaultWaitTime))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"for workers (--workers)\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\targs = []string{\"--workers\", \"3\", \".\/path\/to\/file\"}\n\t\t\t\t\tconfiguration, _ = ParseConfiguration(args)\n\t\t\t\t})\n\t\t\t\tIt(\"applies the override\", func() {\n\t\t\t\t\tExpect(configuration.Workers).To(Equal(3))\n\t\t\t\t})\n\n\t\t\t\tDescribe(\"leaves the default for\", func() {\n\t\t\t\t\tIt(\"duration\", func() {\n\t\t\t\t\t\tExpect(configuration.Duration).To(Equal(defaultDuration))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"random\", func() {\n\t\t\t\t\t\tExpect(configuration.Random).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"summary\", func() {\n\t\t\t\t\t\tExpect(configuration.Summary).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"wait-time\", func() {\n\t\t\t\t\t\tExpect(configuration.WaitTime).To(Equal(defaultWaitTime))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"for wait-time (--wait-time)\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\targs = []string{\"--wait-time\", \"3s\", \".\/path\/to\/file\"}\n\t\t\t\t\tconfiguration, _ = ParseConfiguration(args)\n\t\t\t\t})\n\t\t\t\tIt(\"applies the override\", func() {\n\t\t\t\t\twaitTime, _ := time.ParseDuration(\"3s\")\n\t\t\t\t\tExpect(configuration.WaitTime).To(Equal(waitTime))\n\t\t\t\t})\n\n\t\t\t\tDescribe(\"leaves the default for\", func() {\n\t\t\t\t\tIt(\"duration\", func() {\n\t\t\t\t\t\tExpect(configuration.Duration).To(Equal(defaultDuration))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"random\", func() {\n\t\t\t\t\t\tExpect(configuration.Random).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"summary\", func() {\n\t\t\t\t\t\tExpect(configuration.Summary).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"workers\", func() {\n\t\t\t\t\t\tExpect(configuration.Workers).To(Equal(1))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t\tDescribe(\"setting multiple command line args\", func() {\n\t\t})\n\n\t\tDescribe(\"with invalid arg values\", 
func() {\n\t\t\tDescribe(\"for duration\", func() {\n\t\t\t\tIt(\"returns error\", func() {\n\t\t\t\t\targs = []string{\"--duration\", \"xs\", \".\/path\/to\/file\"}\n\t\t\t\t\t_, err := ParseConfiguration(args)\n\t\t\t\t\tExpect(err).Should(MatchError(\"Cannot parse the value specified for --duration: 'xs'\"))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"for workers\", func() {\n\t\t\t\tIt(\"returns error\", func() {\n\t\t\t\t\targs = []string{\"--workers\", \"xs\", \".\/path\/to\/file\"}\n\t\t\t\t\t_, err := ParseConfiguration(args)\n Expect(err).Should(MatchError(\"strconv.ParseFloat: parsing \\\"xs\\\": invalid syntax\"))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"for wait-time\", func() {\n\t\t\t\tIt(\"returns error\", func() {\n\t\t\t\t\targs = []string{\"--wait-time\", \"xs\", \".\/path\/to\/file\"}\n\t\t\t\t\t_, err := ParseConfiguration(args)\n\t\t\t\t\tExpect(err).Should(MatchError(\"Cannot parse the value specified for --wait-time: 'xs'\"))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\nAdded test for multiple command line argspackage main\n\nimport (\n\t\/\/\"fmt\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"time\"\n)\n\nvar _ = Describe(\"Configuration\", func() {\n\n\tvar configuration *Configuration\n\tvar args []string\n\tdefaultWaitTime := time.Duration(0)\n\tdefaultDuration := time.Duration(0)\n\n\tBeforeEach(func() {\n\t\targs = []string{\"file-path.yml\"}\n\t\tconfiguration, _ = ParseConfiguration(args)\n\t})\n\n\tDescribe(\"When no config file is found and no command line args are provided\", func() {\n\t\tDescribe(\"Loading a default configuration\", func() {\n\t\t\tIt(\"sets duration (--duration)\", func() {\n\t\t\t\tExpect(configuration.WaitTime).To(Equal(defaultDuration))\n\t\t\t})\n\t\t\tIt(\"sets random (--random)\", func() {\n\t\t\t\tExpect(configuration.Random).To(Equal(false))\n\t\t\t})\n\t\t\tIt(\"sets summary (--summary)\", func() {\n\t\t\t\tExpect(configuration.Summary).To(Equal(false))\n\t\t\t})\n\t\t\tIt(\"sets workers (--workers)\", func() {\n\t\t\t\tExpect(configuration.Workers).To(Equal(1))\n\t\t\t})\n\t\t\tIt(\"sets wait-time (--wait-time)\", func() {\n\t\t\t\tExpect(configuration.WaitTime).To(Equal(defaultWaitTime))\n\t\t\t})\n\t\t})\n\t})\n\n\tDescribe(\"When config file is not found in pwd\", func() {\n\t\tDescribe(\"and config file is found in user home\", func() {\n\t\t})\n\t})\n\n\tDescribe(\"When config file is found in pwd\", func() {\n\t\tDescribe(\"and config file is found in user home\", func() {\n\t\t})\n\t})\n\n\tDescribe(\"When commandline args are provided\", func() {\n\t\tDescribe(\"overriding the default configuration\", func() {\n\t\t\tDescribe(\"for duration (--duration)\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\targs = []string{\"--duration\", \"3s\", \".\/path\/to\/file\"}\n\t\t\t\t\tconfiguration, _ = ParseConfiguration(args)\n\t\t\t\t})\n\t\t\t\tIt(\"applies the override\", func() {\n\t\t\t\t\tduration, _ := time.ParseDuration(\"3s\")\n\t\t\t\t\tExpect(configuration.Duration).To(Equal(duration))\n\t\t\t\t})\n\n\t\t\t\tDescribe(\"leaves the default for\", func() {\n\t\t\t\t\tIt(\"random\", func() {\n\t\t\t\t\t\tExpect(configuration.Random).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"summary\", func() {\n\t\t\t\t\t\tExpect(configuration.Summary).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"workers\", func() {\n\t\t\t\t\t\tExpect(configuration.Workers).To(Equal(1))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"wait-time\", func() 
{\n\t\t\t\t\t\tExpect(configuration.WaitTime).To(Equal(defaultWaitTime))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"for file\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\targs = []string{\".\/path\/to\/file\"}\n\t\t\t\t\tconfiguration, _ = ParseConfiguration(args)\n\t\t\t\t})\n\t\t\t\tIt(\"applies the override\", func() {\n\t\t\t\t\tExpect(configuration.FilePath).To(Equal(\".\/path\/to\/file\"))\n\t\t\t\t})\n\n\t\t\t\tDescribe(\"leaves the default for\", func() {\n\t\t\t\t\tIt(\"duration\", func() {\n\t\t\t\t\t\tExpect(configuration.Duration).To(Equal(defaultDuration))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"random\", func() {\n\t\t\t\t\t\tExpect(configuration.Random).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"summary\", func() {\n\t\t\t\t\t\tExpect(configuration.Summary).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"workers\", func() {\n\t\t\t\t\t\tExpect(configuration.Workers).To(Equal(1))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"wait-time\", func() {\n\t\t\t\t\t\tExpect(configuration.WaitTime).To(Equal(defaultWaitTime))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"for random (--random)\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\targs = []string{\"--random\", \".\/path\/to\/file\"}\n\t\t\t\t\tconfiguration, _ = ParseConfiguration(args)\n\t\t\t\t})\n\t\t\t\tIt(\"applies the override\", func() {\n\t\t\t\t\tExpect(configuration.Random).To(Equal(true))\n\t\t\t\t})\n\n\t\t\t\tDescribe(\"leaves the default for\", func() {\n\t\t\t\t\tIt(\"duration\", func() {\n\t\t\t\t\t\tExpect(configuration.Duration).To(Equal(defaultDuration))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"summary\", func() {\n\t\t\t\t\t\tExpect(configuration.Summary).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"workers\", func() {\n\t\t\t\t\t\tExpect(configuration.Workers).To(Equal(1))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"wait-time\", func() {\n\t\t\t\t\t\tExpect(configuration.WaitTime).To(Equal(defaultWaitTime))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"for summary (--summary)\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\targs = []string{\"--summary\", \".\/path\/to\/file\"}\n\t\t\t\t\tconfiguration, _ = ParseConfiguration(args)\n\t\t\t\t})\n\t\t\t\tIt(\"applies the override\", func() {\n\t\t\t\t\tExpect(configuration.Summary).To(Equal(true))\n\t\t\t\t})\n\n\t\t\t\tDescribe(\"leaves the default for\", func() {\n\t\t\t\t\tIt(\"duration\", func() {\n\t\t\t\t\t\tExpect(configuration.Duration).To(Equal(defaultDuration))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"random\", func() {\n\t\t\t\t\t\tExpect(configuration.Random).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"workers\", func() {\n\t\t\t\t\t\tExpect(configuration.Workers).To(Equal(1))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"wait-time\", func() {\n\t\t\t\t\t\tExpect(configuration.WaitTime).To(Equal(defaultWaitTime))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"for workers (--workers)\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\targs = []string{\"--workers\", \"3\", \".\/path\/to\/file\"}\n\t\t\t\t\tconfiguration, _ = ParseConfiguration(args)\n\t\t\t\t})\n\t\t\t\tIt(\"applies the override\", func() {\n\t\t\t\t\tExpect(configuration.Workers).To(Equal(3))\n\t\t\t\t})\n\n\t\t\t\tDescribe(\"leaves the default for\", func() {\n\t\t\t\t\tIt(\"duration\", func() {\n\t\t\t\t\t\tExpect(configuration.Duration).To(Equal(defaultDuration))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"random\", func() {\n\t\t\t\t\t\tExpect(configuration.Random).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"summary\", func() 
{\n\t\t\t\t\t\tExpect(configuration.Summary).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"wait-time\", func() {\n\t\t\t\t\t\tExpect(configuration.WaitTime).To(Equal(defaultWaitTime))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"for wait-time (--wait-time)\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\targs = []string{\"--wait-time\", \"3s\", \".\/path\/to\/file\"}\n\t\t\t\t\tconfiguration, _ = ParseConfiguration(args)\n\t\t\t\t})\n\t\t\t\tIt(\"applies the override\", func() {\n\t\t\t\t\twaitTime, _ := time.ParseDuration(\"3s\")\n\t\t\t\t\tExpect(configuration.WaitTime).To(Equal(waitTime))\n\t\t\t\t})\n\n\t\t\t\tDescribe(\"leaves the default for\", func() {\n\t\t\t\t\tIt(\"duration\", func() {\n\t\t\t\t\t\tExpect(configuration.Duration).To(Equal(defaultDuration))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"random\", func() {\n\t\t\t\t\t\tExpect(configuration.Random).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"summary\", func() {\n\t\t\t\t\t\tExpect(configuration.Summary).To(Equal(false))\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"workers\", func() {\n\t\t\t\t\t\tExpect(configuration.Workers).To(Equal(1))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t\tDescribe(\"setting multiple command line args\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\targs = []string{\"--summary\", \"--workers\", \"50\", \"--duration\", \"3s\", \".\/path\/to\/file\"}\n\t\t\t\tconfiguration, _ = ParseConfiguration(args)\n\t\t\t})\n\t\t\tIt(\"applies the overrides\", func() {\n\t\t\t\tduration, _ := time.ParseDuration(\"3s\")\n\t\t\t\tExpect(configuration.Duration).To(Equal(duration))\n\t\t\t\tExpect(configuration.Summary).To(Equal(true))\n\t\t\t\tExpect(configuration.Workers).To(Equal(50))\n\t\t\t})\n\n\t\t\tIt(\"does not override the defaults for other args\", func() {\n\t\t\t\tExpect(configuration.Random).To(Equal(false))\n\t\t\t\tExpect(configuration.WaitTime).To(Equal(defaultWaitTime))\n\t\t\t})\n\t\t})\n\n\t\tDescribe(\"with invalid arg values\", func() {\n\t\t\tDescribe(\"for duration\", func() {\n\t\t\t\tIt(\"returns error\", func() {\n\t\t\t\t\targs = []string{\"--duration\", \"xs\", \".\/path\/to\/file\"}\n\t\t\t\t\t_, err := ParseConfiguration(args)\n\t\t\t\t\tExpect(err).Should(MatchError(\"Cannot parse the value specified for --duration: 'xs'\"))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"for workers\", func() {\n\t\t\t\tIt(\"returns error\", func() {\n\t\t\t\t\targs = []string{\"--workers\", \"xs\", \".\/path\/to\/file\"}\n\t\t\t\t\t_, err := ParseConfiguration(args)\n\t\t\t\t\tExpect(err).Should(MatchError(\"strconv.ParseFloat: parsing \\\"xs\\\": invalid syntax\"))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"for wait-time\", func() {\n\t\t\t\tIt(\"returns error\", func() {\n\t\t\t\t\targs = []string{\"--wait-time\", \"xs\", \".\/path\/to\/file\"}\n\t\t\t\t\t_, err := ParseConfiguration(args)\n\t\t\t\t\tExpect(err).Should(MatchError(\"Cannot parse the value specified for --wait-time: 'xs'\"))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions 
and\nlimitations under the License.\n*\/\n\npackage input\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\tapiv1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/fields\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/watch\"\n\tvpa_types \"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/apis\/poc.autoscaling.k8s.io\/v1alpha1\"\n\tvpa_clientset \"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/client\/clientset\/versioned\"\n\tvpa_api \"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/client\/clientset\/versioned\/typed\/poc.autoscaling.k8s.io\/v1alpha1\"\n\tvpa_lister \"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/client\/listers\/poc.autoscaling.k8s.io\/v1alpha1\"\n\t\"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/recommender\/input\/history\"\n\t\"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/recommender\/input\/metrics\"\n\t\"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/recommender\/input\/oom\"\n\t\"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/recommender\/input\/spec\"\n\t\"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/recommender\/model\"\n\tvpa_api_util \"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/utils\/vpa\"\n\tkube_client \"k8s.io\/client-go\/kubernetes\"\n\tcorev1 \"k8s.io\/client-go\/kubernetes\/typed\/core\/v1\"\n\tv1lister \"k8s.io\/client-go\/listers\/core\/v1\"\n\t\"k8s.io\/client-go\/rest\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\tresourceclient \"k8s.io\/metrics\/pkg\/client\/clientset\/versioned\/typed\/metrics\/v1beta1\"\n)\n\n\/\/ ClusterStateFeeder can update state of ClusterState object.\ntype ClusterStateFeeder interface {\n\n\t\/\/ InitFromHistoryProvider loads historical pod spec into clusterState.\n\tInitFromHistoryProvider(historyProvider history.HistoryProvider)\n\n\t\/\/ InitFromCheckpoints loads historical checkpoints into clusterState.\n\tInitFromCheckpoints()\n\n\t\/\/ LoadVPAs updates clusterState with current state of VPAs.\n\tLoadVPAs()\n\n\t\/\/ LoadPods updates clusterState with current specification of Pods and their Containers.\n\tLoadPods()\n\n\t\/\/ LoadRealTimeMetrics updates clusterState with current usage metrics of containers.\n\tLoadRealTimeMetrics()\n\n\t\/\/ GarbageCollectCheckpoints removes historical checkpoints that don't have a matching VPA.\n\tGarbageCollectCheckpoints()\n}\n\n\/\/ NewClusterStateFeeder creates new ClusterStateFeeder with internal data providers, based on kube client config and a historyProvider.\nfunc NewClusterStateFeeder(config *rest.Config, clusterState *model.ClusterState) ClusterStateFeeder {\n\tkubeClient := kube_client.NewForConfigOrDie(config)\n\toomObserver := oom.NewObserver()\n\tpodLister := newPodClients(kubeClient, &oomObserver)\n\twatchEvictionEvents(kubeClient, &oomObserver)\n\treturn &clusterStateFeeder{\n\t\tcoreClient: kubeClient.CoreV1(),\n\t\tspecClient: spec.NewSpecClient(podLister),\n\t\tmetricsClient: newMetricsClient(config),\n\t\toomObserver: &oomObserver,\n\t\tvpaCheckpointClient: vpa_clientset.NewForConfigOrDie(config).PocV1alpha1(),\n\t\tvpaLister: vpa_api_util.NewAllVpasLister(vpa_clientset.NewForConfigOrDie(config), make(chan struct{})),\n\t\tclusterState: clusterState,\n\t}\n}\n\nfunc newMetricsClient(config *rest.Config) metrics.MetricsClient {\n\tmetricsGetter := resourceclient.NewForConfigOrDie(config)\n\treturn metrics.NewMetricsClient(metricsGetter)\n}\n\nfunc watchEvictionEvents(kubeClient kube_client.Interface, observer 
*oom.Observer) {\n\toptions := metav1.ListOptions{\n\t\tFieldSelector: \"reason=Evicted\",\n\t}\n\twatchInterface, err := kubeClient.CoreV1().Events(\"\").Watch(options)\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot initialize watching events. Reason %v\", err)\n\t\treturn\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tresult := <-watchInterface.ResultChan()\n\t\t\tif result.Type == watch.Added {\n\t\t\t\tresult, ok := result.Object.(*apiv1.Event)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tobserver.OnEvent(result)\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ Creates clients watching pods: PodLister (listing only not terminated pods).\nfunc newPodClients(kubeClient kube_client.Interface, resourceEventHandler cache.ResourceEventHandler) v1lister.PodLister {\n\tselector := fields.ParseSelectorOrDie(\"status.phase!=\" + string(apiv1.PodPending))\n\tpodListWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), \"pods\", apiv1.NamespaceAll, selector)\n\tindexer, controller := cache.NewIndexerInformer(\n\t\tpodListWatch,\n\t\t&apiv1.Pod{},\n\t\ttime.Hour,\n\t\tresourceEventHandler,\n\t\tcache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},\n\t)\n\tpodLister := v1lister.NewPodLister(indexer)\n\tstopCh := make(chan struct{})\n\tgo controller.Run(stopCh)\n\treturn podLister\n}\n\ntype clusterStateFeeder struct {\n\tcoreClient corev1.CoreV1Interface\n\tspecClient spec.SpecClient\n\tmetricsClient metrics.MetricsClient\n\toomObserver *oom.Observer\n\tvpaCheckpointClient vpa_api.VerticalPodAutoscalerCheckpointsGetter\n\tvpaLister vpa_lister.VerticalPodAutoscalerLister\n\tclusterState *model.ClusterState\n}\n\nfunc (feeder *clusterStateFeeder) InitFromHistoryProvider(historyProvider history.HistoryProvider) {\n\tglog.V(3).Info(\"Initializing VPA from history provider\")\n\tclusterHistory, err := historyProvider.GetClusterHistory()\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot get cluster history: %v\", err)\n\t}\n\tfor podID, podHistory := range clusterHistory {\n\t\tglog.V(4).Infof(\"Adding pod %v with labels %v\", podID, podHistory.LastLabels)\n\t\tfeeder.clusterState.AddOrUpdatePod(podID, podHistory.LastLabels, apiv1.PodUnknown)\n\t\tfor containerName, sampleList := range podHistory.Samples {\n\t\t\tcontainerID := model.ContainerID{\n\t\t\t\tPodID: podID,\n\t\t\t\tContainerName: containerName}\n\t\t\tglog.V(4).Infof(\"Adding %d samples for container %v\", len(sampleList), containerID)\n\t\t\tfor _, sample := range sampleList {\n\t\t\t\tfeeder.clusterState.AddSample(\n\t\t\t\t\t&model.ContainerUsageSampleWithKey{\n\t\t\t\t\t\tContainerUsageSample: sample,\n\t\t\t\t\t\tContainer: containerID})\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (feeder *clusterStateFeeder) setVpaCheckpoint(checkpoint *vpa_types.VerticalPodAutoscalerCheckpoint) error {\n\tvpaID := model.VpaID{Namespace: checkpoint.Namespace, VpaName: checkpoint.Spec.VPAObjectName}\n\tvpa, exists := feeder.clusterState.Vpas[vpaID]\n\tif !exists {\n\t\treturn fmt.Errorf(\"Cannot load checkpoint to missing VPA object %+v\", vpaID)\n\t}\n\n\tcs := model.NewAggregateContainerState()\n\terr := cs.LoadFromCheckpoint(&checkpoint.Status)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Cannot load checkpoint for VPA %+v. 
Reason: %v\", vpa.ID, err)\n\t}\n\tvpa.ContainersInitialAggregateState[checkpoint.Spec.ContainerName] = cs\n\treturn nil\n}\n\nfunc (feeder *clusterStateFeeder) InitFromCheckpoints() {\n\tglog.V(3).Info(\"Initializing VPA from checkpoints\")\n\tfeeder.LoadVPAs()\n\n\tnamespaces := make(map[string]bool)\n\tfor _, v := range feeder.clusterState.Vpas {\n\t\tnamespaces[v.ID.Namespace] = true\n\t}\n\n\tfor namespace := range namespaces {\n\t\tglog.V(3).Infof(\"Fetching checkpoints from namespace %s\", namespace)\n\t\tcheckpointList, err := feeder.vpaCheckpointClient.VerticalPodAutoscalerCheckpoints(namespace).List(metav1.ListOptions{})\n\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Cannot list VPA checkpoints from namespace %v. Reason: %+v\", namespace, err)\n\t\t}\n\t\tfor _, checkpoint := range checkpointList.Items {\n\n\t\t\tglog.V(3).Infof(\"Loading VPA %s\/%s checkpoint for %s\", checkpoint.ObjectMeta.Namespace, checkpoint.Spec.VPAObjectName, checkpoint.Spec.ContainerName)\n\t\t\terr = feeder.setVpaCheckpoint(&checkpoint)\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"Error while loading checkpoint. Reason: %+v\", err)\n\t\t\t}\n\n\t\t}\n\t}\n}\n\nfunc (feeder *clusterStateFeeder) GarbageCollectCheckpoints() {\n\tglog.V(3).Info(\"Starting garbage collection of checkpoints\")\n\tfeeder.LoadVPAs()\n\n\tnamspaceList, err := feeder.coreClient.Namespaces().List(metav1.ListOptions{})\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot list namespaces. Reason: %+v\", err)\n\t\treturn\n\t}\n\n\tfor _, namespaceItem := range namspaceList.Items {\n\t\tnamespace := namespaceItem.Name\n\t\tcheckpointList, err := feeder.vpaCheckpointClient.VerticalPodAutoscalerCheckpoints(namespace).List(metav1.ListOptions{})\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Cannot list VPA checkpoints from namespace %v. Reason: %+v\", namespace, err)\n\t\t}\n\t\tfor _, checkpoint := range checkpointList.Items {\n\t\t\tvpaID := model.VpaID{Namespace: checkpoint.Namespace, VpaName: checkpoint.Spec.VPAObjectName}\n\t\t\t_, exists := feeder.clusterState.Vpas[vpaID]\n\t\t\tif !exists {\n\t\t\t\terr = feeder.vpaCheckpointClient.VerticalPodAutoscalerCheckpoints(namespace).Delete(checkpoint.Name, &metav1.DeleteOptions{})\n\t\t\t\tif err == nil {\n\t\t\t\t\tglog.V(3).Infof(\"Orphaned VPA checkpoint cleanup - deleting %v\/%v.\", namespace, checkpoint.Name)\n\t\t\t\t} else {\n\t\t\t\t\tglog.Errorf(\"Cannot delete VPA checkpoint %v\/%v. Reason: %+v\", namespace, checkpoint.Name, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Fetch VPA objects and load them into the cluster state.\nfunc (feeder *clusterStateFeeder) LoadVPAs() {\n\t\/\/ List VPA API objects.\n\tvpaCRDs, err := feeder.vpaLister.List(labels.Everything())\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot list VPAs. 
Reason: %+v\", err)\n\t} else {\n\t\tglog.V(3).Infof(\"Fetched %d VPAs.\", len(vpaCRDs))\n\t}\n\t\/\/ Add or update existing VPAs in the model.\n\tvpaKeys := make(map[model.VpaID]bool)\n\tfor n, vpaCRD := range vpaCRDs {\n\t\tglog.V(3).Infof(\"VPA CRD #%v: %+v\", n, vpaCRD)\n\t\tvpaID := model.VpaID{\n\t\t\tNamespace: vpaCRD.Namespace,\n\t\t\tVpaName: vpaCRD.Name}\n\t\tif feeder.clusterState.AddOrUpdateVpa(vpaCRD) == nil {\n\t\t\t\/\/ Successfully added VPA to the model.\n\t\t\tvpaKeys[vpaID] = true\n\t\t}\n\t}\n\t\/\/ Delete non-existent VPAs from the model.\n\tfor vpaID := range feeder.clusterState.Vpas {\n\t\tif _, exists := vpaKeys[vpaID]; !exists {\n\t\t\tglog.V(3).Infof(\"Deleting VPA %v\", vpaID)\n\t\t\tfeeder.clusterState.DeleteVpa(vpaID)\n\t\t}\n\t}\n}\n\n\/\/ Load pod into the cluster state.\nfunc (feeder *clusterStateFeeder) LoadPods() {\n\tpodSpecs, err := feeder.specClient.GetPodSpecs()\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot get SimplePodSpecs. Reason: %+v\", err)\n\t}\n\tpods := make(map[model.PodID]*spec.BasicPodSpec)\n\tfor n, spec := range podSpecs {\n\t\tglog.V(3).Infof(\"SimplePodSpec #%v: %+v\", n, spec)\n\t\tpods[spec.ID] = spec\n\t}\n\tfor key := range feeder.clusterState.Pods {\n\t\tif _, exists := pods[key]; !exists {\n\t\t\tglog.V(3).Infof(\"Deleting Pod %v\", key)\n\t\t\tfeeder.clusterState.DeletePod(key)\n\t\t}\n\t}\n\tfor _, pod := range pods {\n\t\tfeeder.clusterState.AddOrUpdatePod(pod.ID, pod.PodLabels, pod.Phase)\n\t\tfor _, container := range pod.Containers {\n\t\t\tfeeder.clusterState.AddOrUpdateContainer(container.ID, container.Request)\n\t\t}\n\t}\n}\n\nfunc (feeder *clusterStateFeeder) LoadRealTimeMetrics() {\n\tcontainersMetrics, err := feeder.metricsClient.GetContainersMetrics()\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot get ContainerMetricsSnapshot from MetricsClient. 
Reason: %+v\", err)\n\t}\n\n\tsampleCount := 0\n\tfor _, containerMetrics := range containersMetrics {\n\t\tfor _, sample := range newContainerUsageSamplesWithKey(containerMetrics) {\n\t\t\tfeeder.clusterState.AddSample(sample)\n\t\t\tsampleCount++\n\t\t}\n\t}\n\tglog.V(3).Infof(\"ClusterSpec fed with #%v ContainerUsageSamples for #%v containers\", sampleCount, len(containersMetrics))\n\nLoop:\n\tfor {\n\t\tselect {\n\t\tcase oomInfo := <-feeder.oomObserver.ObservedOomsChannel:\n\t\t\tglog.V(3).Infof(\"OOM detected %+v\", oomInfo)\n\t\t\tcontainer := model.ContainerID{\n\t\t\t\tPodID: model.PodID{\n\t\t\t\t\tNamespace: oomInfo.Namespace,\n\t\t\t\t\tPodName: oomInfo.Pod,\n\t\t\t\t},\n\t\t\t\tContainerName: oomInfo.Container,\n\t\t\t}\n\t\t\tfeeder.clusterState.RecordOOM(container, oomInfo.Timestamp, model.ResourceAmount(oomInfo.Memory.Value()))\n\t\tdefault:\n\t\t\tbreak Loop\n\t\t}\n\t}\n}\n\nfunc newContainerUsageSamplesWithKey(metrics *metrics.ContainerMetricsSnapshot) []*model.ContainerUsageSampleWithKey {\n\tvar samples []*model.ContainerUsageSampleWithKey\n\n\tfor metricName, resourceAmount := range metrics.Usage {\n\t\tsample := &model.ContainerUsageSampleWithKey{\n\t\t\tContainer: metrics.ID,\n\t\t\tContainerUsageSample: model.ContainerUsageSample{\n\t\t\t\tMeasureStart: metrics.SnapshotTime,\n\t\t\t\tResource: metricName,\n\t\t\t\tUsage: resourceAmount,\n\t\t\t},\n\t\t}\n\t\tsamples = append(samples, sample)\n\t}\n\treturn samples\n}\nHandle channel closes from pod event watcher\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage input\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\tapiv1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/fields\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/watch\"\n\tvpa_types \"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/apis\/poc.autoscaling.k8s.io\/v1alpha1\"\n\tvpa_clientset \"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/client\/clientset\/versioned\"\n\tvpa_api \"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/client\/clientset\/versioned\/typed\/poc.autoscaling.k8s.io\/v1alpha1\"\n\tvpa_lister \"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/client\/listers\/poc.autoscaling.k8s.io\/v1alpha1\"\n\t\"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/recommender\/input\/history\"\n\t\"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/recommender\/input\/metrics\"\n\t\"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/recommender\/input\/oom\"\n\t\"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/recommender\/input\/spec\"\n\t\"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/recommender\/model\"\n\tvpa_api_util \"k8s.io\/autoscaler\/vertical-pod-autoscaler\/pkg\/utils\/vpa\"\n\tkube_client \"k8s.io\/client-go\/kubernetes\"\n\tcorev1 \"k8s.io\/client-go\/kubernetes\/typed\/core\/v1\"\n\tv1lister 
\"k8s.io\/client-go\/listers\/core\/v1\"\n\t\"k8s.io\/client-go\/rest\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\tresourceclient \"k8s.io\/metrics\/pkg\/client\/clientset\/versioned\/typed\/metrics\/v1beta1\"\n)\n\n\/\/ ClusterStateFeeder can update state of ClusterState object.\ntype ClusterStateFeeder interface {\n\n\t\/\/ InitFromHistoryProvider loads historical pod spec into clusterState.\n\tInitFromHistoryProvider(historyProvider history.HistoryProvider)\n\n\t\/\/ InitFromCheckpoints loads historical checkpoints into clusterState.\n\tInitFromCheckpoints()\n\n\t\/\/ LoadVPAs updates clusterState with current state of VPAs.\n\tLoadVPAs()\n\n\t\/\/ LoadPods updates clusterState with current specification of Pods and their Containers.\n\tLoadPods()\n\n\t\/\/ LoadRealTimeMetrics updates clusterState with current usage metrics of containers.\n\tLoadRealTimeMetrics()\n\n\t\/\/ GarbageCollectCheckpoints removes historical checkpoints that don't have a matching VPA.\n\tGarbageCollectCheckpoints()\n}\n\n\/\/ NewClusterStateFeeder creates new ClusterStateFeeder with internal data providers, based on kube client config and a historyProvider.\nfunc NewClusterStateFeeder(config *rest.Config, clusterState *model.ClusterState) ClusterStateFeeder {\n\tkubeClient := kube_client.NewForConfigOrDie(config)\n\toomObserver := oom.NewObserver()\n\tpodLister := newPodClients(kubeClient, &oomObserver)\n\twatchEvictionEventsWithRetries(kubeClient, &oomObserver)\n\treturn &clusterStateFeeder{\n\t\tcoreClient: kubeClient.CoreV1(),\n\t\tspecClient: spec.NewSpecClient(podLister),\n\t\tmetricsClient: newMetricsClient(config),\n\t\toomObserver: &oomObserver,\n\t\tvpaCheckpointClient: vpa_clientset.NewForConfigOrDie(config).PocV1alpha1(),\n\t\tvpaLister: vpa_api_util.NewAllVpasLister(vpa_clientset.NewForConfigOrDie(config), make(chan struct{})),\n\t\tclusterState: clusterState,\n\t}\n}\n\nfunc newMetricsClient(config *rest.Config) metrics.MetricsClient {\n\tmetricsGetter := resourceclient.NewForConfigOrDie(config)\n\treturn metrics.NewMetricsClient(metricsGetter)\n}\n\nfunc watchEvictionEventsWithRetries(kubeClient kube_client.Interface, observer *oom.Observer) {\n\tgo func() {\n\t\toptions := metav1.ListOptions{\n\t\t\tFieldSelector: \"reason=Evicted\",\n\t\t}\n\n\t\tfor {\n\t\t\twatchInterface, err := kubeClient.CoreV1().Events(\"\").Watch(options)\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"Cannot initialize watching events. 
Reason %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\twatchEvictionEvents(watchInterface.ResultChan(), observer)\n\t\t}\n\t}()\n}\n\nfunc watchEvictionEvents(evictedEventChan <-chan watch.Event, observer *oom.Observer) {\n\tfor {\n\t\tevictedEvent, ok := <-evictedEventChan\n\t\tif !ok {\n\t\t\tglog.V(3).Infof(\"Eviction event chan closed\")\n\t\t\treturn\n\t\t}\n\t\tif evictedEvent.Type == watch.Added {\n\t\t\tevictedEvent, ok := evictedEvent.Object.(*apiv1.Event)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tobserver.OnEvent(evictedEvent)\n\t\t}\n\t}\n}\n\n\/\/ Creates clients watching pods: PodLister (listing only not terminated pods).\nfunc newPodClients(kubeClient kube_client.Interface, resourceEventHandler cache.ResourceEventHandler) v1lister.PodLister {\n\tselector := fields.ParseSelectorOrDie(\"status.phase!=\" + string(apiv1.PodPending))\n\tpodListWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), \"pods\", apiv1.NamespaceAll, selector)\n\tindexer, controller := cache.NewIndexerInformer(\n\t\tpodListWatch,\n\t\t&apiv1.Pod{},\n\t\ttime.Hour,\n\t\tresourceEventHandler,\n\t\tcache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},\n\t)\n\tpodLister := v1lister.NewPodLister(indexer)\n\tstopCh := make(chan struct{})\n\tgo controller.Run(stopCh)\n\treturn podLister\n}\n\ntype clusterStateFeeder struct {\n\tcoreClient corev1.CoreV1Interface\n\tspecClient spec.SpecClient\n\tmetricsClient metrics.MetricsClient\n\toomObserver *oom.Observer\n\tvpaCheckpointClient vpa_api.VerticalPodAutoscalerCheckpointsGetter\n\tvpaLister vpa_lister.VerticalPodAutoscalerLister\n\tclusterState *model.ClusterState\n}\n\nfunc (feeder *clusterStateFeeder) InitFromHistoryProvider(historyProvider history.HistoryProvider) {\n\tglog.V(3).Info(\"Initializing VPA from history provider\")\n\tclusterHistory, err := historyProvider.GetClusterHistory()\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot get cluster history: %v\", err)\n\t}\n\tfor podID, podHistory := range clusterHistory {\n\t\tglog.V(4).Infof(\"Adding pod %v with labels %v\", podID, podHistory.LastLabels)\n\t\tfeeder.clusterState.AddOrUpdatePod(podID, podHistory.LastLabels, apiv1.PodUnknown)\n\t\tfor containerName, sampleList := range podHistory.Samples {\n\t\t\tcontainerID := model.ContainerID{\n\t\t\t\tPodID: podID,\n\t\t\t\tContainerName: containerName}\n\t\t\tglog.V(4).Infof(\"Adding %d samples for container %v\", len(sampleList), containerID)\n\t\t\tfor _, sample := range sampleList {\n\t\t\t\tfeeder.clusterState.AddSample(\n\t\t\t\t\t&model.ContainerUsageSampleWithKey{\n\t\t\t\t\t\tContainerUsageSample: sample,\n\t\t\t\t\t\tContainer: containerID})\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (feeder *clusterStateFeeder) setVpaCheckpoint(checkpoint *vpa_types.VerticalPodAutoscalerCheckpoint) error {\n\tvpaID := model.VpaID{Namespace: checkpoint.Namespace, VpaName: checkpoint.Spec.VPAObjectName}\n\tvpa, exists := feeder.clusterState.Vpas[vpaID]\n\tif !exists {\n\t\treturn fmt.Errorf(\"Cannot load checkpoint to missing VPA object %+v\", vpaID)\n\t}\n\n\tcs := model.NewAggregateContainerState()\n\terr := cs.LoadFromCheckpoint(&checkpoint.Status)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Cannot load checkpoint for VPA %+v. 
Reason: %v\", vpa.ID, err)\n\t}\n\tvpa.ContainersInitialAggregateState[checkpoint.Spec.ContainerName] = cs\n\treturn nil\n}\n\nfunc (feeder *clusterStateFeeder) InitFromCheckpoints() {\n\tglog.V(3).Info(\"Initializing VPA from checkpoints\")\n\tfeeder.LoadVPAs()\n\n\tnamespaces := make(map[string]bool)\n\tfor _, v := range feeder.clusterState.Vpas {\n\t\tnamespaces[v.ID.Namespace] = true\n\t}\n\n\tfor namespace := range namespaces {\n\t\tglog.V(3).Infof(\"Fetching checkpoints from namespace %s\", namespace)\n\t\tcheckpointList, err := feeder.vpaCheckpointClient.VerticalPodAutoscalerCheckpoints(namespace).List(metav1.ListOptions{})\n\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Cannot list VPA checkpoints from namespace %v. Reason: %+v\", namespace, err)\n\t\t}\n\t\tfor _, checkpoint := range checkpointList.Items {\n\n\t\t\tglog.V(3).Infof(\"Loading VPA %s\/%s checkpoint for %s\", checkpoint.ObjectMeta.Namespace, checkpoint.Spec.VPAObjectName, checkpoint.Spec.ContainerName)\n\t\t\terr = feeder.setVpaCheckpoint(&checkpoint)\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"Error while loading checkpoint. Reason: %+v\", err)\n\t\t\t}\n\n\t\t}\n\t}\n}\n\nfunc (feeder *clusterStateFeeder) GarbageCollectCheckpoints() {\n\tglog.V(3).Info(\"Starting garbage collection of checkpoints\")\n\tfeeder.LoadVPAs()\n\n\tnamspaceList, err := feeder.coreClient.Namespaces().List(metav1.ListOptions{})\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot list namespaces. Reason: %+v\", err)\n\t\treturn\n\t}\n\n\tfor _, namespaceItem := range namspaceList.Items {\n\t\tnamespace := namespaceItem.Name\n\t\tcheckpointList, err := feeder.vpaCheckpointClient.VerticalPodAutoscalerCheckpoints(namespace).List(metav1.ListOptions{})\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Cannot list VPA checkpoints from namespace %v. Reason: %+v\", namespace, err)\n\t\t}\n\t\tfor _, checkpoint := range checkpointList.Items {\n\t\t\tvpaID := model.VpaID{Namespace: checkpoint.Namespace, VpaName: checkpoint.Spec.VPAObjectName}\n\t\t\t_, exists := feeder.clusterState.Vpas[vpaID]\n\t\t\tif !exists {\n\t\t\t\terr = feeder.vpaCheckpointClient.VerticalPodAutoscalerCheckpoints(namespace).Delete(checkpoint.Name, &metav1.DeleteOptions{})\n\t\t\t\tif err == nil {\n\t\t\t\t\tglog.V(3).Infof(\"Orphaned VPA checkpoint cleanup - deleting %v\/%v.\", namespace, checkpoint.Name)\n\t\t\t\t} else {\n\t\t\t\t\tglog.Errorf(\"Cannot delete VPA checkpoint %v\/%v. Reason: %+v\", namespace, checkpoint.Name, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Fetch VPA objects and load them into the cluster state.\nfunc (feeder *clusterStateFeeder) LoadVPAs() {\n\t\/\/ List VPA API objects.\n\tvpaCRDs, err := feeder.vpaLister.List(labels.Everything())\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot list VPAs. 
Reason: %+v\", err)\n\t} else {\n\t\tglog.V(3).Infof(\"Fetched %d VPAs.\", len(vpaCRDs))\n\t}\n\t\/\/ Add or update existing VPAs in the model.\n\tvpaKeys := make(map[model.VpaID]bool)\n\tfor n, vpaCRD := range vpaCRDs {\n\t\tglog.V(3).Infof(\"VPA CRD #%v: %+v\", n, vpaCRD)\n\t\tvpaID := model.VpaID{\n\t\t\tNamespace: vpaCRD.Namespace,\n\t\t\tVpaName: vpaCRD.Name}\n\t\tif feeder.clusterState.AddOrUpdateVpa(vpaCRD) == nil {\n\t\t\t\/\/ Successfully added VPA to the model.\n\t\t\tvpaKeys[vpaID] = true\n\t\t}\n\t}\n\t\/\/ Delete non-existent VPAs from the model.\n\tfor vpaID := range feeder.clusterState.Vpas {\n\t\tif _, exists := vpaKeys[vpaID]; !exists {\n\t\t\tglog.V(3).Infof(\"Deleting VPA %v\", vpaID)\n\t\t\tfeeder.clusterState.DeleteVpa(vpaID)\n\t\t}\n\t}\n}\n\n\/\/ Load pod into the cluster state.\nfunc (feeder *clusterStateFeeder) LoadPods() {\n\tpodSpecs, err := feeder.specClient.GetPodSpecs()\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot get SimplePodSpecs. Reason: %+v\", err)\n\t}\n\tpods := make(map[model.PodID]*spec.BasicPodSpec)\n\tfor n, spec := range podSpecs {\n\t\tglog.V(3).Infof(\"SimplePodSpec #%v: %+v\", n, spec)\n\t\tpods[spec.ID] = spec\n\t}\n\tfor key := range feeder.clusterState.Pods {\n\t\tif _, exists := pods[key]; !exists {\n\t\t\tglog.V(3).Infof(\"Deleting Pod %v\", key)\n\t\t\tfeeder.clusterState.DeletePod(key)\n\t\t}\n\t}\n\tfor _, pod := range pods {\n\t\tfeeder.clusterState.AddOrUpdatePod(pod.ID, pod.PodLabels, pod.Phase)\n\t\tfor _, container := range pod.Containers {\n\t\t\tfeeder.clusterState.AddOrUpdateContainer(container.ID, container.Request)\n\t\t}\n\t}\n}\n\nfunc (feeder *clusterStateFeeder) LoadRealTimeMetrics() {\n\tcontainersMetrics, err := feeder.metricsClient.GetContainersMetrics()\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot get ContainerMetricsSnapshot from MetricsClient. 
Reason: %+v\", err)\n\t}\n\n\tsampleCount := 0\n\tfor _, containerMetrics := range containersMetrics {\n\t\tfor _, sample := range newContainerUsageSamplesWithKey(containerMetrics) {\n\t\t\tfeeder.clusterState.AddSample(sample)\n\t\t\tsampleCount++\n\t\t}\n\t}\n\tglog.V(3).Infof(\"ClusterSpec fed with #%v ContainerUsageSamples for #%v containers\", sampleCount, len(containersMetrics))\n\nLoop:\n\tfor {\n\t\tselect {\n\t\tcase oomInfo := <-feeder.oomObserver.ObservedOomsChannel:\n\t\t\tglog.V(3).Infof(\"OOM detected %+v\", oomInfo)\n\t\t\tcontainer := model.ContainerID{\n\t\t\t\tPodID: model.PodID{\n\t\t\t\t\tNamespace: oomInfo.Namespace,\n\t\t\t\t\tPodName: oomInfo.Pod,\n\t\t\t\t},\n\t\t\t\tContainerName: oomInfo.Container,\n\t\t\t}\n\t\t\tfeeder.clusterState.RecordOOM(container, oomInfo.Timestamp, model.ResourceAmount(oomInfo.Memory.Value()))\n\t\tdefault:\n\t\t\tbreak Loop\n\t\t}\n\t}\n}\n\nfunc newContainerUsageSamplesWithKey(metrics *metrics.ContainerMetricsSnapshot) []*model.ContainerUsageSampleWithKey {\n\tvar samples []*model.ContainerUsageSampleWithKey\n\n\tfor metricName, resourceAmount := range metrics.Usage {\n\t\tsample := &model.ContainerUsageSampleWithKey{\n\t\t\tContainer: metrics.ID,\n\t\t\tContainerUsageSample: model.ContainerUsageSample{\n\t\t\t\tMeasureStart: metrics.SnapshotTime,\n\t\t\t\tResource: metricName,\n\t\t\t\tUsage: resourceAmount,\n\t\t\t},\n\t\t}\n\t\tsamples = append(samples, sample)\n\t}\n\treturn samples\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2017 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage stackdriver\n\nimport (\n\t\"sync\/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\tsd \"google.golang.org\/api\/logging\/v2\"\n\n\t\"k8s.io\/apimachinery\/pkg\/util\/clock\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\tapi_v1 \"k8s.io\/client-go\/pkg\/api\/v1\"\n)\n\ntype fakeSdWriter struct {\n\twriteFunc func([]*sd.LogEntry, string, *sd.MonitoredResource) int\n}\n\nfunc (w *fakeSdWriter) Write(entries []*sd.LogEntry, logName string, resource *sd.MonitoredResource) int {\n\tif w.writeFunc != nil {\n\t\treturn w.writeFunc(entries, logName, resource)\n\t}\n\treturn 0\n}\n\nfunc TestMaxConcurrency(t *testing.T) {\n\tvar writeCalledTimes int32\n\tw := &fakeSdWriter{\n\t\twriteFunc: func([]*sd.LogEntry, string, *sd.MonitoredResource) int {\n\t\t\tatomic.AddInt32(&writeCalledTimes, 1)\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t\treturn 0\n\t\t},\n\t}\n\tconfig := &sdSinkConfig{\n\t\tResource: nil,\n\t\tFlushDelay: 100 * time.Millisecond,\n\t\tLogName: \"logname\",\n\t\tMaxConcurrency: 10,\n\t\tMaxBufferSize: 10,\n\t}\n\ts := newSdSink(w, clock.NewFakeClock(time.Time{}), config)\n\tgo s.Run(wait.NeverStop)\n\n\tfor i := 0; i < 110; i++ {\n\t\ts.OnAdd(&api_v1.Event{})\n\t}\n\n\tif writeCalledTimes != int32(config.MaxConcurrency) {\n\t\tt.Fatalf(\"writeCalledTimes = %d, expected %d\", writeCalledTimes, config.MaxConcurrency)\n\t}\n}\n\nfunc TestBatchTimeout(t *testing.T) {\n\tvar writeCalledTimes int32\n\tw := &fakeSdWriter{\n\t\twriteFunc: func([]*sd.LogEntry, 
string, *sd.MonitoredResource) int {\n\t\t\tatomic.AddInt32(&writeCalledTimes, 1)\n\t\t\treturn 0\n\t\t},\n\t}\n\tconfig := &sdSinkConfig{\n\t\tResource: nil,\n\t\tFlushDelay: 100 * time.Millisecond,\n\t\tLogName: \"logname\",\n\t\tMaxConcurrency: 10,\n\t\tMaxBufferSize: 10,\n\t}\n\ts := newSdSink(w, clock.NewFakeClock(time.Time{}), config)\n\tgo s.Run(wait.NeverStop)\n\n\ts.OnAdd(&api_v1.Event{})\n\ttime.Sleep(200 * time.Millisecond)\n\n\tif writeCalledTimes != 1 {\n\t\tt.Fatalf(\"writeCalledTimes = %d, expected 1\", writeCalledTimes)\n\t}\n}\n\nfunc TestBatchSizeLimit(t *testing.T) {\n\tvar writeCalledTimes int32\n\tw := &fakeSdWriter{\n\t\twriteFunc: func([]*sd.LogEntry, string, *sd.MonitoredResource) int {\n\t\t\tatomic.AddInt32(&writeCalledTimes, 1)\n\t\t\treturn 0\n\t\t},\n\t}\n\tconfig := &sdSinkConfig{\n\t\tResource: nil,\n\t\tFlushDelay: 1 * time.Second,\n\t\tLogName: \"logname\",\n\t\tMaxConcurrency: 10,\n\t\tMaxBufferSize: 10,\n\t}\n\ts := newSdSink(w, clock.NewFakeClock(time.Time{}), config)\n\tgo s.Run(wait.NeverStop)\n\n\tfor i := 0; i < 15; i++ {\n\t\ts.OnAdd(&api_v1.Event{})\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tif writeCalledTimes != 1 {\n\t\tt.Fatalf(\"writeCalledTimes = %d, expected 1\", writeCalledTimes)\n\t}\n}\nMake event-exporter unit tests more robust\/*\nCopyright 2017 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage stackdriver\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\tsd \"google.golang.org\/api\/logging\/v2\"\n\n\t\"k8s.io\/apimachinery\/pkg\/util\/clock\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\tapi_v1 \"k8s.io\/client-go\/pkg\/api\/v1\"\n)\n\ntype fakeSdWriter struct {\n\twriteFunc func([]*sd.LogEntry, string, *sd.MonitoredResource) int\n}\n\nfunc (w *fakeSdWriter) Write(entries []*sd.LogEntry, logName string, resource *sd.MonitoredResource) int {\n\tif w.writeFunc != nil {\n\t\treturn w.writeFunc(entries, logName, resource)\n\t}\n\treturn 0\n}\n\nfunc TestMaxConcurrency(t *testing.T) {\n\tdone := make(chan struct{})\n\tdefer close(done)\n\tconfig := &sdSinkConfig{\n\t\tResource: nil,\n\t\tFlushDelay: 100 * time.Millisecond,\n\t\tLogName: \"logname\",\n\t\tMaxConcurrency: 10,\n\t\tMaxBufferSize: 10,\n\t}\n\tq := make(chan struct{}, config.MaxConcurrency+1)\n\tw := &fakeSdWriter{\n\t\twriteFunc: func([]*sd.LogEntry, string, *sd.MonitoredResource) int {\n\t\t\tq <- struct{}{}\n\t\t\t<-done\n\t\t\treturn 0\n\t\t},\n\t}\n\ts := newSdSink(w, clock.NewFakeClock(time.Time{}), config)\n\tgo s.Run(done)\n\n\tfor i := 0; i < config.MaxConcurrency*(config.MaxBufferSize+2); i++ {\n\t\ts.OnAdd(&api_v1.Event{})\n\t}\n\n\tif len(q) != config.MaxConcurrency {\n\t\tt.Fatalf(\"Write called %d times, expected %d\", len(q), config.MaxConcurrency)\n\t}\n}\n\nfunc TestBatchTimeout(t *testing.T) {\n\tdone := make(chan struct{})\n\tdefer close(done)\n\tconfig := &sdSinkConfig{\n\t\tResource: nil,\n\t\tFlushDelay: 100 * time.Millisecond,\n\t\tLogName: \"logname\",\n\t\tMaxConcurrency: 10,\n\t\tMaxBufferSize: 10,\n\t}\n\tq := make(chan struct{}, 
config.MaxConcurrency+1)\n\tw := &fakeSdWriter{\n\t\twriteFunc: func([]*sd.LogEntry, string, *sd.MonitoredResource) int {\n\t\t\tq <- struct{}{}\n\t\t\treturn 0\n\t\t},\n\t}\n\ts := newSdSink(w, clock.NewFakeClock(time.Time{}), config)\n\tgo s.Run(done)\n\n\ts.OnAdd(&api_v1.Event{})\n\twait.Poll(100*time.Millisecond, 1*time.Second, func() (bool, error) {\n\t\treturn len(q) == 1, nil\n\t})\n\n\tif len(q) != 1 {\n\t\tt.Fatalf(\"Write called %d times, expected 1\", len(q))\n\t}\n}\n\nfunc TestBatchSizeLimit(t *testing.T) {\n\tdone := make(chan struct{})\n\tdefer close(done)\n\tconfig := &sdSinkConfig{\n\t\tResource: nil,\n\t\tFlushDelay: 1 * time.Minute,\n\t\tLogName: \"logname\",\n\t\tMaxConcurrency: 10,\n\t\tMaxBufferSize: 10,\n\t}\n\tq := make(chan struct{}, config.MaxConcurrency+1)\n\tw := &fakeSdWriter{\n\t\twriteFunc: func([]*sd.LogEntry, string, *sd.MonitoredResource) int {\n\t\t\tq <- struct{}{}\n\t\t\treturn 0\n\t\t},\n\t}\n\ts := newSdSink(w, clock.NewFakeClock(time.Time{}), config)\n\tgo s.Run(done)\n\n\tfor i := 0; i < 15; i++ {\n\t\ts.OnAdd(&api_v1.Event{})\n\t}\n\n\twait.Poll(100*time.Millisecond, 1*time.Second, func() (bool, error) {\n\t\treturn len(q) == 1, nil\n\t})\n\n\tif len(q) != 1 {\n\t\tt.Fatalf(\"Write called %d times, expected 1\", len(q))\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2014 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"sync\/atomic\"\n\n\tdto \"github.com\/prometheus\/client_model\/go\"\n)\n\n\/\/ Counter is a Metric that represents a single numerical value that only ever\n\/\/ goes up. That implies that it cannot be used to count items whose number can\n\/\/ also go down, e.g. the number of currently running goroutines. Those\n\/\/ \"counters\" are represented by Gauges.\n\/\/\n\/\/ A Counter is typically used to count requests served, tasks completed, errors\n\/\/ occurred, etc.\n\/\/\n\/\/ To create Counter instances, use NewCounter.\ntype Counter interface {\n\tMetric\n\tCollector\n\n\t\/\/ Inc increments the counter by 1. Use Add to increment it by arbitrary\n\t\/\/ non-negative values.\n\tInc()\n\t\/\/ Add adds the given value to the counter. It panics if the value is <\n\t\/\/ 0.\n\tAdd(float64)\n}\n\n\/\/ CounterOpts is an alias for Opts. See there for doc comments.\ntype CounterOpts Opts\n\n\/\/ NewCounter creates a new Counter based on the provided CounterOpts.\n\/\/\n\/\/ The returned implementation tracks the counter value in two separate\n\/\/ variables, a float64 and a uint64. The latter is used to track calls of the\n\/\/ Inc method and calls of the Add method with a value that can be represented\n\/\/ as a uint64. This allows atomic increments of the counter with optimal\n\/\/ performance. (It is common to have an Inc call in very hot execution paths.)\n\/\/ Both internal tracking values are added up in the Write method. 
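(In the code below, Write reports\n\/\/ fval + float64(ival), the sum of the two parts.) 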
This has to\n\/\/ be taken into account when it comes to precision and overflow behavior.\nfunc NewCounter(opts CounterOpts) Counter {\n\tdesc := NewDesc(\n\t\tBuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tnil,\n\t\topts.ConstLabels,\n\t)\n\tresult := &counter{desc: desc, labelPairs: desc.constLabelPairs}\n\tresult.init(result) \/\/ Init self-collection.\n\treturn result\n}\n\ntype counter struct {\n\t\/\/ valBits contains the bits of the represented float64 value, while\n\t\/\/ valInt stores values that are exact integers. Both have to go first\n\t\/\/ in the struct to guarantee alignment for atomic operations.\n\t\/\/ http:\/\/golang.org\/pkg\/sync\/atomic\/#pkg-note-BUG\n\tvalBits uint64\n\tvalInt uint64\n\n\tselfCollector\n\tdesc *Desc\n\n\tlabelPairs []*dto.LabelPair\n}\n\nfunc (c *counter) Desc() *Desc {\n\treturn c.desc\n}\n\nfunc (c *counter) Add(v float64) {\n\tif v < 0 {\n\t\tpanic(errors.New(\"counter cannot decrease in value\"))\n\t}\n\tival := uint64(v)\n\tif float64(ival) == v {\n\t\tatomic.AddUint64(&c.valInt, ival)\n\t\treturn\n\t}\n\n\tfor {\n\t\toldBits := atomic.LoadUint64(&c.valBits)\n\t\tnewBits := math.Float64bits(math.Float64frombits(oldBits) + v)\n\t\tif atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *counter) Inc() {\n\tatomic.AddUint64(&c.valInt, 1)\n}\n\nfunc (c *counter) Write(out *dto.Metric) error {\n\tfval := math.Float64frombits(atomic.LoadUint64(&c.valBits))\n\tival := atomic.LoadUint64(&c.valInt)\n\tval := fval + float64(ival)\n\n\treturn populateMetric(CounterValue, val, c.labelPairs, out)\n}\n\n\/\/ CounterVec is a Collector that bundles a set of Counters that all share the\n\/\/ same Desc, but have different values for their variable labels. This is used\n\/\/ if you want to count the same thing partitioned by various dimensions\n\/\/ (e.g. number of HTTP requests, partitioned by response code and\n\/\/ method). Create instances with NewCounterVec.\ntype CounterVec struct {\n\t*metricVec\n}\n\n\/\/ NewCounterVec creates a new CounterVec based on the provided CounterOpts and\n\/\/ partitioned by the given label names.\nfunc NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {\n\tdesc := NewDesc(\n\t\tBuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tlabelNames,\n\t\topts.ConstLabels,\n\t)\n\treturn &CounterVec{\n\t\tmetricVec: newMetricVec(desc, func(lvs ...string) Metric {\n\t\t\tif len(lvs) != len(desc.variableLabels) {\n\t\t\t\tpanic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))\n\t\t\t}\n\t\t\tresult := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}\n\t\t\tresult.init(result) \/\/ Init self-collection.\n\t\t\treturn result\n\t\t}),\n\t}\n}\n\n\/\/ GetMetricWithLabelValues returns the Counter for the given slice of label\n\/\/ values (same order as the VariableLabels in Desc). If that combination of\n\/\/ label values is accessed for the first time, a new Counter is created.\n\/\/\n\/\/ It is possible to call this method without using the returned Counter to only\n\/\/ create the new Counter but leave it at its starting value 0. See also the\n\/\/ SummaryVec example.\n\/\/\n\/\/ Keeping the Counter for later use is possible (and should be considered if\n\/\/ performance is critical), but keep in mind that Reset, DeleteLabelValues and\n\/\/ Delete can be used to delete the Counter from the CounterVec. 
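For instance (label\n\/\/ values here are hypothetical), c, err := vec.GetMetricWithLabelValues(\"GET\", \"404\")\n\/\/ fetches or creates the Counter for that label pair, and a later\n\/\/ vec.DeleteLabelValues(\"GET\", \"404\") removes it from the vector. 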
In that case,\n\/\/ the Counter will still exist, but it will not be exported anymore, even if a\n\/\/ Counter with the same label values is created later.\n\/\/\n\/\/ An error is returned if the number of label values is not the same as the\n\/\/ number of VariableLabels in Desc (minus any curried labels).\n\/\/\n\/\/ Note that for more than one label value, this method is prone to mistakes\n\/\/ caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as\n\/\/ an alternative to avoid that type of mistake. For higher label numbers, the\n\/\/ latter has a much more readable (albeit more verbose) syntax, but it comes\n\/\/ with a performance overhead (for creating and processing the Labels map).\n\/\/ See also the GaugeVec example.\nfunc (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {\n\tmetric, err := v.metricVec.getMetricWithLabelValues(lvs...)\n\tif metric != nil {\n\t\treturn metric.(Counter), err\n\t}\n\treturn nil, err\n}\n\n\/\/ GetMetricWith returns the Counter for the given Labels map (the label names\n\/\/ must match those of the VariableLabels in Desc). If that label map is\n\/\/ accessed for the first time, a new Counter is created. Implications of\n\/\/ creating a Counter without using it and keeping the Counter for later use are\n\/\/ the same as for GetMetricWithLabelValues.\n\/\/\n\/\/ An error is returned if the number and names of the Labels are inconsistent\n\/\/ with those of the VariableLabels in Desc (minus any curried labels).\n\/\/\n\/\/ This method is used for the same purpose as\n\/\/ GetMetricWithLabelValues(...string). See there for pros and cons of the two\n\/\/ methods.\nfunc (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {\n\tmetric, err := v.metricVec.getMetricWith(labels)\n\tif metric != nil {\n\t\treturn metric.(Counter), err\n\t}\n\treturn nil, err\n}\n\n\/\/ WithLabelValues works as GetMetricWithLabelValues, but panics where\n\/\/ GetMetricWithLabelValues would have returned an error. Not returning an\n\/\/ error allows shortcuts like\n\/\/ myVec.WithLabelValues(\"404\", \"GET\").Add(42)\nfunc (v *CounterVec) WithLabelValues(lvs ...string) Counter {\n\tc, err := v.GetMetricWithLabelValues(lvs...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn c\n}\n\n\/\/ With works as GetMetricWith, but panics where GetMetricWith would have\n\/\/ returned an error. Not returning an error allows shortcuts like\n\/\/ myVec.With(prometheus.Labels{\"code\": \"404\", \"method\": \"GET\"}).Add(42)\nfunc (v *CounterVec) With(labels Labels) Counter {\n\tc, err := v.GetMetricWith(labels)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn c\n}\n\n\/\/ CurryWith returns a vector curried with the provided labels, i.e. the\n\/\/ returned vector has those labels pre-set for all labeled operations performed\n\/\/ on it. The cardinality of the curried vector is reduced accordingly. The\n\/\/ order of the remaining labels stays the same (just with the curried labels\n\/\/ taken out of the sequence – which is relevant for the\n\/\/ (GetMetric)WithLabelValues methods). It is possible to curry a curried\n\/\/ vector, but only with labels not yet used for currying before.\n\/\/\n\/\/ The metrics contained in the CounterVec are shared between the curried and\n\/\/ uncurried vectors. They are just accessed differently. Curried and uncurried\n\/\/ vectors behave identically in terms of collection. Only one must be\n\/\/ registered with a given registry (usually the uncurried version). 
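As a hedged sketch\n\/\/ (the metric and label names below are made up for illustration):\n\/\/\n\/\/  requests := NewCounterVec(CounterOpts{Name: \"http_requests_total\"}, []string{\"method\", \"code\"})\n\/\/  getOnly := requests.MustCurryWith(Labels{\"method\": \"GET\"})\n\/\/  getOnly.WithLabelValues(\"404\").Inc() \/\/ same Counter as requests.WithLabelValues(\"GET\", \"404\")\n\/\/\n\/\/ 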
The Reset\n\/\/ method deletes all metrics, even if called on a curried vector.\nfunc (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {\n\tvec, err := v.curryWith(labels)\n\tif vec != nil {\n\t\treturn &CounterVec{vec}, err\n\t}\n\treturn nil, err\n}\n\n\/\/ MustCurryWith works as CurryWith but panics where CurryWith would have\n\/\/ returned an error.\nfunc (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {\n\tvec, err := v.CurryWith(labels)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn vec\n}\n\n\/\/ CounterFunc is a Counter whose value is determined at collect time by calling a\n\/\/ provided function.\n\/\/\n\/\/ To create CounterFunc instances, use NewCounterFunc.\ntype CounterFunc interface {\n\tMetric\n\tCollector\n}\n\n\/\/ NewCounterFunc creates a new CounterFunc based on the provided\n\/\/ CounterOpts. The value reported is determined by calling the given function\n\/\/ from within the Write method. Take into account that metric collection may\n\/\/ happen concurrently. If that results in concurrent calls to Write, like in\n\/\/ the case where a CounterFunc is directly registered with Prometheus, the\n\/\/ provided function must be concurrency-safe. The function should also honor\n\/\/ the contract for a Counter (values only go up, not down), but compliance will\n\/\/ not be checked.\nfunc NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {\n\treturn newValueFunc(NewDesc(\n\t\tBuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tnil,\n\t\topts.ConstLabels,\n\t), CounterValue, function)\n}\nRemove fmt from import\/\/ Copyright 2014 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"errors\"\n\t\"math\"\n\t\"sync\/atomic\"\n\n\tdto \"github.com\/prometheus\/client_model\/go\"\n)\n\n\/\/ Counter is a Metric that represents a single numerical value that only ever\n\/\/ goes up. That implies that it cannot be used to count items whose number can\n\/\/ also go down, e.g. the number of currently running goroutines. Those\n\/\/ \"counters\" are represented by Gauges.\n\/\/\n\/\/ A Counter is typically used to count requests served, tasks completed, errors\n\/\/ occurred, etc.\n\/\/\n\/\/ To create Counter instances, use NewCounter.\ntype Counter interface {\n\tMetric\n\tCollector\n\n\t\/\/ Inc increments the counter by 1. Use Add to increment it by arbitrary\n\t\/\/ non-negative values.\n\tInc()\n\t\/\/ Add adds the given value to the counter. It panics if the value is <\n\t\/\/ 0.\n\tAdd(float64)\n}\n\n\/\/ CounterOpts is an alias for Opts. See there for doc comments.\ntype CounterOpts Opts\n\n\/\/ NewCounter creates a new Counter based on the provided CounterOpts.\n\/\/\n\/\/ The returned implementation tracks the counter value in two separate\n\/\/ variables, a float64 and a uint64. The latter is used to track calls of the\n\/\/ Inc method and calls of the Add method with a value that can be represented\n\/\/ as a uint64. 
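(For example, Inc and Add(3)\n\/\/ take the integer path, while Add(1.5) falls back to the float64 part.) 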
This allows atomic increments of the counter with optimal\n\/\/ performance. (It is common to have an Inc call in very hot execution paths.)\n\/\/ Both internal tracking values are added up in the Write method. This has to\n\/\/ be taken into account when it comes to precision and overflow behavior.\nfunc NewCounter(opts CounterOpts) Counter {\n\tdesc := NewDesc(\n\t\tBuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tnil,\n\t\topts.ConstLabels,\n\t)\n\tresult := &counter{desc: desc, labelPairs: desc.constLabelPairs}\n\tresult.init(result) \/\/ Init self-collection.\n\treturn result\n}\n\ntype counter struct {\n\t\/\/ valBits contains the bits of the represented float64 value, while\n\t\/\/ valInt stores values that are exact integers. Both have to go first\n\t\/\/ in the struct to guarantee alignment for atomic operations.\n\t\/\/ http:\/\/golang.org\/pkg\/sync\/atomic\/#pkg-note-BUG\n\tvalBits uint64\n\tvalInt uint64\n\n\tselfCollector\n\tdesc *Desc\n\n\tlabelPairs []*dto.LabelPair\n}\n\nfunc (c *counter) Desc() *Desc {\n\treturn c.desc\n}\n\nfunc (c *counter) Add(v float64) {\n\tif v < 0 {\n\t\tpanic(errors.New(\"counter cannot decrease in value\"))\n\t}\n\tival := uint64(v)\n\tif float64(ival) == v {\n\t\tatomic.AddUint64(&c.valInt, ival)\n\t\treturn\n\t}\n\n\tfor {\n\t\toldBits := atomic.LoadUint64(&c.valBits)\n\t\tnewBits := math.Float64bits(math.Float64frombits(oldBits) + v)\n\t\tif atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *counter) Inc() {\n\tatomic.AddUint64(&c.valInt, 1)\n}\n\nfunc (c *counter) Write(out *dto.Metric) error {\n\tfval := math.Float64frombits(atomic.LoadUint64(&c.valBits))\n\tival := atomic.LoadUint64(&c.valInt)\n\tval := fval + float64(ival)\n\n\treturn populateMetric(CounterValue, val, c.labelPairs, out)\n}\n\n\/\/ CounterVec is a Collector that bundles a set of Counters that all share the\n\/\/ same Desc, but have different values for their variable labels. This is used\n\/\/ if you want to count the same thing partitioned by various dimensions\n\/\/ (e.g. number of HTTP requests, partitioned by response code and\n\/\/ method). Create instances with NewCounterVec.\ntype CounterVec struct {\n\t*metricVec\n}\n\n\/\/ NewCounterVec creates a new CounterVec based on the provided CounterOpts and\n\/\/ partitioned by the given label names.\nfunc NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {\n\tdesc := NewDesc(\n\t\tBuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tlabelNames,\n\t\topts.ConstLabels,\n\t)\n\treturn &CounterVec{\n\t\tmetricVec: newMetricVec(desc, func(lvs ...string) Metric {\n\t\t\tif len(lvs) != len(desc.variableLabels) {\n\t\t\t\tpanic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))\n\t\t\t}\n\t\t\tresult := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}\n\t\t\tresult.init(result) \/\/ Init self-collection.\n\t\t\treturn result\n\t\t}),\n\t}\n}\n\n\/\/ GetMetricWithLabelValues returns the Counter for the given slice of label\n\/\/ values (same order as the VariableLabels in Desc). If that combination of\n\/\/ label values is accessed for the first time, a new Counter is created.\n\/\/\n\/\/ It is possible to call this method without using the returned Counter to only\n\/\/ create the new Counter but leave it at its starting value 0. 
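(E.g., with a single hypothetical\n\/\/ \"code\" label, calling vec.GetMetricWithLabelValues(\"404\") once at startup makes the\n\/\/ Counter visible with value 0 before the first increment.) 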
See also the\n\/\/ SummaryVec example.\n\/\/\n\/\/ Keeping the Counter for later use is possible (and should be considered if\n\/\/ performance is critical), but keep in mind that Reset, DeleteLabelValues and\n\/\/ Delete can be used to delete the Counter from the CounterVec. In that case,\n\/\/ the Counter will still exist, but it will not be exported anymore, even if a\n\/\/ Counter with the same label values is created later.\n\/\/\n\/\/ An error is returned if the number of label values is not the same as the\n\/\/ number of VariableLabels in Desc (minus any curried labels).\n\/\/\n\/\/ Note that for more than one label value, this method is prone to mistakes\n\/\/ caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as\n\/\/ an alternative to avoid that type of mistake. For higher label numbers, the\n\/\/ latter has a much more readable (albeit more verbose) syntax, but it comes\n\/\/ with a performance overhead (for creating and processing the Labels map).\n\/\/ See also the GaugeVec example.\nfunc (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {\n\tmetric, err := v.metricVec.getMetricWithLabelValues(lvs...)\n\tif metric != nil {\n\t\treturn metric.(Counter), err\n\t}\n\treturn nil, err\n}\n\n\/\/ GetMetricWith returns the Counter for the given Labels map (the label names\n\/\/ must match those of the VariableLabels in Desc). If that label map is\n\/\/ accessed for the first time, a new Counter is created. Implications of\n\/\/ creating a Counter without using it and keeping the Counter for later use are\n\/\/ the same as for GetMetricWithLabelValues.\n\/\/\n\/\/ An error is returned if the number and names of the Labels are inconsistent\n\/\/ with those of the VariableLabels in Desc (minus any curried labels).\n\/\/\n\/\/ This method is used for the same purpose as\n\/\/ GetMetricWithLabelValues(...string). See there for pros and cons of the two\n\/\/ methods.\nfunc (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {\n\tmetric, err := v.metricVec.getMetricWith(labels)\n\tif metric != nil {\n\t\treturn metric.(Counter), err\n\t}\n\treturn nil, err\n}\n\n\/\/ WithLabelValues works as GetMetricWithLabelValues, but panics where\n\/\/ GetMetricWithLabelValues would have returned an error. Not returning an\n\/\/ error allows shortcuts like\n\/\/ myVec.WithLabelValues(\"404\", \"GET\").Add(42)\nfunc (v *CounterVec) WithLabelValues(lvs ...string) Counter {\n\tc, err := v.GetMetricWithLabelValues(lvs...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn c\n}\n\n\/\/ With works as GetMetricWith, but panics where GetMetricWith would have\n\/\/ returned an error. Not returning an error allows shortcuts like\n\/\/ myVec.With(prometheus.Labels{\"code\": \"404\", \"method\": \"GET\"}).Add(42)\nfunc (v *CounterVec) With(labels Labels) Counter {\n\tc, err := v.GetMetricWith(labels)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn c\n}\n\n\/\/ CurryWith returns a vector curried with the provided labels, i.e. the\n\/\/ returned vector has those labels pre-set for all labeled operations performed\n\/\/ on it. The cardinality of the curried vector is reduced accordingly. The\n\/\/ order of the remaining labels stays the same (just with the curried labels\n\/\/ taken out of the sequence – which is relevant for the\n\/\/ (GetMetric)WithLabelValues methods). 
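For a hypothetical vector\n\/\/ with variable labels \"method\" and \"code\", currying \"method\" leaves\n\/\/ WithLabelValues expecting only the \"code\" value. 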
It is possible to curry a curried\n\/\/ vector, but only with labels not yet used for currying before.\n\/\/\n\/\/ The metrics contained in the CounterVec are shared between the curried and\n\/\/ uncurried vectors. They are just accessed differently. Curried and uncurried\n\/\/ vectors behave identically in terms of collection. Only one must be\n\/\/ registered with a given registry (usually the uncurried version). The Reset\n\/\/ method deletes all metrics, even if called on a curried vector.\nfunc (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {\n\tvec, err := v.curryWith(labels)\n\tif vec != nil {\n\t\treturn &CounterVec{vec}, err\n\t}\n\treturn nil, err\n}\n\n\/\/ MustCurryWith works as CurryWith but panics where CurryWith would have\n\/\/ returned an error.\nfunc (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {\n\tvec, err := v.CurryWith(labels)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn vec\n}\n\n\/\/ CounterFunc is a Counter whose value is determined at collect time by calling a\n\/\/ provided function.\n\/\/\n\/\/ To create CounterFunc instances, use NewCounterFunc.\ntype CounterFunc interface {\n\tMetric\n\tCollector\n}\n\n\/\/ NewCounterFunc creates a new CounterFunc based on the provided\n\/\/ CounterOpts. The value reported is determined by calling the given function\n\/\/ from within the Write method. Take into account that metric collection may\n\/\/ happen concurrently. If that results in concurrent calls to Write, like in\n\/\/ the case where a CounterFunc is directly registered with Prometheus, the\n\/\/ provided function must be concurrency-safe. The function should also honor\n\/\/ the contract for a Counter (values only go up, not down), but compliance will\n\/\/ not be checked.\nfunc NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {\n\treturn newValueFunc(NewDesc(\n\t\tBuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tnil,\n\t\topts.ConstLabels,\n\t), CounterValue, function)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2012 Gonéri Le Bouder. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport \"notmuch\"\nimport \"log\"\nimport \"encoding\/json\"\nimport \"os\"\nimport \"io\"\nimport \"fmt\"\nimport \"regexp\"\nimport \"net\/mail\"\nimport \"path\"\n\ntype Filter struct {\n\tField string\n\tPattern string\n\tRe *regexp.Regexp\n\tTags string\n}\n\ntype Result struct {\n\tMessageID string\n\tTags string\n\tDie bool\n Filename string\n}\n\nconst NCPU = 4 \/\/ number of CPU cores\n\nfunc getMaildirLoc() string {\n\t\/\/ honor NOTMUCH_CONFIG\n\thome := os.Getenv(\"NOTMUCH_CONFIG\")\n\tif home == \"\" {\n\t\thome = os.Getenv(\"HOME\")\n\t}\n\n\treturn path.Join(home, \"Maildir\")\n}\n\nfunc RefreshFlags(nmdb *notmuch.Database) {\n\n\tquery := nmdb.CreateQuery(\"tag:inbox and tag:delete\")\n\tmsgs := query.SearchMessages()\n\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\tmsg := msgs.Get()\n\t\tmsg.RemoveTag(\"inbox\")\n\t}\n\n\tquery = nmdb.CreateQuery(\"tag:inbox and tag:archive\")\n\tmsgs = query.SearchMessages()\n\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\tmsg := msgs.Get()\n\t\tmsg.RemoveTag(\"inbox\")\n\t}\n\n\tquery = nmdb.CreateQuery(\"tag:inbox and tag:seen and not tag:list\")\n\tmsgs = query.SearchMessages()\n\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\tmsg := msgs.Get()\n\t\tmsg.AddTag(\"archive\")\n\t\tmsg.RemoveTag(\"inbox\")\n\t}\n\n\tquery = nmdb.CreateQuery(\"tag:inbox and tag:seen and tag:list\")\n\tmsgs = query.SearchMessages()\n\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\tmsg := msgs.Get()\n\t\tmsg.RemoveTag(\"inbox\")\n\t}\n\n\tquery = nmdb.CreateQuery(\"tag:inbox and tag:seen and tag:bug\")\n\tmsgs = query.SearchMessages()\n\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\tmsg := msgs.Get()\n\t\tmsg.RemoveTag(\"inbox\")\n\t}\n\n\tquery = nmdb.CreateQuery(\"tag:inbox\")\n\tmsgs = query.SearchMessages()\n\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\tmsg := msgs.Get()\n\t\tthreadId := msg.GetThreadId()\n\t\tfilter := fmt.Sprintf(\"thread:%s\", threadId)\n\n\t\tquery := nmdb.CreateQuery(filter)\n\t\tmsgs := query.SearchMessages()\n\t\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\t\tmsg := msgs.Get()\n\t\t\tmsg.AddTag(\"inbox\")\n\t\t}\n\t}\n\n\tquery = nmdb.CreateQuery(\"tag:inbox and tag:killed\")\n\tmsgs = query.SearchMessages()\n\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\tmsg := msgs.Get()\n\t\tthreadId := msg.GetThreadId()\n\t\tfilter := fmt.Sprintf(\"thread:%s\", threadId)\n\n\t\tquery := nmdb.CreateQuery(filter)\n\t\tmsgs := query.SearchMessages()\n\t\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\t\tmsg := msgs.Get()\n\t\t\tmsg.RemoveTag(\"inbox\")\n\t\t}\n\t}\n\n\tnmdb.Close()\n\tfmt.Print(\"Ok\\n\")\n\n}\n\nfunc studyMsg(filter []Filter, filenameIn chan string, resultOut chan Result, quit chan bool) {\n\tfor {\n\t\tfilename := <-filenameIn\n\n\t\tif filename == \"\" {\n\t\t\tvar result Result\n\t\t\tresult.Die = true\n\t\t\tresultOut <- result\n\n\t\t\treturn\n\t\t}\n\t\t\/\/ We cannot use Notmuch for this directly because Xapian will\n\t\t\/\/ fail as soon as we have 2 concurrent goroutines\n\t\tfile, err := os.Open(filename) \/\/ For read access.\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tvar msg *mail.Message\n\t\tmsg, err = mail.ReadMessage(file)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tvar result Result\n\t\tresult.MessageID = msg.Header.Get(\"Message-Id\")\n if (result.MessageID == \"\") {\n fmt.Printf(\"No message ID for %s\\n\", filename)\n continue\n 
}\n\t\tresult.Filename = filename\n\t\tfor _, f := range filter {\n\t\t\tif f.Re.MatchString(msg.Header.Get(f.Field)) {\n\t\t\t\tresult.Tags += \" \"\n\t\t\t\tresult.Tags += f.Tags\n\t\t\t}\n\n\t\t}\n\t\tfile.Close()\n\n\t\tresultOut <- result\n\t}\n}\n\nfunc loadFilter() (filter []Filter) {\n\n\tfile, err := os.Open(fmt.Sprintf(\"\/%s\/notmuch-filter.json\", getMaildirLoc())) \/\/ For read access.\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer file.Close()\n\n\tdec := json.NewDecoder(file)\n\tfor {\n\t\tvar f Filter\n\t\tif err := dec.Decode(&f); err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tvar err error = nil\n\t\tif f.Re, err = regexp.Compile(f.Pattern); err != nil {\n\t\t\tlog.Printf(\"error: %v\\n\", err)\n\t\t}\n\n\t\tfilter = append(filter, f)\n\t}\n\n\treturn filter\n}\n\nfunc studyMsgs(resultOut chan Result, quit chan bool, filenames []string) {\n\n\tfilter := loadFilter()\n\n\tfilenameIn := make(chan string)\n\tfor i := 0; i < NCPU+1; i++ {\n\t\tgo studyMsg(filter, filenameIn, resultOut, quit)\n\t}\n\tfor _, filename := range filenames {\n\t\tfilenameIn <- filename\n\t}\n\n\tfor i := 0; i < NCPU+1; i++ {\n\t\tfilenameIn <- \"\"\n\t}\n\n\tquit <- true\n}\n\nfunc main() {\n\tvar query *notmuch.Query\n\tvar nmdb *notmuch.Database\n\n\tif db, status := notmuch.OpenDatabase(getMaildirLoc(),\n\t\tnotmuch.DATABASE_MODE_READ_ONLY); status == notmuch.STATUS_SUCCESS {\n\t\tnmdb = db\n\t} else {\n\t\tlog.Fatalf(\"Failed to open the database: %v\\n\", status)\n\t}\n\n\tquit := make(chan bool)\n\tresultOut := make(chan Result)\n\n\tquery = nmdb.CreateQuery(\"tag:new\")\n\n\tprintln(\">\", query.CountMessages(), \"<\")\n\tmsgs := query.SearchMessages()\n\n\tvar filenames []string\n\tif query.CountMessages() > 0 {\n\t\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\t\tmsg := msgs.Get()\n\n\t\t\tfilenames = append(filenames, msg.GetFileName())\n\t\t}\n\t}\n\n\tgo studyMsgs(resultOut, quit, filenames)\n\n\t\/\/\tvar query *notmuch.Query\n\tvar msgIDRegexp = regexp.MustCompile(\"^<(.*)>$\")\n\tvar tagRegexp = regexp.MustCompile(\"([\\\\+-])(\\\\S+)\")\n\n\t\/\/ open the database\n\tif db, status := notmuch.OpenDatabase(getMaildirLoc(),\n\t\t1); status == notmuch.STATUS_SUCCESS {\n\t\tnmdb = db\n\t} else {\n\t\tlog.Fatalf(\"Failed to open the database: %v\\n\", status)\n\t}\n\tdefer nmdb.Close()\n\n\tvar running int = NCPU + 1\n\tfor {\n\t\tresult := <-resultOut\n\n\t\tif result.Die {\n\n\t\t\trunning--\n\n\t\t\tif running > 0 {\n\t\t\t\tcontinue\n\t\t\t} else {\n break\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Message-ID without the <>\n fmt.Printf(\"MessageID: %s\\n\", result.MessageID)\n\t\treResult := msgIDRegexp.FindStringSubmatch(result.MessageID)\n\t\tif (reResult == nil) {\n\t\t\tfmt.Printf(\"Can't parse MessageID for mail %s\\n\", result.Filename)\n\t\t\tcontinue\n\t\t}\n\n\t\tmsgID := reResult[1]\n\t\tfilter := \"id:\"\n\t\tfilter += msgID\n\t\tquery := nmdb.CreateQuery(filter)\n\t\tmsgs := query.SearchMessages()\n\t\tmsg := msgs.Get()\n\t\tif msg == nil {\n fmt.Printf(\"Can't find MessageID %s for mail %s\\n\", msgID, result.Filename)\n continue\n }\n\n\t\tfmt.Printf(\"%s, tags: %s\\n\", msgID, result.Tags)\n\t\tmsg.Freeze()\n\t\tfor _, v := range tagRegexp.FindAllStringSubmatch(result.Tags, -1) {\n\t\t\tif v[1] == \"+\" {\n\t\t\t\tmsg.AddTag(v[2])\n\t\t\t} else if v[1] == \"-\" {\n\t\t\t\tmsg.RemoveTag(v[2])\n\t\t\t}\n\t\t}\n\t\tmsg.Thaw()\n\n\t}\n RefreshFlags(nmdb)\n fmt.Printf(\"exit\\n\")\n os.Exit(0)\n\n\n}\nuse only one CPU\/\/ Copyright 2012 
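// The fan-out/fan-in shape used by studyMsgs/studyMsg above, reduced to a
// standalone sketch: NCPU+1 workers read filenames from one channel, an empty
// string is the per-worker poison pill, and the collector counts Die results.
// Names and inputs are illustrative; closing the channel and ranging over it
// would be the more idiomatic Go alternative to the "" sentinel.
package main

import "fmt"

type res struct {
	Name string
	Die  bool
}

func study(in <-chan string, out chan<- res) {
	for {
		name := <-in
		if name == "" { // one pill per worker, as in studyMsgs
			out <- res{Die: true}
			return
		}
		out <- res{Name: name}
	}
}

func main() {
	const nWorkers = 1 // the commit above drops NCPU from 4 to 1
	in := make(chan string)
	out := make(chan res)
	for i := 0; i < nWorkers+1; i++ {
		go study(in, out)
	}
	go func() {
		for _, f := range []string{"a.eml", "b.eml", "c.eml"} {
			in <- f
		}
		for i := 0; i < nWorkers+1; i++ {
			in <- ""
		}
	}()
	running := nWorkers + 1
	for running > 0 {
		r := <-out
		if r.Die {
			running--
			continue
		}
		fmt.Println("studied", r.Name)
	}
}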
Gonéri Le Bouder. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport \"notmuch\"\nimport \"log\"\nimport \"encoding\/json\"\nimport \"os\"\nimport \"io\"\nimport \"fmt\"\nimport \"regexp\"\nimport \"net\/mail\"\nimport \"path\"\n\ntype Filter struct {\n\tField string\n\tPattern string\n\tRe *regexp.Regexp\n\tTags string\n}\n\ntype Result struct {\n\tMessageID string\n\tTags string\n\tDie bool\n Filename string\n}\n\nconst NCPU = 1 \/\/ number of CPU cores\n\nfunc getMaildirLoc() string {\n\t\/\/ honor NOTMUCH_CONFIG\n\thome := os.Getenv(\"NOTMUCH_CONFIG\")\n\tif home == \"\" {\n\t\thome = os.Getenv(\"HOME\")\n\t}\n\n\treturn path.Join(home, \"Maildir\")\n}\n\nfunc RefreshFlags(nmdb *notmuch.Database) {\n\n\tquery := nmdb.CreateQuery(\"tag:inbox and tag:delete\")\n\tmsgs := query.SearchMessages()\n\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\tmsg := msgs.Get()\n\t\tmsg.RemoveTag(\"inbox\")\n\t}\n\n\tquery = nmdb.CreateQuery(\"tag:inbox and tag:archive\")\n\tmsgs = query.SearchMessages()\n\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\tmsg := msgs.Get()\n\t\tmsg.RemoveTag(\"inbox\")\n\t}\n\n\tquery = nmdb.CreateQuery(\"tag:inbox and tag:seen and not tag:list\")\n\tmsgs = query.SearchMessages()\n\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\tmsg := msgs.Get()\n\t\tmsg.AddTag(\"archive\")\n\t\tmsg.RemoveTag(\"inbox\")\n\t}\n\n\tquery = nmdb.CreateQuery(\"tag:inbox and tag:seen and tag:list\")\n\tmsgs = query.SearchMessages()\n\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\tmsg := msgs.Get()\n\t\tmsg.RemoveTag(\"inbox\")\n\t}\n\n\tquery = nmdb.CreateQuery(\"tag:inbox and tag:seen and tag:bug\")\n\tmsgs = query.SearchMessages()\n\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\tmsg := msgs.Get()\n\t\tmsg.RemoveTag(\"inbox\")\n\t}\n\n\tquery = nmdb.CreateQuery(\"tag:inbox\")\n\tmsgs = query.SearchMessages()\n\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\tmsg := msgs.Get()\n\t\tthreadId := msg.GetThreadId()\n\t\tfilter := fmt.Sprintf(\"thread:%s\", threadId)\n\n\t\tquery := nmdb.CreateQuery(filter)\n\t\tmsgs := query.SearchMessages()\n\t\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\t\tmsg := msgs.Get()\n\t\t\tmsg.AddTag(\"inbox\")\n\t\t}\n\t}\n\n\tquery = nmdb.CreateQuery(\"tag:inbox and tag:killed\")\n\tmsgs = query.SearchMessages()\n\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\tmsg := msgs.Get()\n\t\tthreadId := msg.GetThreadId()\n\t\tfilter := fmt.Sprintf(\"thread:%s\", threadId)\n\n\t\tquery := nmdb.CreateQuery(filter)\n\t\tmsgs := query.SearchMessages()\n\t\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\t\tmsg := msgs.Get()\n\t\t\tmsg.RemoveTag(\"inbox\")\n\t\t}\n\t}\n\n\tnmdb.Close()\n\tfmt.Print(\"Ok\\n\")\n\n}\n\nfunc studyMsg(filter []Filter, filenameIn chan string, resultOut chan Result, quit chan bool) {\n\tfor {\n\t\tfilename := <-filenameIn\n\n\t\tif filename == \"\" {\n\t\t\tvar result Result\n\t\t\tresult.Die = true\n\t\t\tresultOut <- result\n\n\t\t\treturn\n\t\t}\n\t\t\/\/ We can use Notmuch for this directly because Xappian will\n\t\t\/\/ fails as soon as we have 2 concurrent goroutine\n\t\tfile, err := os.Open(filename) \/\/ For read access.\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tvar msg *mail.Message\n\t\tmsg, err = mail.ReadMessage(file)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tvar result Result\n\t\tresult.MessageID = msg.Header.Get(\"Message-Id\")\n if (result.MessageID == \"\") {\n fmt.Printf(\"No message ID for %s\\n\", filename)\n 
continue;\n }\n\t\tresult.Filename = filename\n\t\tfor _, f := range filter {\n\t\t\tif f.Re.MatchString(msg.Header.Get(f.Field)) {\n\t\t\t\tresult.Tags += \" \"\n\t\t\t\tresult.Tags += f.Tags\n\t\t\t}\n\n\t\t}\n\t\tfile.Close()\n\n\t\tresultOut <- result\n\t}\n}\n\nfunc loadFilter() (filter []Filter) {\n\n\tfile, err := os.Open(fmt.Sprintf(\"\/%s\/notmuch-filter.json\", getMaildirLoc())) \/\/ For read access.\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer file.Close()\n\n\tdec := json.NewDecoder(file)\n\tfor {\n\t\tvar f Filter\n\t\tif err := dec.Decode(&f); err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tvar err error = nil\n\t\tif f.Re, err = regexp.Compile(f.Pattern); err != nil {\n\t\t\tlog.Printf(\"error: %v\\n\", err)\n\t\t}\n\n\t\tfilter = append(filter, f)\n\t}\n\n\treturn filter\n}\n\nfunc studyMsgs(resultOut chan Result, quit chan bool, filenames []string) {\n\n\tfilter := loadFilter()\n\n\tfilenameIn := make(chan string)\n\tfor i := 0; i < NCPU+1; i++ {\n\t\tgo studyMsg(filter, filenameIn, resultOut, quit)\n\t}\n\tfor _, filename := range filenames {\n\t\tfilenameIn <- filename\n\t}\n\n\tfor i := 0; i < NCPU+1; i++ {\n\t\tfilenameIn <- \"\"\n\t}\n\n\tquit <- true\n}\n\nfunc main() {\n\tvar query *notmuch.Query\n\tvar nmdb *notmuch.Database\n\n\tif db, status := notmuch.OpenDatabase(getMaildirLoc(),\n\t\tnotmuch.DATABASE_MODE_READ_ONLY); status == notmuch.STATUS_SUCCESS {\n\t\tnmdb = db\n\t} else {\n\t\tlog.Fatalf(\"Failed to open the database: %v\\n\", status)\n\t}\n\n\tquit := make(chan bool)\n\tresultOut := make(chan Result)\n\n\tquery = nmdb.CreateQuery(\"tag:new\")\n\n\tprintln(\">\", query.CountMessages(), \"<\")\n\tmsgs := query.SearchMessages()\n\n\tvar filenames []string\n\tif query.CountMessages() > 0 {\n\t\tfor ; msgs.Valid(); msgs.MoveToNext() {\n\t\t\tmsg := msgs.Get()\n\n\t\t\tfilenames = append(filenames, msg.GetFileName())\n\t\t}\n\t}\n\n\tgo studyMsgs(resultOut, quit, filenames)\n\n\t\/\/\tvar query *notmuch.Query\n\tvar msgIDRegexp = regexp.MustCompile(\"^<(.*)>$\")\n\tvar tagRegexp = regexp.MustCompile(\"([\\\\+-])(\\\\S+)\")\n\n\t\/\/ open the database\n\tif db, status := notmuch.OpenDatabase(getMaildirLoc(),\n\t\t1); status == notmuch.STATUS_SUCCESS {\n\t\tnmdb = db\n\t} else {\n\t\tlog.Fatalf(\"Failed to open the database: %v\\n\", status)\n\t}\n\tdefer nmdb.Close()\n\n\tvar running int = NCPU + 1\n\tfor {\n\t\tresult := <-resultOut\n\n\t\tif result.Die {\n\n\t\t\trunning--\n\n\t\t\tif running > 0 {\n\t\t\t\tcontinue\n\t\t\t} else {\n break\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Message-ID without the <>\n fmt.Printf(\"MessageID: %s\\n\", result.MessageID)\n\t\treResult := msgIDRegexp.FindStringSubmatch(result.MessageID)\n\t\tif (reResult == nil) {\n\t\t\tfmt.Printf(\"Can't parse MessageID for mail %s\\n\", result.Filename)\n\t\t\tcontinue\n\t\t}\n\n\t\tmsgID := reResult[1]\n\t\tfilter := \"id:\"\n\t\tfilter += msgID\n\t\tquery := nmdb.CreateQuery(filter)\n\t\tmsgs := query.SearchMessages()\n\t\tmsg := msgs.Get()\n\t\tif msg == nil {\n fmt.Printf(\"Can't find MessageID %s for mail %s\\n\", msgID, result.Filename)\n continue\n }\n\n\t\tfmt.Printf(\"%s, tags: %s\\n\", msgID, result.Tags)\n\t\tmsg.Freeze()\n\t\tfor _, v := range tagRegexp.FindAllStringSubmatch(result.Tags, -1) {\n\t\t\tif v[1] == \"+\" {\n\t\t\t\tmsg.AddTag(v[2])\n\t\t\t} else if v[1] == \"-\" {\n\t\t\t\tmsg.RemoveTag(v[2])\n\t\t\t}\n\t\t}\n\t\tmsg.Thaw()\n\n\t}\n RefreshFlags(nmdb)\n fmt.Printf(\"exit\\n\")\n os.Exit(0)\n\n\n}\n<|endoftext|>"} 
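// loadFilter above reads notmuch-filter.json as a *stream* of JSON objects
// (not an array), which is why it loops on Decode until io.EOF. A minimal
// self-contained sketch of that decoding, with a hypothetical filter stream:
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"regexp"
	"strings"
)

// Same shape as the Filter struct above.
type filter struct {
	Field   string
	Pattern string
	Re      *regexp.Regexp
	Tags    string
}

func main() {
	// Concatenated objects, one per filter rule (sample data, not from the source).
	src := `{"Field":"List-Id","Pattern":"golang","Tags":"+list +golang"}
{"Field":"From","Pattern":"bugzilla","Tags":"+bug"}`
	dec := json.NewDecoder(strings.NewReader(src))
	for {
		var f filter
		if err := dec.Decode(&f); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		f.Re = regexp.MustCompile(f.Pattern)
		fmt.Printf("%s ~ %q -> %s\n", f.Field, f.Pattern, f.Tags)
	}
}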
{"text":"package match\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/ast\"\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/debug\"\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/gensym\"\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/scalar\"\n)\n\ntype desugarer struct {\n\tletBoundNames, lets []interface{}\n}\n\nfunc newDesugarer() *desugarer {\n\treturn &desugarer{nil, nil}\n}\n\nfunc (d *desugarer) desugar(x interface{}) interface{} {\n\tswitch x := x.(type) {\n\tcase ast.App:\n\t\treturn ast.NewApp(\n\t\t\td.desugar(x.Function()),\n\t\t\td.desugar(x.Arguments()).(ast.Arguments),\n\t\t\tx.DebugInfo())\n\tcase ast.Arguments:\n\t\tps := make([]ast.PositionalArgument, 0, len(x.Positionals()))\n\n\t\tfor _, p := range x.Positionals() {\n\t\t\tps = append(ps, d.desugar(p).(ast.PositionalArgument))\n\t\t}\n\n\t\tks := make([]ast.KeywordArgument, 0, len(x.Keywords()))\n\n\t\tfor _, k := range x.Keywords() {\n\t\t\tks = append(ks, d.desugar(k).(ast.KeywordArgument))\n\t\t}\n\n\t\tdicts := make([]interface{}, 0, len(x.ExpandedDicts()))\n\n\t\tfor _, dict := range x.ExpandedDicts() {\n\t\t\tdicts = append(dicts, d.desugar(dict))\n\t\t}\n\n\t\treturn ast.NewArguments(ps, ks, dicts)\n\tcase ast.KeywordArgument:\n\t\treturn ast.NewKeywordArgument(x.Name(), d.desugar(x.Value()))\n\tcase ast.LetFunction:\n\t\tls := make([]interface{}, 0, len(x.Lets()))\n\n\t\tfor _, l := range x.Lets() {\n\t\t\tl := d.desugar(l)\n\t\t\tls = append(ls, append(d.takeLets(), l)...)\n\t\t}\n\n\t\tb := d.desugar(x.Body())\n\t\tls = append(ls, d.takeLets()...)\n\n\t\treturn ast.NewLetFunction(\n\t\t\tx.Name(),\n\t\t\tx.Signature(),\n\t\t\tls,\n\t\t\tb,\n\t\t\tx.DebugInfo())\n\tcase ast.LetVar:\n\t\treturn ast.NewLetVar(x.Name(), d.desugar(x.Expr()))\n\tcase ast.Match:\n\t\tcs := make([]ast.MatchCase, 0, len(x.Cases()))\n\n\t\tfor _, c := range x.Cases() {\n\t\t\tcs = append(cs, renameBoundNamesInCase(ast.NewMatchCase(c.Pattern(), d.desugar(c.Value()))))\n\t\t}\n\n\t\treturn d.resultApp(d.createMatchFunction(cs), d.desugar(x.Value()))\n\tcase ast.Output:\n\t\treturn ast.NewOutput(d.desugar(x.Expr()), x.Expanded())\n\tcase ast.PositionalArgument:\n\t\treturn ast.NewPositionalArgument(d.desugar(x.Value()), x.Expanded())\n\tdefault:\n\t\treturn x\n\t}\n}\n\nfunc (d *desugarer) takeLets() []interface{} {\n\tls := append(d.letBoundNames, d.lets...)\n\td.letBoundNames = nil\n\td.lets = nil\n\treturn ls\n}\n\nfunc (d *desugarer) letVar(s string, v interface{}) string {\n\td.lets = append(d.lets, ast.NewLetVar(s, v))\n\treturn s\n}\n\nfunc (d *desugarer) bindName(s string, v interface{}) string {\n\td.letBoundNames = append(d.letBoundNames, ast.NewLetVar(s, v))\n\treturn s\n}\n\n\/\/ matchedApp applies a function to arguments and creates a matched value of\n\/\/ match expression.\nfunc (d *desugarer) matchedApp(f interface{}, args ...interface{}) string {\n\treturn d.bindName(gensym.GenSym(\"match\", \"app\"), app(f, args...))\n}\n\n\/\/ resultApp applies a function to arguments and creates a result value of match\n\/\/ expression.\nfunc (d *desugarer) resultApp(f interface{}, args ...interface{}) string {\n\treturn d.letVar(gensym.GenSym(\"match\", \"app\"), app(f, args...))\n}\n\nfunc (d *desugarer) createMatchFunction(cs []ast.MatchCase) interface{} {\n\targ := gensym.GenSym(\"match\", \"argument\")\n\tbody := d.desugarCases(arg, cs, \"$matchError\")\n\n\tf := ast.NewLetFunction(\n\t\tgensym.GenSym(\"match\", \"function\"),\n\t\tast.NewSignature([]string{arg}, nil, \"\", nil, nil, 
\"\"),\n\t\td.takeLets(),\n\t\tbody,\n\t\tdebug.NewGoInfo(0))\n\n\td.lets = append(d.lets, f)\n\n\treturn f.Name()\n}\n\nfunc (d *desugarer) desugarCases(v interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\tcss := groupCases(cs)\n\n\tif cs, ok := css[namePattern]; ok {\n\t\tc := cs[0]\n\t\td.bindName(c.Pattern().(string), v)\n\t\tdc = c.Value()\n\t}\n\n\tks := []ast.SwitchCase{}\n\n\tif cs, ok := css[listPattern]; ok {\n\t\tks = append(ks, ast.NewSwitchCase(\"\\\"list\\\"\", d.desugarListCases(v, cs, dc)))\n\t}\n\n\tif cs, ok := css[dictPattern]; ok {\n\t\tks = append(ks, ast.NewSwitchCase(\"\\\"dict\\\"\", d.desugarDictCases(v, cs, dc)))\n\t}\n\n\tif cs, ok := css[scalarPattern]; ok {\n\t\tdc = d.desugarScalarCases(v, cs, dc)\n\t}\n\n\treturn newSwitch(d.resultApp(\"$typeOf\", v), ks, dc)\n}\n\nfunc groupCases(cs []ast.MatchCase) map[patternType][]ast.MatchCase {\n\tcss := map[patternType][]ast.MatchCase{}\n\n\tfor i, c := range cs {\n\t\tt := getPatternType(c.Pattern())\n\n\t\tif t == namePattern && i < len(cs)-1 {\n\t\t\tpanic(\"A wildcard pattern is found while some patterns are left\")\n\t\t}\n\n\t\tcss[t] = append(css[t], c)\n\t}\n\n\treturn css\n}\n\nfunc getPatternType(p interface{}) patternType {\n\tswitch x := p.(type) {\n\tcase string:\n\t\tif scalar.Defined(x) {\n\t\t\treturn scalarPattern\n\t\t}\n\n\t\treturn namePattern\n\tcase ast.App:\n\t\tswitch x.Function().(string) {\n\t\tcase \"$list\":\n\t\t\treturn listPattern\n\t\tcase \"$dict\":\n\t\t\treturn dictPattern\n\t\t}\n\t}\n\n\tpanic(fmt.Errorf(\"Invalid pattern: %#v\", p))\n}\n\nfunc (d *desugarer) desugarListCases(list interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\ttype group struct {\n\t\tfirst interface{}\n\t\tcases []ast.MatchCase\n\t}\n\n\tgs := []group{}\n\tfirst := d.matchedApp(\"$first\", list)\n\trest := d.matchedApp(\"$rest\", list)\n\n\tfor i, c := range cs {\n\t\tps := c.Pattern().(ast.App).Arguments().Positionals()\n\n\t\tif len(ps) == 0 {\n\t\t\tdc = d.resultApp(\"$if\", app(\"$=\", list, \"$emptyList\"), c.Value(), dc)\n\t\t\tcontinue\n\t\t}\n\n\t\tif ps[0].Expanded() {\n\t\t\tpanic(\"Not implemented\")\n\t\t}\n\n\t\tv := ps[0].Value()\n\n\t\tc = ast.NewMatchCase(\n\t\t\tast.NewApp(\"$list\", ast.NewArguments(ps[1:], nil, nil), debug.NewGoInfo(0)),\n\t\t\tc.Value())\n\n\t\tif getPatternType(v) == namePattern {\n\t\t\td.bindName(v.(string), first)\n\t\t\tdc = d.desugarCases(\n\t\t\t\trest,\n\t\t\t\t[]ast.MatchCase{c},\n\t\t\t\td.desugarListCases(list, cs[i+1:], dc))\n\t\t\tbreak\n\t\t}\n\n\t\tgroupExist := false\n\n\t\tfor i, g := range gs {\n\t\t\tif equalPatterns(v, g.first) {\n\t\t\t\tgroupExist = true\n\t\t\t\tgs[i].cases = append(gs[i].cases, c)\n\t\t\t}\n\t\t}\n\n\t\tif !groupExist {\n\t\t\tgs = append(gs, group{v, []ast.MatchCase{c}})\n\t\t}\n\t}\n\n\tks := make([]ast.MatchCase, 0, len(gs))\n\n\tfor _, g := range gs {\n\t\tks = append(ks, ast.NewMatchCase(g.first, d.desugarCases(rest, g.cases, dc)))\n\t}\n\n\treturn d.desugarCases(first, ks, dc)\n}\n\nfunc (d *desugarer) desugarDictCases(v interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\ttype group struct {\n\t\tkey interface{}\n\t\tcases []ast.MatchCase\n\t}\n\n\tgs := []group{}\n\n\tfor _, c := range cs {\n\t\tps := c.Pattern().(ast.App).Arguments().Positionals()\n\n\t\tif len(ps) == 0 {\n\t\t\tdc = d.resultApp(\"$if\", app(\"$=\", v, \"$emptyDict\"), c.Value(), dc)\n\t\t\tcontinue\n\t\t}\n\n\t\tif ps[0].Expanded() {\n\t\t\tpanic(\"Not implemented\")\n\t\t}\n\n\t\tg := group{ps[0].Value(), 
[]ast.MatchCase{c}}\n\n\t\tif len(gs) == 0 {\n\t\t\tgs = append(gs, g)\n\t\t} else if last := gs[len(gs)-1]; equalPatterns(g.key, last.key) {\n\t\t\tlast.cases = append(last.cases, c)\n\t\t} else {\n\t\t\tgs = append(gs, g)\n\t\t}\n\t}\n\n\tfor i := len(gs) - 1; i >= 0; i-- {\n\t\tg := gs[i]\n\t\tdc = d.resultApp(\"$if\",\n\t\t\tapp(\"$include\", v, g.key),\n\t\t\td.desugarDictCasesOfSameKey(v, g.cases, dc),\n\t\t\tdc)\n\t}\n\n\treturn dc\n}\n\nfunc (d *desugarer) desugarDictCasesOfSameKey(dict interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\ttype group struct {\n\t\tvalue interface{}\n\t\tcases []ast.MatchCase\n\t}\n\n\tkey := cs[0].Pattern().(ast.App).Arguments().Positionals()[0].Value()\n\tvalue := d.matchedApp(dict, key)\n\tnewDict := d.matchedApp(\"delete\", dict, key)\n\tgs := []group{}\n\n\tfor i, c := range cs {\n\t\tps := c.Pattern().(ast.App).Arguments().Positionals()\n\t\tv := ps[1].Value()\n\n\t\tc = ast.NewMatchCase(\n\t\t\tast.NewApp(\"$dict\", ast.NewArguments(ps[2:], nil, nil), debug.NewGoInfo(0)),\n\t\t\tc.Value())\n\n\t\tif getPatternType(v) == namePattern {\n\t\t\td.bindName(v.(string), value)\n\n\t\t\tif rest := cs[i+1:]; len(rest) != 0 {\n\t\t\t\tdc = d.desugarDictCasesOfSameKey(dict, rest, dc)\n\t\t\t}\n\n\t\t\tdc = d.desugarCases(newDict, []ast.MatchCase{c}, dc)\n\n\t\t\tbreak\n\t\t}\n\n\t\tgroupExist := false\n\n\t\tfor i, g := range gs {\n\t\t\tif equalPatterns(v, g.value) {\n\t\t\t\tgroupExist = true\n\t\t\t\tgs[i].cases = append(gs[i].cases, c)\n\t\t\t}\n\t\t}\n\n\t\tif !groupExist {\n\t\t\tgs = append(gs, group{v, []ast.MatchCase{c}})\n\t\t}\n\t}\n\n\tcs = make([]ast.MatchCase, 0, len(gs))\n\n\tfor _, g := range gs {\n\t\tcs = append(\n\t\t\tcs,\n\t\t\tast.NewMatchCase(g.value, d.desugarCases(newDict, g.cases, dc)))\n\t}\n\n\treturn d.desugarCases(value, cs, dc)\n}\n\nfunc (d *desugarer) desugarScalarCases(v interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\tks := []ast.SwitchCase{}\n\n\tfor _, c := range cs {\n\t\tks = append(ks, ast.NewSwitchCase(c.Pattern().(string), c.Value()))\n\t}\n\n\treturn newSwitch(v, ks, dc)\n}\n\nfunc renameBoundNamesInCase(c ast.MatchCase) ast.MatchCase {\n\tp, ns := newPatternRenamer().rename(c.Pattern())\n\treturn ast.NewMatchCase(p, newValueRenamer(ns).rename(c.Value()))\n}\n\nfunc equalPatterns(p, q interface{}) bool {\n\tswitch x := p.(type) {\n\tcase string:\n\t\ty, ok := q.(string)\n\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\n\t\treturn x == y\n\tcase ast.App:\n\t\ty, ok := q.(ast.App)\n\n\t\tif !ok ||\n\t\t\tx.Function().(string) != y.Function().(string) ||\n\t\t\tlen(x.Arguments().Positionals()) != len(y.Arguments().Positionals()) {\n\t\t\treturn false\n\t\t}\n\n\t\tfor i := range x.Arguments().Positionals() {\n\t\t\tp := x.Arguments().Positionals()[i]\n\t\t\tq := y.Arguments().Positionals()[i]\n\n\t\t\tif p.Expanded() != q.Expanded() || !equalPatterns(p.Value(), q.Value()) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}\n\n\tpanic(fmt.Errorf(\"Invalid pattern: %#v, %#v\", p, q))\n}\nCreate intermediate variables of default casespackage match\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/ast\"\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/debug\"\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/gensym\"\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/scalar\"\n)\n\ntype desugarer struct {\n\tletBoundNames, lets []interface{}\n}\n\nfunc newDesugarer() *desugarer {\n\treturn &desugarer{nil, nil}\n}\n\nfunc (d *desugarer) desugar(x interface{}) interface{} 
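// The commit message above ("Create intermediate variables of default cases")
// describes the change that follows: instead of splicing the default-case
// expression dc into every generated case, the new letTempVar binds it once to
// a gensym'd name and the cases reference that name. A toy sketch of the
// hoisting, with illustrative names standing in for gensym.GenSym and the AST:
package main

import "fmt"

type letVar struct{ name, expr string }

type desugarer struct {
	lets []letVar
	n    int
}

func (d *desugarer) letTempVar(expr string) string {
	d.n++
	name := fmt.Sprintf("match$tmp%d", d.n) // stand-in for gensym.GenSym("match", "tmp")
	d.lets = append(d.lets, letVar{name, expr})
	return name
}

func main() {
	d := &desugarer{}
	dc := "(someLargeDefaultExpression)"
	dc = d.letTempVar(dc) // the line the commit adds before each case loop
	for _, c := range []string{"caseA", "caseB"} {
		// Every case now shares the single binding instead of a copy of the expression.
		fmt.Printf("%s -> fallback %s\n", c, dc)
	}
	fmt.Println("lets:", d.lets)
}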
{\n\tswitch x := x.(type) {\n\tcase ast.App:\n\t\treturn ast.NewApp(\n\t\t\td.desugar(x.Function()),\n\t\t\td.desugar(x.Arguments()).(ast.Arguments),\n\t\t\tx.DebugInfo())\n\tcase ast.Arguments:\n\t\tps := make([]ast.PositionalArgument, 0, len(x.Positionals()))\n\n\t\tfor _, p := range x.Positionals() {\n\t\t\tps = append(ps, d.desugar(p).(ast.PositionalArgument))\n\t\t}\n\n\t\tks := make([]ast.KeywordArgument, 0, len(x.Keywords()))\n\n\t\tfor _, k := range x.Keywords() {\n\t\t\tks = append(ks, d.desugar(k).(ast.KeywordArgument))\n\t\t}\n\n\t\tdicts := make([]interface{}, 0, len(x.ExpandedDicts()))\n\n\t\tfor _, dict := range x.ExpandedDicts() {\n\t\t\tdicts = append(dicts, d.desugar(dict))\n\t\t}\n\n\t\treturn ast.NewArguments(ps, ks, dicts)\n\tcase ast.KeywordArgument:\n\t\treturn ast.NewKeywordArgument(x.Name(), d.desugar(x.Value()))\n\tcase ast.LetFunction:\n\t\tls := make([]interface{}, 0, len(x.Lets()))\n\n\t\tfor _, l := range x.Lets() {\n\t\t\tl := d.desugar(l)\n\t\t\tls = append(ls, append(d.takeLets(), l)...)\n\t\t}\n\n\t\tb := d.desugar(x.Body())\n\t\tls = append(ls, d.takeLets()...)\n\n\t\treturn ast.NewLetFunction(\n\t\t\tx.Name(),\n\t\t\tx.Signature(),\n\t\t\tls,\n\t\t\tb,\n\t\t\tx.DebugInfo())\n\tcase ast.LetVar:\n\t\treturn ast.NewLetVar(x.Name(), d.desugar(x.Expr()))\n\tcase ast.Match:\n\t\tcs := make([]ast.MatchCase, 0, len(x.Cases()))\n\n\t\tfor _, c := range x.Cases() {\n\t\t\tcs = append(cs, renameBoundNamesInCase(ast.NewMatchCase(c.Pattern(), d.desugar(c.Value()))))\n\t\t}\n\n\t\treturn d.resultApp(d.createMatchFunction(cs), d.desugar(x.Value()))\n\tcase ast.Output:\n\t\treturn ast.NewOutput(d.desugar(x.Expr()), x.Expanded())\n\tcase ast.PositionalArgument:\n\t\treturn ast.NewPositionalArgument(d.desugar(x.Value()), x.Expanded())\n\tdefault:\n\t\treturn x\n\t}\n}\n\nfunc (d *desugarer) takeLets() []interface{} {\n\tls := append(d.letBoundNames, d.lets...)\n\td.letBoundNames = nil\n\td.lets = nil\n\treturn ls\n}\n\nfunc (d *desugarer) letTempVar(v interface{}) string {\n\ts := gensym.GenSym(\"match\", \"tmp\")\n\td.lets = append(d.lets, ast.NewLetVar(s, v))\n\treturn s\n}\n\nfunc (d *desugarer) bindName(s string, v interface{}) string {\n\td.letBoundNames = append(d.letBoundNames, ast.NewLetVar(s, v))\n\treturn s\n}\n\n\/\/ matchedApp applies a function to arguments and creates a matched value of\n\/\/ match expression.\nfunc (d *desugarer) matchedApp(f interface{}, args ...interface{}) string {\n\treturn d.bindName(gensym.GenSym(\"match\", \"app\"), app(f, args...))\n}\n\n\/\/ resultApp applies a function to arguments and creates a result value of match\n\/\/ expression.\nfunc (d *desugarer) resultApp(f interface{}, args ...interface{}) string {\n\treturn d.letTempVar(app(f, args...))\n}\n\nfunc (d *desugarer) createMatchFunction(cs []ast.MatchCase) interface{} {\n\targ := gensym.GenSym(\"match\", \"argument\")\n\tbody := d.desugarCases(arg, cs, \"$matchError\")\n\n\tf := ast.NewLetFunction(\n\t\tgensym.GenSym(\"match\", \"function\"),\n\t\tast.NewSignature([]string{arg}, nil, \"\", nil, nil, \"\"),\n\t\td.takeLets(),\n\t\tbody,\n\t\tdebug.NewGoInfo(0))\n\n\td.lets = append(d.lets, f)\n\n\treturn f.Name()\n}\n\nfunc (d *desugarer) desugarCases(v interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\tcss := groupCases(cs)\n\n\tif cs, ok := css[namePattern]; ok {\n\t\tc := cs[0]\n\t\td.bindName(c.Pattern().(string), v)\n\t\tdc = c.Value()\n\t}\n\n\tks := []ast.SwitchCase{}\n\n\tif cs, ok := css[listPattern]; ok {\n\t\tks = append(ks, 
ast.NewSwitchCase(\"\\\"list\\\"\", d.desugarListCases(v, cs, dc)))\n\t}\n\n\tif cs, ok := css[dictPattern]; ok {\n\t\tks = append(ks, ast.NewSwitchCase(\"\\\"dict\\\"\", d.desugarDictCases(v, cs, dc)))\n\t}\n\n\tif cs, ok := css[scalarPattern]; ok {\n\t\tdc = d.desugarScalarCases(v, cs, dc)\n\t}\n\n\treturn newSwitch(d.resultApp(\"$typeOf\", v), ks, dc)\n}\n\nfunc groupCases(cs []ast.MatchCase) map[patternType][]ast.MatchCase {\n\tcss := map[patternType][]ast.MatchCase{}\n\n\tfor i, c := range cs {\n\t\tt := getPatternType(c.Pattern())\n\n\t\tif t == namePattern && i < len(cs)-1 {\n\t\t\tpanic(\"A wildcard pattern is found while some patterns are left\")\n\t\t}\n\n\t\tcss[t] = append(css[t], c)\n\t}\n\n\treturn css\n}\n\nfunc getPatternType(p interface{}) patternType {\n\tswitch x := p.(type) {\n\tcase string:\n\t\tif scalar.Defined(x) {\n\t\t\treturn scalarPattern\n\t\t}\n\n\t\treturn namePattern\n\tcase ast.App:\n\t\tswitch x.Function().(string) {\n\t\tcase \"$list\":\n\t\t\treturn listPattern\n\t\tcase \"$dict\":\n\t\t\treturn dictPattern\n\t\t}\n\t}\n\n\tpanic(fmt.Errorf(\"Invalid pattern: %#v\", p))\n}\n\nfunc (d *desugarer) desugarListCases(list interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\ttype group struct {\n\t\tfirst interface{}\n\t\tcases []ast.MatchCase\n\t}\n\n\tgs := []group{}\n\tfirst := d.matchedApp(\"$first\", list)\n\trest := d.matchedApp(\"$rest\", list)\n\n\tfor i, c := range cs {\n\t\tps := c.Pattern().(ast.App).Arguments().Positionals()\n\n\t\tif len(ps) == 0 {\n\t\t\tdc = d.resultApp(\"$if\", app(\"$=\", list, \"$emptyList\"), c.Value(), dc)\n\t\t\tcontinue\n\t\t}\n\n\t\tif ps[0].Expanded() {\n\t\t\tpanic(\"Not implemented\")\n\t\t}\n\n\t\tv := ps[0].Value()\n\n\t\tc = ast.NewMatchCase(\n\t\t\tast.NewApp(\"$list\", ast.NewArguments(ps[1:], nil, nil), debug.NewGoInfo(0)),\n\t\t\tc.Value())\n\n\t\tif getPatternType(v) == namePattern {\n\t\t\td.bindName(v.(string), first)\n\t\t\tdc = d.desugarCases(\n\t\t\t\trest,\n\t\t\t\t[]ast.MatchCase{c},\n\t\t\t\td.desugarListCases(list, cs[i+1:], dc))\n\t\t\tbreak\n\t\t}\n\n\t\tgroupExist := false\n\n\t\tfor i, g := range gs {\n\t\t\tif equalPatterns(v, g.first) {\n\t\t\t\tgroupExist = true\n\t\t\t\tgs[i].cases = append(gs[i].cases, c)\n\t\t\t}\n\t\t}\n\n\t\tif !groupExist {\n\t\t\tgs = append(gs, group{v, []ast.MatchCase{c}})\n\t\t}\n\t}\n\n\tks := make([]ast.MatchCase, 0, len(gs))\n\tdc = d.letTempVar(dc)\n\n\tfor _, g := range gs {\n\t\tks = append(ks, ast.NewMatchCase(g.first, d.desugarCases(rest, g.cases, dc)))\n\t}\n\n\treturn d.desugarCases(first, ks, dc)\n}\n\nfunc (d *desugarer) desugarDictCases(v interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\ttype group struct {\n\t\tkey interface{}\n\t\tcases []ast.MatchCase\n\t}\n\n\tgs := []group{}\n\n\tfor _, c := range cs {\n\t\tps := c.Pattern().(ast.App).Arguments().Positionals()\n\n\t\tif len(ps) == 0 {\n\t\t\tdc = d.resultApp(\"$if\", app(\"$=\", v, \"$emptyDict\"), c.Value(), dc)\n\t\t\tcontinue\n\t\t}\n\n\t\tif ps[0].Expanded() {\n\t\t\tpanic(\"Not implemented\")\n\t\t}\n\n\t\tg := group{ps[0].Value(), []ast.MatchCase{c}}\n\n\t\tif len(gs) == 0 {\n\t\t\tgs = append(gs, g)\n\t\t} else if last := gs[len(gs)-1]; equalPatterns(g.key, last.key) {\n\t\t\tlast.cases = append(last.cases, c)\n\t\t} else {\n\t\t\tgs = append(gs, g)\n\t\t}\n\t}\n\n\tfor i := len(gs) - 1; i >= 0; i-- {\n\t\tg := gs[i]\n\t\tdc = d.resultApp(\"$if\",\n\t\t\tapp(\"$include\", v, g.key),\n\t\t\td.desugarDictCasesOfSameKey(v, g.cases, dc),\n\t\t\tdc)\n\t}\n\n\treturn 
dc\n}\n\nfunc (d *desugarer) desugarDictCasesOfSameKey(dict interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\ttype group struct {\n\t\tvalue interface{}\n\t\tcases []ast.MatchCase\n\t}\n\n\tkey := cs[0].Pattern().(ast.App).Arguments().Positionals()[0].Value()\n\tvalue := d.matchedApp(dict, key)\n\tnewDict := d.matchedApp(\"delete\", dict, key)\n\tgs := []group{}\n\n\tfor i, c := range cs {\n\t\tps := c.Pattern().(ast.App).Arguments().Positionals()\n\t\tv := ps[1].Value()\n\n\t\tc = ast.NewMatchCase(\n\t\t\tast.NewApp(\"$dict\", ast.NewArguments(ps[2:], nil, nil), debug.NewGoInfo(0)),\n\t\t\tc.Value())\n\n\t\tif getPatternType(v) == namePattern {\n\t\t\td.bindName(v.(string), value)\n\n\t\t\tif rest := cs[i+1:]; len(rest) != 0 {\n\t\t\t\tdc = d.desugarDictCasesOfSameKey(dict, rest, dc)\n\t\t\t}\n\n\t\t\tdc = d.desugarCases(newDict, []ast.MatchCase{c}, dc)\n\n\t\t\tbreak\n\t\t}\n\n\t\tgroupExist := false\n\n\t\tfor i, g := range gs {\n\t\t\tif equalPatterns(v, g.value) {\n\t\t\t\tgroupExist = true\n\t\t\t\tgs[i].cases = append(gs[i].cases, c)\n\t\t\t}\n\t\t}\n\n\t\tif !groupExist {\n\t\t\tgs = append(gs, group{v, []ast.MatchCase{c}})\n\t\t}\n\t}\n\n\tcs = make([]ast.MatchCase, 0, len(gs))\n\tdc = d.letTempVar(dc)\n\n\tfor _, g := range gs {\n\t\tcs = append(\n\t\t\tcs,\n\t\t\tast.NewMatchCase(g.value, d.desugarCases(newDict, g.cases, dc)))\n\t}\n\n\treturn d.desugarCases(value, cs, dc)\n}\n\nfunc (d *desugarer) desugarScalarCases(v interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\tks := []ast.SwitchCase{}\n\n\tfor _, c := range cs {\n\t\tks = append(ks, ast.NewSwitchCase(c.Pattern().(string), c.Value()))\n\t}\n\n\treturn newSwitch(v, ks, dc)\n}\n\nfunc renameBoundNamesInCase(c ast.MatchCase) ast.MatchCase {\n\tp, ns := newPatternRenamer().rename(c.Pattern())\n\treturn ast.NewMatchCase(p, newValueRenamer(ns).rename(c.Value()))\n}\n\nfunc equalPatterns(p, q interface{}) bool {\n\tswitch x := p.(type) {\n\tcase string:\n\t\ty, ok := q.(string)\n\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\n\t\treturn x == y\n\tcase ast.App:\n\t\ty, ok := q.(ast.App)\n\n\t\tif !ok ||\n\t\t\tx.Function().(string) != y.Function().(string) ||\n\t\t\tlen(x.Arguments().Positionals()) != len(y.Arguments().Positionals()) {\n\t\t\treturn false\n\t\t}\n\n\t\tfor i := range x.Arguments().Positionals() {\n\t\t\tp := x.Arguments().Positionals()[i]\n\t\t\tq := y.Arguments().Positionals()[i]\n\n\t\t\tif p.Expanded() != q.Expanded() || !equalPatterns(p.Value(), q.Value()) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}\n\n\tpanic(fmt.Errorf(\"Invalid pattern: %#v, %#v\", p, q))\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2019 The Go Authors. 
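// The recursive shape of equalPatterns above, on a self-contained pattern type
// instead of ast.App (illustrative types, not the package's): names compare as
// strings, applications compare by function, arity, and then argument-wise.
package main

import "fmt"

type pat interface{}

type app struct {
	fn   string
	args []pat
}

func equal(p, q pat) bool {
	switch x := p.(type) {
	case string:
		y, ok := q.(string)
		return ok && x == y
	case app:
		y, ok := q.(app)
		if !ok || x.fn != y.fn || len(x.args) != len(y.args) {
			return false
		}
		for i := range x.args {
			if !equal(x.args[i], y.args[i]) {
				return false
			}
		}
		return true
	}
	return false
}

func main() {
	a := app{"$list", []pat{"1", app{"$dict", nil}}}
	b := app{"$list", []pat{"1", app{"$dict", nil}}}
	fmt.Println(equal(a, b), equal(a, "x")) // true false
}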
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage lsp\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\n\t\"golang.org\/x\/tools\/internal\/jsonrpc2\"\n\t\"golang.org\/x\/tools\/internal\/lsp\/protocol\"\n\t\"golang.org\/x\/tools\/internal\/lsp\/source\"\n\t\"golang.org\/x\/tools\/internal\/span\"\n\terrors \"golang.org\/x\/xerrors\"\n)\n\n\/\/ ModificationSource identifies the originating cause of a file modification.\ntype ModificationSource int\n\nconst (\n\t\/\/ FromDidOpen is a file modification caused by opening a file.\n\tFromDidOpen = ModificationSource(iota)\n\t\/\/ FromDidChange is a file modification caused by changing a file.\n\tFromDidChange\n\t\/\/ FromDidChangeWatchedFiles is a file modification caused by a change to a watched file.\n\tFromDidChangeWatchedFiles\n\t\/\/ FromDidSave is a file modification caused by a file save.\n\tFromDidSave\n\t\/\/ FromDidClose is a file modification caused by closing a file.\n\tFromDidClose\n\tFromRegenerateCgo\n)\n\nfunc (m ModificationSource) String() string {\n\tswitch m {\n\tcase FromDidOpen:\n\t\treturn \"opened files\"\n\tcase FromDidChange:\n\t\treturn \"changed files\"\n\tcase FromDidChangeWatchedFiles:\n\t\treturn \"files changed on disk\"\n\tcase FromDidSave:\n\t\treturn \"saved files\"\n\tcase FromRegenerateCgo:\n\t\treturn \"regenerate cgo\"\n\tdefault:\n\t\treturn \"unknown file modification\"\n\t}\n}\n\nfunc (s *Server) didOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) error {\n\turi := params.TextDocument.URI.SpanURI()\n\tif !uri.IsFile() {\n\t\treturn nil\n\t}\n\n\t_, err := s.didModifyFiles(ctx, []source.FileModification{\n\t\t{\n\t\t\tURI: uri,\n\t\t\tAction: source.Open,\n\t\t\tVersion: params.TextDocument.Version,\n\t\t\tText: []byte(params.TextDocument.Text),\n\t\t\tLanguageID: params.TextDocument.LanguageID,\n\t\t},\n\t}, FromDidOpen)\n\treturn err\n}\n\nfunc (s *Server) didChange(ctx context.Context, params *protocol.DidChangeTextDocumentParams) error {\n\turi := params.TextDocument.URI.SpanURI()\n\tif !uri.IsFile() {\n\t\treturn nil\n\t}\n\n\ttext, err := s.changedText(ctx, uri, params.ContentChanges)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc := source.FileModification{\n\t\tURI: uri,\n\t\tAction: source.Change,\n\t\tVersion: params.TextDocument.Version,\n\t\tText: text,\n\t}\n\tsnapshots, err := s.didModifyFiles(ctx, []source.FileModification{c}, FromDidChange)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsnapshot := snapshots[uri]\n\tif snapshot == nil {\n\t\treturn errors.Errorf(\"no snapshot for %s\", uri)\n\t}\n\t\/\/ Ideally, we should be able to specify that a generated file should be opened as read-only.\n\t\/\/ Tell the user that they should not be editing a generated file.\n\tif s.wasFirstChange(uri) && source.IsGenerated(ctx, snapshot, uri) {\n\t\tif err := s.client.ShowMessage(ctx, &protocol.ShowMessageParams{\n\t\t\tMessage: fmt.Sprintf(\"Do not edit this file! 
%s is a generated file.\", uri.Filename()),\n\t\t\tType: protocol.Warning,\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *Server) didChangeWatchedFiles(ctx context.Context, params *protocol.DidChangeWatchedFilesParams) error {\n\tvar modifications []source.FileModification\n\tdeletions := make(map[span.URI]struct{})\n\tfor _, change := range params.Changes {\n\t\turi := change.URI.SpanURI()\n\t\tif !uri.IsFile() {\n\t\t\tcontinue\n\t\t}\n\t\taction := changeTypeToFileAction(change.Type)\n\t\tmodifications = append(modifications, source.FileModification{\n\t\t\tURI: uri,\n\t\t\tAction: action,\n\t\t\tOnDisk: true,\n\t\t})\n\t\t\/\/ Keep track of deleted files so that we can clear their diagnostics.\n\t\t\/\/ A file might be re-created after deletion, so only mark files that\n\t\t\/\/ have truly been deleted.\n\t\tswitch action {\n\t\tcase source.Delete:\n\t\t\tdeletions[uri] = struct{}{}\n\t\tcase source.Close:\n\t\tdefault:\n\t\t\tdelete(deletions, uri)\n\t\t}\n\t}\n\tsnapshots, err := s.didModifyFiles(ctx, modifications, FromDidChangeWatchedFiles)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Clear the diagnostics for any deleted files that are not open in the editor.\n\tfor uri := range deletions {\n\t\tif snapshot := snapshots[uri]; snapshot == nil || snapshot.IsOpen(uri) {\n\t\t\tcontinue\n\t\t}\n\t\tif err := s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{\n\t\t\tURI: protocol.URIFromSpanURI(uri),\n\t\t\tDiagnostics: []protocol.Diagnostic{},\n\t\t\tVersion: 0,\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *Server) didSave(ctx context.Context, params *protocol.DidSaveTextDocumentParams) error {\n\turi := params.TextDocument.URI.SpanURI()\n\tif !uri.IsFile() {\n\t\treturn nil\n\t}\n\tc := source.FileModification{\n\t\tURI: uri,\n\t\tAction: source.Save,\n\t\tVersion: params.TextDocument.Version,\n\t}\n\tif params.Text != nil {\n\t\tc.Text = []byte(*params.Text)\n\t}\n\t_, err := s.didModifyFiles(ctx, []source.FileModification{c}, FromDidSave)\n\treturn err\n}\n\nfunc (s *Server) didClose(ctx context.Context, params *protocol.DidCloseTextDocumentParams) error {\n\turi := params.TextDocument.URI.SpanURI()\n\tif !uri.IsFile() {\n\t\treturn nil\n\t}\n\tsnapshots, err := s.didModifyFiles(ctx, []source.FileModification{\n\t\t{\n\t\t\tURI: uri,\n\t\t\tAction: source.Close,\n\t\t\tVersion: -1,\n\t\t\tText: nil,\n\t\t},\n\t}, FromDidClose)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsnapshot := snapshots[uri]\n\tif snapshot == nil {\n\t\treturn errors.Errorf(\"no snapshot for %s\", uri)\n\t}\n\tfh, err := snapshot.GetFile(uri)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ If a file has been closed and is not on disk, clear its diagnostics.\n\tif _, _, err := fh.Read(ctx); err != nil {\n\t\treturn s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{\n\t\t\tURI: protocol.URIFromSpanURI(uri),\n\t\t\tDiagnostics: []protocol.Diagnostic{},\n\t\t\tVersion: 0,\n\t\t})\n\t}\n\treturn nil\n}\n\nfunc (s *Server) didModifyFiles(ctx context.Context, modifications []source.FileModification, cause ModificationSource) (map[span.URI]source.Snapshot, error) {\n\tsnapshots, err := s.session.DidModifyFiles(ctx, modifications)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsnapshotByURI := make(map[span.URI]source.Snapshot)\n\tfor _, c := range modifications {\n\t\tsnapshotByURI[c.URI] = nil\n\t}\n\t\/\/ Avoid diagnosing the same snapshot twice.\n\tsnapshotSet := make(map[source.Snapshot][]span.URI)\n\tfor 
uri := range snapshotByURI {\n\t\tview, err := s.session.ViewOf(uri)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar snapshot source.Snapshot\n\t\tfor _, s := range snapshots {\n\t\t\tif s.View() == view {\n\t\t\t\tif snapshot != nil {\n\t\t\t\t\treturn nil, errors.Errorf(\"duplicate snapshots for the same view\")\n\t\t\t\t}\n\t\t\t\tsnapshot = s\n\t\t\t}\n\t\t}\n\t\t\/\/ If the file isn't in any known views (for example, if it's in a dependency),\n\t\t\/\/ we may not have a snapshot to map it to. As a result, we won't try to\n\t\t\/\/ diagnose it. TODO(rstambler): Figure out how to handle this better.\n\t\tif snapshot == nil {\n\t\t\tcontinue\n\t\t}\n\t\tsnapshotByURI[uri] = snapshot\n\t\tsnapshotSet[snapshot] = append(snapshotSet[snapshot], uri)\n\t}\n\tfor snapshot, uris := range snapshotSet {\n\t\t\/\/ If a modification comes in for the view's go.mod file and the view\n\t\t\/\/ was never properly initialized, or the view does not have\n\t\t\/\/ a go.mod file, try to recreate the associated view.\n\t\tif modfile, _ := snapshot.View().ModFiles(); modfile == \"\" {\n\t\t\tfor _, uri := range uris {\n\t\t\t\t\/\/ Don't rebuild the view until the go.mod is on disk.\n\t\t\t\tif !snapshot.IsSaved(uri) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfh, err := snapshot.GetFile(uri)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tswitch fh.Identity().Kind {\n\t\t\t\tcase source.Mod:\n\t\t\t\t\tnewSnapshot, err := snapshot.View().Rebuild(ctx)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ Update the snapshot to the rebuilt one.\n\t\t\t\t\tsnapshot = newSnapshot\n\t\t\t\t\tsnapshotByURI[uri] = newSnapshot\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tgo func(snapshot source.Snapshot) {\n\t\t\tif s.session.Options().VerboseWorkDoneProgress {\n\t\t\t\twork := s.StartWork(ctx, DiagnosticWorkTitle(cause), \"Calculating file diagnostics...\", nil)\n\t\t\t\tdefer work.End(ctx, \"Done.\")\n\t\t\t}\n\t\t\ts.diagnoseSnapshot(snapshot)\n\t\t}(snapshot)\n\t}\n\treturn snapshotByURI, nil\n}\n\n\/\/ DiagnosticWorkTitle returns the title of the diagnostic work resulting from a\n\/\/ file change originating from the given cause.\nfunc DiagnosticWorkTitle(cause ModificationSource) string {\n\treturn fmt.Sprintf(\"diagnosing %v\", cause)\n}\n\nfunc (s *Server) wasFirstChange(uri span.URI) bool {\n\tif s.changedFiles == nil {\n\t\ts.changedFiles = make(map[span.URI]struct{})\n\t}\n\t_, ok := s.changedFiles[uri]\n\treturn ok\n}\n\nfunc (s *Server) changedText(ctx context.Context, uri span.URI, changes []protocol.TextDocumentContentChangeEvent) ([]byte, error) {\n\tif len(changes) == 0 {\n\t\treturn nil, fmt.Errorf(\"%w: no content changes provided\", jsonrpc2.ErrInternal)\n\t}\n\n\t\/\/ Check if the client sent the full content of the file.\n\t\/\/ We accept a full content change even if the server expected incremental changes.\n\tif len(changes) == 1 && changes[0].Range == nil && changes[0].RangeLength == 0 {\n\t\treturn []byte(changes[0].Text), nil\n\t}\n\treturn s.applyIncrementalChanges(ctx, uri, changes)\n}\n\nfunc (s *Server) applyIncrementalChanges(ctx context.Context, uri span.URI, changes []protocol.TextDocumentContentChangeEvent) ([]byte, error) {\n\tcontent, _, err := s.session.GetFile(uri).Read(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%w: file not found (%v)\", jsonrpc2.ErrInternal, err)\n\t}\n\tfor _, change := range changes {\n\t\t\/\/ Make sure to update column mapper along with the content.\n\t\tconverter := 
span.NewContentConverter(uri.Filename(), content)\n\t\tm := &protocol.ColumnMapper{\n\t\t\tURI: uri,\n\t\t\tConverter: converter,\n\t\t\tContent: content,\n\t\t}\n\t\tif change.Range == nil {\n\t\t\treturn nil, fmt.Errorf(\"%w: unexpected nil range for change\", jsonrpc2.ErrInternal)\n\t\t}\n\t\tspn, err := m.RangeSpan(*change.Range)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !spn.HasOffset() {\n\t\t\treturn nil, fmt.Errorf(\"%w: invalid range for content change\", jsonrpc2.ErrInternal)\n\t\t}\n\t\tstart, end := spn.Start().Offset(), spn.End().Offset()\n\t\tif end < start {\n\t\t\treturn nil, fmt.Errorf(\"%w: invalid range for content change\", jsonrpc2.ErrInternal)\n\t\t}\n\t\tvar buf bytes.Buffer\n\t\tbuf.Write(content[:start])\n\t\tbuf.WriteString(change.Text)\n\t\tbuf.Write(content[end:])\n\t\tcontent = buf.Bytes()\n\t}\n\treturn content, nil\n}\n\nfunc changeTypeToFileAction(ct protocol.FileChangeType) source.FileAction {\n\tswitch ct {\n\tcase protocol.Changed:\n\t\treturn source.Change\n\tcase protocol.Created:\n\t\treturn source.Create\n\tcase protocol.Deleted:\n\t\treturn source.Delete\n\t}\n\treturn source.UnknownFileAction\n}\ninternal\/lsp: lift up workdone instrumentation to didModifyFiles\/\/ Copyright 2019 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage lsp\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\n\t\"golang.org\/x\/tools\/internal\/jsonrpc2\"\n\t\"golang.org\/x\/tools\/internal\/lsp\/protocol\"\n\t\"golang.org\/x\/tools\/internal\/lsp\/source\"\n\t\"golang.org\/x\/tools\/internal\/span\"\n\terrors \"golang.org\/x\/xerrors\"\n)\n\n\/\/ ModificationSource identifies the originating cause of a file modification.\ntype ModificationSource int\n\nconst (\n\t\/\/ FromDidOpen is a file modification caused by opening a file.\n\tFromDidOpen = ModificationSource(iota)\n\t\/\/ FromDidChange is a file modification caused by changing a file.\n\tFromDidChange\n\t\/\/ FromDidChangeWatchedFiles is a file modification caused by a change to a watched file.\n\tFromDidChangeWatchedFiles\n\t\/\/ FromDidSave is a file modification caused by a file save.\n\tFromDidSave\n\t\/\/ FromDidClose is a file modification caused by closing a file.\n\tFromDidClose\n\tFromRegenerateCgo\n)\n\nfunc (m ModificationSource) String() string {\n\tswitch m {\n\tcase FromDidOpen:\n\t\treturn \"opened files\"\n\tcase FromDidChange:\n\t\treturn \"changed files\"\n\tcase FromDidChangeWatchedFiles:\n\t\treturn \"files changed on disk\"\n\tcase FromDidSave:\n\t\treturn \"saved files\"\n\tcase FromRegenerateCgo:\n\t\treturn \"regenerate cgo\"\n\tdefault:\n\t\treturn \"unknown file modification\"\n\t}\n}\n\nfunc (s *Server) didOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) error {\n\turi := params.TextDocument.URI.SpanURI()\n\tif !uri.IsFile() {\n\t\treturn nil\n\t}\n\n\t_, err := s.didModifyFiles(ctx, []source.FileModification{\n\t\t{\n\t\t\tURI: uri,\n\t\t\tAction: source.Open,\n\t\t\tVersion: params.TextDocument.Version,\n\t\t\tText: []byte(params.TextDocument.Text),\n\t\t\tLanguageID: params.TextDocument.LanguageID,\n\t\t},\n\t}, FromDidOpen)\n\treturn err\n}\n\nfunc (s *Server) didChange(ctx context.Context, params *protocol.DidChangeTextDocumentParams) error {\n\turi := params.TextDocument.URI.SpanURI()\n\tif !uri.IsFile() {\n\t\treturn nil\n\t}\n\n\ttext, err := s.changedText(ctx, uri, params.ContentChanges)\n\tif err != nil {\n\t\treturn 
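// The splice at the heart of applyIncrementalChanges above, isolated: replace
// content[start:end] with the change text via a bytes.Buffer. Offsets and the
// sample input are illustrative; the real code derives start/end from the
// LSP range through the column mapper.
package main

import (
	"bytes"
	"fmt"
)

func applyEdit(content []byte, start, end int, text string) []byte {
	var buf bytes.Buffer
	buf.Write(content[:start])
	buf.WriteString(text)
	buf.Write(content[end:])
	return buf.Bytes()
}

func main() {
	src := []byte("hello world")
	fmt.Println(string(applyEdit(src, 6, 11, "gopls"))) // hello gopls
}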
err\n\t}\n\tc := source.FileModification{\n\t\tURI: uri,\n\t\tAction: source.Change,\n\t\tVersion: params.TextDocument.Version,\n\t\tText: text,\n\t}\n\tsnapshots, err := s.didModifyFiles(ctx, []source.FileModification{c}, FromDidChange)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsnapshot := snapshots[uri]\n\tif snapshot == nil {\n\t\treturn errors.Errorf(\"no snapshot for %s\", uri)\n\t}\n\t\/\/ Ideally, we should be able to specify that a generated file should be opened as read-only.\n\t\/\/ Tell the user that they should not be editing a generated file.\n\tif s.wasFirstChange(uri) && source.IsGenerated(ctx, snapshot, uri) {\n\t\tif err := s.client.ShowMessage(ctx, &protocol.ShowMessageParams{\n\t\t\tMessage: fmt.Sprintf(\"Do not edit this file! %s is a generated file.\", uri.Filename()),\n\t\t\tType: protocol.Warning,\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *Server) didChangeWatchedFiles(ctx context.Context, params *protocol.DidChangeWatchedFilesParams) error {\n\tvar modifications []source.FileModification\n\tdeletions := make(map[span.URI]struct{})\n\tfor _, change := range params.Changes {\n\t\turi := change.URI.SpanURI()\n\t\tif !uri.IsFile() {\n\t\t\tcontinue\n\t\t}\n\t\taction := changeTypeToFileAction(change.Type)\n\t\tmodifications = append(modifications, source.FileModification{\n\t\t\tURI: uri,\n\t\t\tAction: action,\n\t\t\tOnDisk: true,\n\t\t})\n\t\t\/\/ Keep track of deleted files so that we can clear their diagnostics.\n\t\t\/\/ A file might be re-created after deletion, so only mark files that\n\t\t\/\/ have truly been deleted.\n\t\tswitch action {\n\t\tcase source.Delete:\n\t\t\tdeletions[uri] = struct{}{}\n\t\tcase source.Close:\n\t\tdefault:\n\t\t\tdelete(deletions, uri)\n\t\t}\n\t}\n\tsnapshots, err := s.didModifyFiles(ctx, modifications, FromDidChangeWatchedFiles)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Clear the diagnostics for any deleted files that are not open in the editor.\n\tfor uri := range deletions {\n\t\tif snapshot := snapshots[uri]; snapshot == nil || snapshot.IsOpen(uri) {\n\t\t\tcontinue\n\t\t}\n\t\tif err := s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{\n\t\t\tURI: protocol.URIFromSpanURI(uri),\n\t\t\tDiagnostics: []protocol.Diagnostic{},\n\t\t\tVersion: 0,\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *Server) didSave(ctx context.Context, params *protocol.DidSaveTextDocumentParams) error {\n\turi := params.TextDocument.URI.SpanURI()\n\tif !uri.IsFile() {\n\t\treturn nil\n\t}\n\tc := source.FileModification{\n\t\tURI: uri,\n\t\tAction: source.Save,\n\t\tVersion: params.TextDocument.Version,\n\t}\n\tif params.Text != nil {\n\t\tc.Text = []byte(*params.Text)\n\t}\n\t_, err := s.didModifyFiles(ctx, []source.FileModification{c}, FromDidSave)\n\treturn err\n}\n\nfunc (s *Server) didClose(ctx context.Context, params *protocol.DidCloseTextDocumentParams) error {\n\turi := params.TextDocument.URI.SpanURI()\n\tif !uri.IsFile() {\n\t\treturn nil\n\t}\n\tsnapshots, err := s.didModifyFiles(ctx, []source.FileModification{\n\t\t{\n\t\t\tURI: uri,\n\t\t\tAction: source.Close,\n\t\t\tVersion: -1,\n\t\t\tText: nil,\n\t\t},\n\t}, FromDidClose)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsnapshot := snapshots[uri]\n\tif snapshot == nil {\n\t\treturn errors.Errorf(\"no snapshot for %s\", uri)\n\t}\n\tfh, err := snapshot.GetFile(uri)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ If a file has been closed and is not on disk, clear its diagnostics.\n\tif _, _, err := 
fh.Read(ctx); err != nil {\n\t\treturn s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{\n\t\t\tURI: protocol.URIFromSpanURI(uri),\n\t\t\tDiagnostics: []protocol.Diagnostic{},\n\t\t\tVersion: 0,\n\t\t})\n\t}\n\treturn nil\n}\n\nfunc (s *Server) didModifyFiles(ctx context.Context, modifications []source.FileModification, cause ModificationSource) (map[span.URI]source.Snapshot, error) {\n\t\/\/ diagnosticWG tracks outstanding diagnostic work as a result of this file\n\t\/\/ modification.\n\tvar diagnosticWG sync.WaitGroup\n\tif s.session.Options().VerboseWorkDoneProgress {\n\t\twork := s.StartWork(ctx, DiagnosticWorkTitle(cause), \"Calculating file diagnostics...\", nil)\n\t\tdefer func() {\n\t\t\tgo func() {\n\t\t\t\tdiagnosticWG.Wait()\n\t\t\t\twork.End(ctx, \"Done.\")\n\t\t\t}()\n\t\t}()\n\t}\n\tsnapshots, err := s.session.DidModifyFiles(ctx, modifications)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsnapshotByURI := make(map[span.URI]source.Snapshot)\n\tfor _, c := range modifications {\n\t\tsnapshotByURI[c.URI] = nil\n\t}\n\t\/\/ Avoid diagnosing the same snapshot twice.\n\tsnapshotSet := make(map[source.Snapshot][]span.URI)\n\tfor uri := range snapshotByURI {\n\t\tview, err := s.session.ViewOf(uri)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar snapshot source.Snapshot\n\t\tfor _, s := range snapshots {\n\t\t\tif s.View() == view {\n\t\t\t\tif snapshot != nil {\n\t\t\t\t\treturn nil, errors.Errorf(\"duplicate snapshots for the same view\")\n\t\t\t\t}\n\t\t\t\tsnapshot = s\n\t\t\t}\n\t\t}\n\t\t\/\/ If the file isn't in any known views (for example, if it's in a dependency),\n\t\t\/\/ we may not have a snapshot to map it to. As a result, we won't try to\n\t\t\/\/ diagnose it. TODO(rstambler): Figure out how to handle this better.\n\t\tif snapshot == nil {\n\t\t\tcontinue\n\t\t}\n\t\tsnapshotByURI[uri] = snapshot\n\t\tsnapshotSet[snapshot] = append(snapshotSet[snapshot], uri)\n\t}\n\tfor snapshot, uris := range snapshotSet {\n\t\t\/\/ If a modification comes in for the view's go.mod file and the view\n\t\t\/\/ was never properly initialized, or the view does not have\n\t\t\/\/ a go.mod file, try to recreate the associated view.\n\t\tif modfile, _ := snapshot.View().ModFiles(); modfile == \"\" {\n\t\t\tfor _, uri := range uris {\n\t\t\t\t\/\/ Don't rebuild the view until the go.mod is on disk.\n\t\t\t\tif !snapshot.IsSaved(uri) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfh, err := snapshot.GetFile(uri)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tswitch fh.Identity().Kind {\n\t\t\t\tcase source.Mod:\n\t\t\t\t\tnewSnapshot, err := snapshot.View().Rebuild(ctx)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ Update the snapshot to the rebuilt one.\n\t\t\t\t\tsnapshot = newSnapshot\n\t\t\t\t\tsnapshotByURI[uri] = newSnapshot\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tdiagnosticWG.Add(1)\n\t\tgo func(snapshot source.Snapshot) {\n\t\t\tdefer diagnosticWG.Done()\n\t\t\ts.diagnoseSnapshot(snapshot)\n\t\t}(snapshot)\n\t}\n\treturn snapshotByURI, nil\n}\n\n\/\/ DiagnosticWorkTitle returns the title of the diagnostic work resulting from a\n\/\/ file change originating from the given cause.\nfunc DiagnosticWorkTitle(cause ModificationSource) string {\n\treturn fmt.Sprintf(\"diagnosing %v\", cause)\n}\n\nfunc (s *Server) wasFirstChange(uri span.URI) bool {\n\tif s.changedFiles == nil {\n\t\ts.changedFiles = make(map[span.URI]struct{})\n\t}\n\t_, ok := s.changedFiles[uri]\n\treturn ok\n}\n\nfunc (s *Server) 
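// The instrumentation shape this commit lifts into didModifyFiles, reduced to a
// sketch: each diagnostic goroutine is counted on a WaitGroup, and the deferred
// closure ends the work-done report off the request path once all of them
// finish. fmt.Println stands in for work.End(ctx, "Done.") and diagnoseSnapshot;
// other names are illustrative.
package main

import (
	"fmt"
	"sync"
	"time"
)

func modify() <-chan struct{} {
	var diagnosticWG sync.WaitGroup
	done := make(chan struct{})
	defer func() {
		// Runs after all goroutines below are launched; the wait itself
		// happens in a new goroutine so the caller is not blocked.
		go func() {
			diagnosticWG.Wait()
			fmt.Println("work.End: Done.")
			close(done)
		}()
	}()
	for i := 0; i < 3; i++ {
		diagnosticWG.Add(1)
		go func(i int) {
			defer diagnosticWG.Done()
			time.Sleep(10 * time.Millisecond) // stand-in for s.diagnoseSnapshot
			fmt.Println("diagnosed snapshot", i)
		}(i)
	}
	return done
}

func main() {
	<-modify()
}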
changedText(ctx context.Context, uri span.URI, changes []protocol.TextDocumentContentChangeEvent) ([]byte, error) {\n\tif len(changes) == 0 {\n\t\treturn nil, fmt.Errorf(\"%w: no content changes provided\", jsonrpc2.ErrInternal)\n\t}\n\n\t\/\/ Check if the client sent the full content of the file.\n\t\/\/ We accept a full content change even if the server expected incremental changes.\n\tif len(changes) == 1 && changes[0].Range == nil && changes[0].RangeLength == 0 {\n\t\treturn []byte(changes[0].Text), nil\n\t}\n\treturn s.applyIncrementalChanges(ctx, uri, changes)\n}\n\nfunc (s *Server) applyIncrementalChanges(ctx context.Context, uri span.URI, changes []protocol.TextDocumentContentChangeEvent) ([]byte, error) {\n\tcontent, _, err := s.session.GetFile(uri).Read(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%w: file not found (%v)\", jsonrpc2.ErrInternal, err)\n\t}\n\tfor _, change := range changes {\n\t\t\/\/ Make sure to update column mapper along with the content.\n\t\tconverter := span.NewContentConverter(uri.Filename(), content)\n\t\tm := &protocol.ColumnMapper{\n\t\t\tURI: uri,\n\t\t\tConverter: converter,\n\t\t\tContent: content,\n\t\t}\n\t\tif change.Range == nil {\n\t\t\treturn nil, fmt.Errorf(\"%w: unexpected nil range for change\", jsonrpc2.ErrInternal)\n\t\t}\n\t\tspn, err := m.RangeSpan(*change.Range)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !spn.HasOffset() {\n\t\t\treturn nil, fmt.Errorf(\"%w: invalid range for content change\", jsonrpc2.ErrInternal)\n\t\t}\n\t\tstart, end := spn.Start().Offset(), spn.End().Offset()\n\t\tif end < start {\n\t\t\treturn nil, fmt.Errorf(\"%w: invalid range for content change\", jsonrpc2.ErrInternal)\n\t\t}\n\t\tvar buf bytes.Buffer\n\t\tbuf.Write(content[:start])\n\t\tbuf.WriteString(change.Text)\n\t\tbuf.Write(content[end:])\n\t\tcontent = buf.Bytes()\n\t}\n\treturn content, nil\n}\n\nfunc changeTypeToFileAction(ct protocol.FileChangeType) source.FileAction {\n\tswitch ct {\n\tcase protocol.Changed:\n\t\treturn source.Change\n\tcase protocol.Created:\n\t\treturn source.Create\n\tcase protocol.Deleted:\n\t\treturn source.Delete\n\t}\n\treturn source.UnknownFileAction\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage validation\n\nimport (\n\t\"reflect\"\n\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/unversioned\"\n\tunversionedvalidation \"k8s.io\/kubernetes\/pkg\/api\/unversioned\/validation\"\n\tapivalidation \"k8s.io\/kubernetes\/pkg\/api\/validation\"\n\t\"k8s.io\/kubernetes\/pkg\/apis\/apps\"\n\t\"k8s.io\/kubernetes\/pkg\/labels\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/validation\/field\"\n)\n\n\/\/ ValidateStatefulSetName can be used to check whether the given StatefulSet name is valid.\n\/\/ Prefix indicates this name will be used as part of generation, in which case\n\/\/ trailing dashes are allowed.\nfunc ValidateStatefulSetName(name string, prefix bool) []string {\n\t\/\/ TODO: Validate that there's name for the 
suffix inserted by the pods.\n\t\/\/ Currently this is just \"-index\". In the future we may allow a user\n\t\/\/ specified list of suffixes and we need to validate the longest one.\n\treturn apivalidation.NameIsDNSSubdomain(name, prefix)\n}\n\n\/\/ Validates the given template and ensures that it is in accordance with the desired selector.\nfunc ValidatePodTemplateSpecForStatefulSet(template *api.PodTemplateSpec, selector labels.Selector, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tif template == nil {\n\t\tallErrs = append(allErrs, field.Required(fldPath, \"\"))\n\t} else {\n\t\tif !selector.Empty() {\n\t\t\t\/\/ Verify that the StatefulSet selector matches the labels in template.\n\t\t\tlabels := labels.Set(template.Labels)\n\t\t\tif !selector.Matches(labels) {\n\t\t\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"metadata\", \"labels\"), template.Labels, \"`selector` does not match template `labels`\"))\n\t\t\t}\n\t\t}\n\t\t\/\/ TODO: Add validation for PodSpec, currently this will check volumes, which we know will\n\t\t\/\/ fail. We should really check that the union of the given volumes and volumeClaims match\n\t\t\/\/ volume mounts in the containers.\n\t\t\/\/ allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(template, fldPath)...)\n\t\tallErrs = append(allErrs, unversionedvalidation.ValidateLabels(template.Labels, fldPath.Child(\"labels\"))...)\n\t\tallErrs = append(allErrs, apivalidation.ValidateAnnotations(template.Annotations, fldPath.Child(\"annotations\"))...)\n\t\tallErrs = append(allErrs, apivalidation.ValidatePodSpecificAnnotations(template.Annotations, &template.Spec, fldPath.Child(\"annotations\"))...)\n\t}\n\treturn allErrs\n}\n\n\/\/ ValidateStatefulSetSpec tests if required fields in the StatefulSet spec are set.\nfunc ValidateStatefulSetSpec(spec *apps.StatefulSetSpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tallErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child(\"replicas\"))...)\n\tif spec.Selector == nil {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"selector\"), \"\"))\n\t} else {\n\t\tallErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child(\"selector\"))...)\n\t\tif len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {\n\t\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"selector\"), spec.Selector, \"empty selector is not valid for statefulset.\"))\n\t\t}\n\t}\n\n\tselector, err := unversioned.LabelSelectorAsSelector(spec.Selector)\n\tif err != nil {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"selector\"), spec.Selector, \"\"))\n\t} else {\n\t\tallErrs = append(allErrs, ValidatePodTemplateSpecForStatefulSet(&spec.Template, selector, fldPath.Child(\"template\"))...)\n\t}\n\n\tif spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways {\n\t\tallErrs = append(allErrs, field.NotSupported(fldPath.Child(\"template\", \"spec\", \"restartPolicy\"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)}))\n\t}\n\n\treturn allErrs\n}\n\n\/\/ ValidateStatefulSet validates a StatefulSet.\nfunc ValidateStatefulSet(statefulSet *apps.StatefulSet) field.ErrorList {\n\tallErrs := apivalidation.ValidateObjectMeta(&statefulSet.ObjectMeta, true, ValidateStatefulSetName, field.NewPath(\"metadata\"))\n\tallErrs = append(allErrs, ValidateStatefulSetSpec(&statefulSet.Spec, field.NewPath(\"spec\"))...)\n\treturn 
allErrs\n}\n\n\/\/ ValidateStatefulSetUpdate tests if required fields in the StatefulSet are set.\nfunc ValidateStatefulSetUpdate(statefulSet, oldStatefulSet *apps.StatefulSet) field.ErrorList {\n\tallErrs := apivalidation.ValidateObjectMetaUpdate(&statefulSet.ObjectMeta, &oldStatefulSet.ObjectMeta, field.NewPath(\"metadata\"))\n\n\t\/\/ TODO: For now we're taking the safe route and disallowing all updates to\n\t\/\/ spec except for Replicas, for scaling, and Template.Spec.containers.image\n\t\/\/ for rolling-update. Enable others on a case by case basis.\n\trestoreReplicas := statefulSet.Spec.Replicas\n\tstatefulSet.Spec.Replicas = oldStatefulSet.Spec.Replicas\n\n\trestoreContainers := statefulSet.Spec.Template.Spec.Containers\n\tstatefulSet.Spec.Template.Spec.Containers = oldStatefulSet.Spec.Template.Spec.Containers\n\n\tif !reflect.DeepEqual(statefulSet.Spec, oldStatefulSet.Spec) {\n\t\tallErrs = append(allErrs, field.Forbidden(field.NewPath(\"spec\"), \"updates to statefulset spec for fields other than 'replicas' are forbidden.\"))\n\t}\n\tstatefulSet.Spec.Replicas = restoreReplicas\n\tstatefulSet.Spec.Template.Spec.Containers = restoreContainers\n\n\tallErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(statefulSet.Spec.Replicas), field.NewPath(\"spec\", \"replicas\"))...)\n\tcontainerErrs, _ := apivalidation.ValidateContainerUpdates(statefulSet.Spec.Template.Spec.Containers, oldStatefulSet.Spec.Template.Spec.Containers, field.NewPath(\"spec\").Child(\"template\").Child(\"containers\"))\n\tallErrs = append(allErrs, containerErrs...)\n\treturn allErrs\n}\n\n\/\/ ValidateStatefulSetStatusUpdate tests if required fields in the StatefulSet are set.\nfunc ValidateStatefulSetStatusUpdate(statefulSet, oldStatefulSet *apps.StatefulSet) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&statefulSet.ObjectMeta, &oldStatefulSet.ObjectMeta, field.NewPath(\"metadata\"))...)\n\t\/\/ TODO: Validate status.\n\treturn allErrs\n}\nUpdate validation message.\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage validation\n\nimport (\n\t\"reflect\"\n\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/unversioned\"\n\tunversionedvalidation \"k8s.io\/kubernetes\/pkg\/api\/unversioned\/validation\"\n\tapivalidation \"k8s.io\/kubernetes\/pkg\/api\/validation\"\n\t\"k8s.io\/kubernetes\/pkg\/apis\/apps\"\n\t\"k8s.io\/kubernetes\/pkg\/labels\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/validation\/field\"\n)\n\n\/\/ ValidateStatefulSetName can be used to check whether the given StatefulSet name is valid.\n\/\/ Prefix indicates this name will be used as part of generation, in which case\n\/\/ trailing dashes are allowed.\nfunc ValidateStatefulSetName(name string, prefix bool) []string {\n\t\/\/ TODO: Validate that there's room for the suffix inserted by the pods.\n\t\/\/ Currently this is just \"-index\". 
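// [Illustrative sketch, not part of the original file.] ValidateStatefulSetUpdate
// above enforces spec immutability by temporarily copying the mutable fields
// (Replicas, Containers) from the old spec into the new one, deep-comparing,
// and then restoring them: any remaining difference must come from an
// immutable field. The same idea as a self-contained miniature (the spec
// type and its fields are invented for this sketch):
package main

import (
	"fmt"
	"reflect"
)

type spec struct {
	Replicas int    // mutable
	Image    string // treated as immutable here
}

func immutableFieldsChanged(newSpec, oldSpec spec) bool {
	restore := newSpec.Replicas
	newSpec.Replicas = oldSpec.Replicas // mask the mutable field
	changed := !reflect.DeepEqual(newSpec, oldSpec)
	// Restore, mirroring the original; redundant in this miniature because
	// spec is passed by value, but essential there, where the caller's
	// object is mutated through a pointer.
	newSpec.Replicas = restore
	return changed
}

func main() {
	old := spec{Replicas: 3, Image: "nginx:1.9"}
	fmt.Println(immutableFieldsChanged(spec{Replicas: 5, Image: "nginx:1.9"}, old))  // false: only Replicas moved
	fmt.Println(immutableFieldsChanged(spec{Replicas: 3, Image: "nginx:1.10"}, old)) // true: Image changed
}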
In the future we may allow a user\n\t\/\/ specified list of suffixes and we need to validate the longest one.\n\treturn apivalidation.NameIsDNSSubdomain(name, prefix)\n}\n\n\/\/ Validates the given template and ensures that it is in accordance with the desired selector.\nfunc ValidatePodTemplateSpecForStatefulSet(template *api.PodTemplateSpec, selector labels.Selector, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tif template == nil {\n\t\tallErrs = append(allErrs, field.Required(fldPath, \"\"))\n\t} else {\n\t\tif !selector.Empty() {\n\t\t\t\/\/ Verify that the StatefulSet selector matches the labels in template.\n\t\t\tlabels := labels.Set(template.Labels)\n\t\t\tif !selector.Matches(labels) {\n\t\t\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"metadata\", \"labels\"), template.Labels, \"`selector` does not match template `labels`\"))\n\t\t\t}\n\t\t}\n\t\t\/\/ TODO: Add validation for PodSpec, currently this will check volumes, which we know will\n\t\t\/\/ fail. We should really check that the union of the given volumes and volumeClaims match\n\t\t\/\/ volume mounts in the containers.\n\t\t\/\/ allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(template, fldPath)...)\n\t\tallErrs = append(allErrs, unversionedvalidation.ValidateLabels(template.Labels, fldPath.Child(\"labels\"))...)\n\t\tallErrs = append(allErrs, apivalidation.ValidateAnnotations(template.Annotations, fldPath.Child(\"annotations\"))...)\n\t\tallErrs = append(allErrs, apivalidation.ValidatePodSpecificAnnotations(template.Annotations, &template.Spec, fldPath.Child(\"annotations\"))...)\n\t}\n\treturn allErrs\n}\n\n\/\/ ValidateStatefulSetSpec tests if required fields in the StatefulSet spec are set.\nfunc ValidateStatefulSetSpec(spec *apps.StatefulSetSpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tallErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child(\"replicas\"))...)\n\tif spec.Selector == nil {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"selector\"), \"\"))\n\t} else {\n\t\tallErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child(\"selector\"))...)\n\t\tif len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {\n\t\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"selector\"), spec.Selector, \"empty selector is not valid for statefulset.\"))\n\t\t}\n\t}\n\n\tselector, err := unversioned.LabelSelectorAsSelector(spec.Selector)\n\tif err != nil {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"selector\"), spec.Selector, \"\"))\n\t} else {\n\t\tallErrs = append(allErrs, ValidatePodTemplateSpecForStatefulSet(&spec.Template, selector, fldPath.Child(\"template\"))...)\n\t}\n\n\tif spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways {\n\t\tallErrs = append(allErrs, field.NotSupported(fldPath.Child(\"template\", \"spec\", \"restartPolicy\"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)}))\n\t}\n\n\treturn allErrs\n}\n\n\/\/ ValidateStatefulSet validates a StatefulSet.\nfunc ValidateStatefulSet(statefulSet *apps.StatefulSet) field.ErrorList {\n\tallErrs := apivalidation.ValidateObjectMeta(&statefulSet.ObjectMeta, true, ValidateStatefulSetName, field.NewPath(\"metadata\"))\n\tallErrs = append(allErrs, ValidateStatefulSetSpec(&statefulSet.Spec, field.NewPath(\"spec\"))...)\n\treturn allErrs\n}\n\n\/\/ ValidateStatefulSetUpdate tests if required fields in the 
StatefulSet are set.\nfunc ValidateStatefulSetUpdate(statefulSet, oldStatefulSet *apps.StatefulSet) field.ErrorList {\n\tallErrs := apivalidation.ValidateObjectMetaUpdate(&statefulSet.ObjectMeta, &oldStatefulSet.ObjectMeta, field.NewPath(\"metadata\"))\n\n\t\/\/ TODO: For now we're taking the safe route and disallowing all updates to\n\t\/\/ spec except for Replicas, for scaling, and Template.Spec.containers.image\n\t\/\/ for rolling-update. Enable others on a case by case basis.\n\trestoreReplicas := statefulSet.Spec.Replicas\n\tstatefulSet.Spec.Replicas = oldStatefulSet.Spec.Replicas\n\n\trestoreContainers := statefulSet.Spec.Template.Spec.Containers\n\tstatefulSet.Spec.Template.Spec.Containers = oldStatefulSet.Spec.Template.Spec.Containers\n\n\tif !reflect.DeepEqual(statefulSet.Spec, oldStatefulSet.Spec) {\n\t\tallErrs = append(allErrs, field.Forbidden(field.NewPath(\"spec\"), \"updates to statefulset spec for fields other than 'replicas' and 'containers' is forbidden.\"))\n\t}\n\tstatefulSet.Spec.Replicas = restoreReplicas\n\tstatefulSet.Spec.Template.Spec.Containers = restoreContainers\n\n\tallErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(statefulSet.Spec.Replicas), field.NewPath(\"spec\", \"replicas\"))...)\n\tcontainerErrs, _ := apivalidation.ValidateContainerUpdates(statefulSet.Spec.Template.Spec.Containers, oldStatefulSet.Spec.Template.Spec.Containers, field.NewPath(\"spec\").Child(\"template\").Child(\"containers\"))\n\tallErrs = append(allErrs, containerErrs...)\n\treturn allErrs\n}\n\n\/\/ ValidateStatefulSetStatusUpdate tests if required fields in the StatefulSet are set.\nfunc ValidateStatefulSetStatusUpdate(statefulSet, oldStatefulSet *apps.StatefulSet) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&statefulSet.ObjectMeta, &oldStatefulSet.ObjectMeta, field.NewPath(\"metadata\"))...)\n\t\/\/ TODO: Validate status.\n\treturn allErrs\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"log\"\n\t\"time\"\n\t\"bufio\"\n\t\"strings\"\n\t\"strconv\"\n\t\"fmt\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n)\n\nfunc (self *AlmazServer) StartHttpface(bindAddress string) {\n log.Printf(\"Http interface available at %s\", bindAddress)\n http.HandleFunc(\"\/\", self.http_main)\n http.HandleFunc(\"\/list\/all\/\", self.http_list_all)\n http.HandleFunc(\"\/list\/group\/\", self.http_list_group)\n http.HandleFunc(\"\/almaz\/list\/all\/\", self.http_list_all)\n http.HandleFunc(\"\/almaz\/list\/group\/\", self.http_list_group)\n http.ListenAndServe(bindAddress, nil)\n}\n\nfunc (self *AlmazServer) http_main(w http.ResponseWriter, r *http.Request) {\n\tself.RLock()\n\tdefer self.RUnlock()\n\tfmt.Fprintf(w, \"Metrics count: %d\\n\", self.storage.MetricCount())\n}\n\nfunc (self *AlmazServer) http_list_all(w http.ResponseWriter, r *http.Request) {\n\tself.RLock()\n\tdefer self.RUnlock()\n\n\tperiods := []int64{60, 15*60, 60*60, 4*60*60, 24*60*60}\n\tnow := time.Now().Unix()\n\n\tfor k := range self.storage.metrics {\n\t\tfmt.Fprintf(w, \"%s\", k)\n\t\tcounts_per_period := self.storage.metrics[k].GetSumsPerPeriodUntilNow(periods, now)\n\t\tfor _, el := range counts_per_period {\n\t\t\tfmt.Fprintf(w, \"\\t%f\", el)\n\t\t}\n\t\tfmt.Fprintf(w, \"\\n\")\n\t}\n}\n\nfunc (self *AlmazServer) http_list_group(w http.ResponseWriter, r *http.Request) {\n\tself.RLock()\n\tdefer self.RUnlock()\n\n\tnow := time.Now().Unix()\n\n\tdefer r.Body.Close()\n\tscanner := bufio.NewScanner(r.Body)\n\tok := 
scanner.Scan()\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(w, \"400 Bad Request\\r\\n\")\n\t\tfmt.Fprintf(w, \"Use POST method and specify period durations in seconds \")\n\t\tfmt.Fprintf(w, \"on the first line of request data.\\n\")\n\t\treturn\n\t}\n\tperiods_str := strings.Split(scanner.Text(), \" \")\n\n\tperiods := make([]int64, len(periods_str))\n\tfor i := range periods_str {\n\t\tperiod, err := strconv.ParseInt(periods_str[i], 10, 64)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tfmt.Fprintf(w, \"400 Bad Request\\r\\n\")\n\t\t\tfmt.Fprintf(w, \"%s\", err)\n\t\t\treturn\n\t\t}\n\t\tperiods[i] = period\n\t}\n\n\tgroups := make([]string, 0)\n\tfor scanner.Scan() {\n\t\tgroups = append(groups, scanner.Text())\n\t}\n\n\tvar results [][]float64\n\tresults = self.storage.SumByPeriodGroupingQuery(groups, periods, now, true)\n\tfor k := range groups {\n\t\tfmt.Fprintf(w, \"%s\", groups[k])\n\t\tfor _, el := range results[k] {\n\t\t\tfmt.Fprintf(w, \"\\t%f\", el)\n\t\t}\n\t\tfmt.Fprintf(w, \"\\n\")\n\t}\n}\nseparate http views for hard and interpolated counterspackage main\n\nimport (\n\t\"log\"\n\t\"time\"\n\t\"bufio\"\n\t\"strings\"\n\t\"strconv\"\n\t\"fmt\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n)\n\nfunc (self *AlmazServer) StartHttpface(bindAddress string) {\n log.Printf(\"Http interface available at %s\", bindAddress)\n http.HandleFunc(\"\/\", self.http_main)\n http.HandleFunc(\"\/list\/all\/\", self.http_list_all)\n http.HandleFunc(\"\/list\/all-interpolated\/\", self.http_list_all_smooth)\n http.HandleFunc(\"\/list\/group\/\", self.http_list_group)\n http.HandleFunc(\"\/almaz\/list\/all\/\", self.http_list_all)\n http.HandleFunc(\"\/almaz\/list\/all-interpolated\/\", self.http_list_all_smooth)\n http.HandleFunc(\"\/almaz\/list\/group\/\", self.http_list_group)\n http.ListenAndServe(bindAddress, nil)\n}\n\nfunc (self *AlmazServer) http_main(w http.ResponseWriter, r *http.Request) {\n\tself.RLock()\n\tdefer self.RUnlock()\n\tfmt.Fprintf(w, \"Metrics count: %d\\n\", self.storage.MetricCount())\n}\n\nfunc (self *AlmazServer) http_list_all(w http.ResponseWriter, r *http.Request) {\n\tself.http_list_all_with_interpolation(w, r, false)\n}\n\nfunc (self *AlmazServer) http_list_all_smooth(w http.ResponseWriter, r *http.Request) {\n\tself.http_list_all_with_interpolation(w, r, true)\n}\n\nfunc (self *AlmazServer) http_list_all_with_interpolation(w http.ResponseWriter, r *http.Request, interpolate bool) {\n\tself.RLock()\n\tdefer self.RUnlock()\n\n\tperiods := []int64{60, 15*60, 60*60, 4*60*60, 24*60*60}\n\tnow := time.Now().Unix()\n\n\tfor k := range self.storage.metrics {\n\t\tfmt.Fprintf(w, \"%s\", k)\n\t\tcounts_per_period := self.storage.metrics[k].GetSumsPerPeriodUntilNowWithInterpolation(periods, now, interpolate)\n\t\tfor _, el := range counts_per_period {\n\t\t\tfmt.Fprintf(w, \"\\t%f\", el)\n\t\t}\n\t\tfmt.Fprintf(w, \"\\n\")\n\t}\n}\n\nfunc (self *AlmazServer) http_list_group(w http.ResponseWriter, r *http.Request) {\n\tself.RLock()\n\tdefer self.RUnlock()\n\n\tnow := time.Now().Unix()\n\n\tdefer r.Body.Close()\n\tscanner := bufio.NewScanner(r.Body)\n\tok := scanner.Scan()\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(w, \"400 Bad Request\\r\\n\")\n\t\tfmt.Fprintf(w, \"Use POST method and specify period durations in seconds \")\n\t\tfmt.Fprintf(w, \"on the first line of request data.\\n\")\n\t\treturn\n\t}\n\tperiods_str := strings.Split(scanner.Text(), \" \")\n\n\tperiods := make([]int64, 
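// [Illustrative sketch, not part of the original file.] The /list/group/
// handler above expects a POST body whose first line is a space-separated
// list of period lengths in seconds and whose remaining lines each name one
// group; it answers with one line per group, tab-separated sums per period.
// A minimal client (the host, port, and group names are made-up values):
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"strings"
)

func main() {
	// First line: periods (1m, 15m, 1h); following lines: group names.
	body := strings.NewReader("60 900 3600\nweb.requests\napi.requests\n")
	resp, err := http.Post("http://localhost:8080/list/group/", "text/plain", body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	out, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out)) // e.g. "web.requests\t12.0\t340.0\t980.0"
}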
len(periods_str))\n\tfor i := range periods_str {\n\t\tperiod, err := strconv.ParseInt(periods_str[i], 10, 64)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tfmt.Fprintf(w, \"400 Bad Request\\r\\n\")\n\t\t\tfmt.Fprintf(w, \"%s\", err)\n\t\t\treturn\n\t\t}\n\t\tperiods[i] = period\n\t}\n\n\tgroups := make([]string, 0)\n\tfor scanner.Scan() {\n\t\tgroups = append(groups, scanner.Text())\n\t}\n\n\tvar results [][]float64\n\tresults = self.storage.SumByPeriodGroupingQuery(groups, periods, now, true)\n\tfor k := range groups {\n\t\tfmt.Fprintf(w, \"%s\", groups[k])\n\t\tfor _, el := range results[k] {\n\t\t\tfmt.Fprintf(w, \"\\t%f\", el)\n\t\t}\n\t\tfmt.Fprintf(w, \"\\n\")\n\t}\n}\n<|endoftext|>"} {"text":"package connectors\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/projectjane\/jane\/models\"\n\t\"github.com\/projectjane\/jane\/parse\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\n\/\/ ImageMe Struct representing the image me connector\ntype ImageMe struct {\n}\n\n\/\/Listen Not implemented\nfunc (x ImageMe) Listen(commandMsgs chan<- models.Message, connector models.Connector) {\n\tdefer Recovery(connector)\n\treturn\n}\n\n\/\/ Command Takes in animateme or imageme command\nfunc (x ImageMe) Command(message models.Message, publishMsgs chan<- models.Message, connector models.Connector) {\n\tif match, tokens := parse.Match(\"image me*\", message.In.Text); match {\n\t\tmessage.In.Tags = parse.TagAppend(message.In.Tags, connector.Tags)\n\t\tmessage.Out.Text = callImageMe(tokens[\"*\"], connector.KeyValues[\"Key\"], connector.Pass, false)\n\t\tpublishMsgs <- message\n\t}\n\tif match, tokens := parse.Match(\"animate me*\", message.In.Text); match {\n\t\tmessage.In.Tags = parse.TagAppend(message.In.Tags, connector.Tags)\n\t\tmessage.Out.Text = callImageMe(tokens[\"*\"], connector.KeyValues[\"Key\"], connector.Pass, true)\n\t\tpublishMsgs <- message\n\t}\n}\n\n\/\/ Publish Not implemented\nfunc (x ImageMe) Publish(publishMsgs <-chan models.Message, connector models.Connector) {\n\treturn\n}\n\n\/\/ Help Returns help data\nfunc (x ImageMe) Help(connector models.Connector) (help []string) {\n\thelp = make([]string, 0)\n\thelp = append(help, \"image me - pulls back an image url\")\n\thelp = append(help, \"animate me - pulls back an animated gif url\")\n\treturn help\n}\n\ntype searchResult struct {\n\tItems []items `json:\"items\"`\n}\n\ntype items struct {\n\tLink string `json:\"link\"`\n}\n\nvar imageClient = &http.Client{}\n\nvar baseURL = \"https:\/\/www.googleapis.com\/customsearch\/v1?key=\"\nvar errorMessage = \"Error retrieving image\"\nvar animated bool\n\nfunc callImageMe(msg string, apiKey string, cx string, animated bool) string {\n\tstart := rand.Intn(3)\n\tif start < 1 {\n\t\tstart = 1\n\t}\n\n\tcx = \"&cx=\" + cx\n\treturnFields := fmt.Sprintf(\"&fields=items(link)&start=%v\", start)\n\tquery := \"&q=\" + url.QueryEscape(msg)\n\tfields := \"&searchType=image\"\n\tif animated {\n\t\tfields += \"&fileType=gif&hq=animated&tbs=itp:animated\"\n\t}\n\n\turl := baseURL + apiKey + cx + returnFields + query + fields\n\n\tresp, err := imageClient.Get(url)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn errorMessage\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn findDeprecatedImage(msg, animated)\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn errorMessage\n\t}\n\n\tvar result searchResult\n\terr = json.Unmarshal(body, &result)\n\tif 
err != nil {\n\t\tlog.Print(err)\n\t\treturn errorMessage\n\t}\n\n\tif len(result.Items) > 0 {\n\t\trandomLink := result.Items[rand.Intn(len(result.Items))]\n\t\treturn randomLink.Link\n\t}\n\n\treturn findDeprecatedImage(msg, animated)\n}\n\ntype deprecatedResult struct {\n\tResponseData responseData `json:\"responseData\"`\n}\n\ntype responseData struct {\n\tResults []result `json:\"results\"`\n}\n\ntype result struct {\n\tURL string `json:\"url\"`\n}\n\nfunc findDeprecatedImage(query string, animated bool) string {\n\tbaseURL := \"https:\/\/ajax.googleapis.com\/ajax\/services\/search\/images?v=1.0&rsz=8\"\n\tif animated {\n\t\tbaseURL += \"&as_filetype=gif\"\n\t}\n\n\tbaseURL += \"&q=\"\n\tsearchURL := baseURL + url.QueryEscape(query)\n\n\tif animated {\n\t\tsearchURL += url.QueryEscape(\" animated\")\n\t}\n\n\tresp, err := imageClient.Get(searchURL)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn errorMessage\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn errorMessage\n\t}\n\n\tvar result deprecatedResult\n\terr = json.Unmarshal(body, &result)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn errorMessage\n\t}\n\n\tindex := rand.Intn(len(result.ResponseData.Results))\n\n\tif len(result.ResponseData.Results) > 0 {\n\t\treturn result.ResponseData.Results[index].URL\n\t}\n\n\treturn \"No results found\"\n}\nRolling back a change to imagemepackage connectors\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/projectjane\/jane\/models\"\n\t\"github.com\/projectjane\/jane\/parse\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\n\/\/ ImageMe Struct representing the image me connector\ntype ImageMe struct {\n}\n\n\/\/Listen Not implemented\nfunc (x ImageMe) Listen(commandMsgs chan<- models.Message, connector models.Connector) {\n\tdefer Recovery(connector)\n\treturn\n}\n\n\/\/ Command Takes in animateme or imageme command\nfunc (x ImageMe) Command(message models.Message, publishMsgs chan<- models.Message, connector models.Connector) {\n\tif match, tokens := parse.Match(\"image me*\", message.In.Text); match {\n\t\tmessage.In.Tags = parse.TagAppend(message.In.Tags, connector.Tags)\n\t\tmessage.Out.Text = callImageMe(tokens[\"*\"], connector.Key, connector.Pass, false)\n\t\tpublishMsgs <- message\n\t}\n\tif match, tokens := parse.Match(\"animate me*\", message.In.Text); match {\n\t\tmessage.In.Tags = parse.TagAppend(message.In.Tags, connector.Tags)\n\t\tmessage.Out.Text = callImageMe(tokens[\"*\"], connector.Key, connector.Pass, true)\n\t\tpublishMsgs <- message\n\t}\n}\n\n\/\/ Publish Not implemented\nfunc (x ImageMe) Publish(publishMsgs <-chan models.Message, connector models.Connector) {\n\treturn\n}\n\n\/\/ Help Returns help data\nfunc (x ImageMe) Help(connector models.Connector) (help []string) {\n\thelp = make([]string, 0)\n\thelp = append(help, \"image me - pulls back an image url\")\n\thelp = append(help, \"animate me - pulls back an animated gif url\")\n\treturn help\n}\n\ntype searchResult struct {\n\tItems []items `json:\"items\"`\n}\n\ntype items struct {\n\tLink string `json:\"link\"`\n}\n\nvar imageClient = &http.Client{}\n\nvar baseURL = \"https:\/\/www.googleapis.com\/customsearch\/v1?key=\"\nvar errorMessage = \"Error retrieving image\"\nvar animated bool\n\nfunc callImageMe(msg string, apiKey string, cx string, animated bool) string {\n\tstart := rand.Intn(3)\n\tif start < 1 {\n\t\tstart = 1\n\t}\n\n\tcx = \"&cx=\" + cx\n\treturnFields := 
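// [Illustrative sketch, not part of the original file.] Note that
// findDeprecatedImage above calls rand.Intn(len(result.ResponseData.Results))
// before checking whether the slice is empty; rand.Intn panics when its
// argument is 0, so an empty result set crashes instead of reaching the
// "No results found" return. A guarded version of that tail (pickRandom is
// an invented helper name):
package main

import (
	"fmt"
	"math/rand"
)

func pickRandom(urls []string) string {
	if len(urls) == 0 {
		return "No results found"
	}
	return urls[rand.Intn(len(urls))] // safe: len(urls) > 0 here
}

func main() {
	fmt.Println(pickRandom(nil)) // No results found
	fmt.Println(pickRandom([]string{"http://a.example/x.gif", "http://b.example/y.gif"}))
}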
fmt.Sprintf(\"&fields=items(link)&start=%v\", start)\n\tquery := \"&q=\" + url.QueryEscape(msg)\n\tfields := \"&searchType=image\"\n\tif animated {\n\t\tfields += \"&fileType=gif&hq=animated&tbs=itp:animated\"\n\t}\n\n\turl := baseURL + apiKey + cx + returnFields + query + fields\n\n\tresp, err := imageClient.Get(url)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn errorMessage\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn findDeprecatedImage(msg, animated)\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn errorMessage\n\t}\n\n\tvar result searchResult\n\terr = json.Unmarshal(body, &result)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn errorMessage\n\t}\n\n\tif len(result.Items) > 0 {\n\t\trandomLink := result.Items[rand.Intn(len(result.Items))]\n\t\treturn randomLink.Link\n\t}\n\n\treturn findDeprecatedImage(msg, animated)\n}\n\ntype deprecatedResult struct {\n\tResponseData responseData `json:\"responseData\"`\n}\n\ntype responseData struct {\n\tResults []result `json:\"results\"`\n}\n\ntype result struct {\n\tURL string `json:\"url\"`\n}\n\nfunc findDeprecatedImage(query string, animated bool) string {\n\tbaseURL := \"https:\/\/ajax.googleapis.com\/ajax\/services\/search\/images?v=1.0&rsz=8\"\n\tif animated {\n\t\tbaseURL += \"&as_filetype=gif\"\n\t}\n\n\tbaseURL += \"&q=\"\n\tsearchURL := baseURL + url.QueryEscape(query)\n\n\tif animated {\n\t\tsearchURL += url.QueryEscape(\" animated\")\n\t}\n\n\tresp, err := imageClient.Get(searchURL)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn errorMessage\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn errorMessage\n\t}\n\n\tvar result deprecatedResult\n\terr = json.Unmarshal(body, &result)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn errorMessage\n\t}\n\n\tindex := rand.Intn(len(result.ResponseData.Results))\n\n\tif len(result.ResponseData.Results) > 0 {\n\t\treturn result.ResponseData.Results[index].URL\n\t}\n\n\treturn \"No results found\"\n}\n<|endoftext|>"} {"text":"\/\/\n\/\/ Copyright 2021, Sander van Harmelen, Michael Lihs\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage gitlab\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\n\/\/ ProtectedBranchesService handles communication with the protected branch\n\/\/ related methods of the GitLab API.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#protected-branches-api\ntype ProtectedBranchesService struct {\n\tclient *Client\n}\n\n\/\/ ProtectedBranch represents a protected branch.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#list-protected-branches\ntype ProtectedBranch struct {\n\tID int `json:\"id\"`\n\tName string `json:\"name\"`\n\tPushAccessLevels []*BranchAccessDescription `json:\"push_access_levels\"`\n\tMergeAccessLevels []*BranchAccessDescription 
`json:\"merge_access_levels\"`\n\tUnprotectAccessLevels []*BranchAccessDescription `json:\"unprotect_access_levels\"`\n\tAllowForcePush bool `json:\"allow_force_push\"`\n\tCodeOwnerApprovalRequired bool `json:\"code_owner_approval_required\"`\n}\n\n\/\/ BranchAccessDescription represents the access description for a protected\n\/\/ branch.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#protected-branches-api\ntype BranchAccessDescription struct {\n\tAccessLevel AccessLevelValue `json:\"access_level\"`\n\tAccessLevelDescription string `json:\"access_level_description\"`\n\tUserID int `json:\"user_id\"`\n\tGroupID int `json:\"group_id\"`\n}\n\n\/\/ ListProtectedBranchesOptions represents the available ListProtectedBranches()\n\/\/ options.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#list-protected-branches\ntype ListProtectedBranchesOptions ListOptions\n\n\/\/ ListProtectedBranches gets a list of protected branches from a project.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#list-protected-branches\nfunc (s *ProtectedBranchesService) ListProtectedBranches(pid interface{}, opt *ListProtectedBranchesOptions, options ...RequestOptionFunc) ([]*ProtectedBranch, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/protected_branches\", pathEscape(project))\n\n\treq, err := s.client.NewRequest(http.MethodGet, u, opt, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar p []*ProtectedBranch\n\tresp, err := s.client.Do(req, &p)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn p, resp, err\n}\n\n\/\/ GetProtectedBranch gets a single protected branch or wildcard protected branch.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#get-a-single-protected-branch-or-wildcard-protected-branch\nfunc (s *ProtectedBranchesService) GetProtectedBranch(pid interface{}, branch string, options ...RequestOptionFunc) (*ProtectedBranch, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/protected_branches\/%s\", pathEscape(project), url.PathEscape(branch))\n\n\treq, err := s.client.NewRequest(http.MethodGet, u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tp := new(ProtectedBranch)\n\tresp, err := s.client.Do(req, p)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn p, resp, err\n}\n\n\/\/ ProtectRepositoryBranchesOptions represents the available\n\/\/ ProtectRepositoryBranches() options.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#protect-repository-branches\ntype ProtectRepositoryBranchesOptions struct {\n\tName *string `url:\"name,omitempty\" json:\"name,omitempty\"`\n\tPushAccessLevel *AccessLevelValue `url:\"push_access_level,omitempty\" json:\"push_access_level,omitempty\"`\n\tMergeAccessLevel *AccessLevelValue `url:\"merge_access_level,omitempty\" json:\"merge_access_level,omitempty\"`\n\tUnprotectAccessLevel *AccessLevelValue `url:\"unprotect_access_level,omitempty\" json:\"unprotect_access_level,omitempty\"`\n\tAllowedToPush []*BranchPermissionOptions `url:\"allowed_to_push,omitempty\" json:\"allowed_to_push,omitempty\"`\n\tAllowedToMerge []*BranchPermissionOptions `url:\"allowed_to_merge,omitempty\" 
json:\"allowed_to_merge,omitempty\"`\n\tAllowedToUnprotect []*BranchPermissionOptions `url:\"allowed_to_unprotect,omitempty\" json:\"allowed_to_unprotect,omitempty\"`\n\tAllowForcePush *bool `url:\"allow_force_push,omitempty\" json:\"allow_force_push,omitempty\"`\n\tCodeOwnerApprovalRequired *bool `url:\"code_owner_approval_required,omitempty\" json:\"code_owner_approval_required,omitempty\"`\n}\n\n\/\/ BranchPermissionOptions represents a branch permission option.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#protect-repository-branches\ntype BranchPermissionOptions struct {\n\tUserID *int `url:\"user_id,omitempty\" json:\"user_id,omitempty\"`\n\tGroupID *int `url:\"group_id,omitempty\" json:\"group_id,omitempty\"`\n\tAccessLevel *AccessLevelValue `url:\"access_level,omitempty\" json:\"access_level,omitempty\"`\n}\n\n\/\/ ProtectRepositoryBranches protects a single repository branch or several\n\/\/ project repository branches using a wildcard protected branch.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#protect-repository-branches\nfunc (s *ProtectedBranchesService) ProtectRepositoryBranches(pid interface{}, opt *ProtectRepositoryBranchesOptions, options ...RequestOptionFunc) (*ProtectedBranch, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/protected_branches\", pathEscape(project))\n\n\treq, err := s.client.NewRequest(http.MethodPost, u, opt, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tp := new(ProtectedBranch)\n\tresp, err := s.client.Do(req, p)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn p, resp, err\n}\n\n\/\/ UnprotectRepositoryBranches unprotects the given protected branch or wildcard\n\/\/ protected branch.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#unprotect-repository-branches\nfunc (s *ProtectedBranchesService) UnprotectRepositoryBranches(pid interface{}, branch string, options ...RequestOptionFunc) (*Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/protected_branches\/%s\", pathEscape(project), url.PathEscape(branch))\n\n\treq, err := s.client.NewRequest(http.MethodDelete, u, nil, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.client.Do(req, nil)\n}\n\n\/\/ RequireCodeOwnerApprovalsOptions represents the available\n\/\/ RequireCodeOwnerApprovals() options.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ee\/api\/protected_branches.html#require-code-owner-approvals-for-a-single-branch\ntype RequireCodeOwnerApprovalsOptions struct {\n\tCodeOwnerApprovalRequired *bool `url:\"code_owner_approval_required,omitempty\" json:\"code_owner_approval_required,omitempty\"`\n}\n\n\/\/ RequireCodeOwnerApprovals updates the code owner approval.\n\/\/\n\/\/ Gitlab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ee\/api\/protected_branches.html#require-code-owner-approvals-for-a-single-branch\nfunc (s *ProtectedBranchesService) RequireCodeOwnerApprovals(pid interface{}, branch string, opt *RequireCodeOwnerApprovalsOptions, options ...RequestOptionFunc) (*Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/protected_branches\/%s\", pathEscape(project), url.PathEscape(branch))\n\n\treq, err := s.client.NewRequest(http.MethodPatch, 
u, opt, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.client.Do(req, nil)\n}\nAllow updating the allow_force_push option of protected branches\/\/\n\/\/ Copyright 2021, Sander van Harmelen, Michael Lihs\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage gitlab\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\n\/\/ ProtectedBranchesService handles communication with the protected branch\n\/\/ related methods of the GitLab API.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#protected-branches-api\ntype ProtectedBranchesService struct {\n\tclient *Client\n}\n\n\/\/ ProtectedBranch represents a protected branch.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#list-protected-branches\ntype ProtectedBranch struct {\n\tID int `json:\"id\"`\n\tName string `json:\"name\"`\n\tPushAccessLevels []*BranchAccessDescription `json:\"push_access_levels\"`\n\tMergeAccessLevels []*BranchAccessDescription `json:\"merge_access_levels\"`\n\tUnprotectAccessLevels []*BranchAccessDescription `json:\"unprotect_access_levels\"`\n\tAllowForcePush bool `json:\"allow_force_push\"`\n\tCodeOwnerApprovalRequired bool `json:\"code_owner_approval_required\"`\n}\n\n\/\/ BranchAccessDescription represents the access description for a protected\n\/\/ branch.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#protected-branches-api\ntype BranchAccessDescription struct {\n\tAccessLevel AccessLevelValue `json:\"access_level\"`\n\tAccessLevelDescription string `json:\"access_level_description\"`\n\tUserID int `json:\"user_id\"`\n\tGroupID int `json:\"group_id\"`\n}\n\n\/\/ ListProtectedBranchesOptions represents the available ListProtectedBranches()\n\/\/ options.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#list-protected-branches\ntype ListProtectedBranchesOptions ListOptions\n\n\/\/ ListProtectedBranches gets a list of protected branches from a project.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#list-protected-branches\nfunc (s *ProtectedBranchesService) ListProtectedBranches(pid interface{}, opt *ListProtectedBranchesOptions, options ...RequestOptionFunc) ([]*ProtectedBranch, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/protected_branches\", pathEscape(project))\n\n\treq, err := s.client.NewRequest(http.MethodGet, u, opt, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar p []*ProtectedBranch\n\tresp, err := s.client.Do(req, &p)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn p, resp, err\n}\n\n\/\/ GetProtectedBranch gets a single protected branch or wildcard protected branch.\n\/\/\n\/\/ GitLab API docs:\n\/\/ 
https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#get-a-single-protected-branch-or-wildcard-protected-branch\nfunc (s *ProtectedBranchesService) GetProtectedBranch(pid interface{}, branch string, options ...RequestOptionFunc) (*ProtectedBranch, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/protected_branches\/%s\", pathEscape(project), url.PathEscape(branch))\n\n\treq, err := s.client.NewRequest(http.MethodGet, u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tp := new(ProtectedBranch)\n\tresp, err := s.client.Do(req, p)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn p, resp, err\n}\n\n\/\/ ProtectRepositoryBranchesOptions represents the available\n\/\/ ProtectRepositoryBranches() options.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#protect-repository-branches\ntype ProtectRepositoryBranchesOptions struct {\n\tName *string `url:\"name,omitempty\" json:\"name,omitempty\"`\n\tPushAccessLevel *AccessLevelValue `url:\"push_access_level,omitempty\" json:\"push_access_level,omitempty\"`\n\tMergeAccessLevel *AccessLevelValue `url:\"merge_access_level,omitempty\" json:\"merge_access_level,omitempty\"`\n\tUnprotectAccessLevel *AccessLevelValue `url:\"unprotect_access_level,omitempty\" json:\"unprotect_access_level,omitempty\"`\n\tAllowedToPush []*BranchPermissionOptions `url:\"allowed_to_push,omitempty\" json:\"allowed_to_push,omitempty\"`\n\tAllowedToMerge []*BranchPermissionOptions `url:\"allowed_to_merge,omitempty\" json:\"allowed_to_merge,omitempty\"`\n\tAllowedToUnprotect []*BranchPermissionOptions `url:\"allowed_to_unprotect,omitempty\" json:\"allowed_to_unprotect,omitempty\"`\n\tAllowForcePush *bool `url:\"allow_force_push,omitempty\" json:\"allow_force_push,omitempty\"`\n\tCodeOwnerApprovalRequired *bool `url:\"code_owner_approval_required,omitempty\" json:\"code_owner_approval_required,omitempty\"`\n}\n\n\/\/ BranchPermissionOptions represents a branch permission option.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#protect-repository-branches\ntype BranchPermissionOptions struct {\n\tUserID *int `url:\"user_id,omitempty\" json:\"user_id,omitempty\"`\n\tGroupID *int `url:\"group_id,omitempty\" json:\"group_id,omitempty\"`\n\tAccessLevel *AccessLevelValue `url:\"access_level,omitempty\" json:\"access_level,omitempty\"`\n}\n\n\/\/ ProtectRepositoryBranches protects a single repository branch or several\n\/\/ project repository branches using a wildcard protected branch.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#protect-repository-branches\nfunc (s *ProtectedBranchesService) ProtectRepositoryBranches(pid interface{}, opt *ProtectRepositoryBranchesOptions, options ...RequestOptionFunc) (*ProtectedBranch, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/protected_branches\", pathEscape(project))\n\n\treq, err := s.client.NewRequest(http.MethodPost, u, opt, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tp := new(ProtectedBranch)\n\tresp, err := s.client.Do(req, p)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn p, resp, err\n}\n\n\/\/ UnprotectRepositoryBranches unprotects the given protected branch or wildcard\n\/\/ protected branch.\n\/\/\n\/\/ GitLab API docs:\n\/\/ 
https:\/\/docs.gitlab.com\/ce\/api\/protected_branches.html#unprotect-repository-branches\nfunc (s *ProtectedBranchesService) UnprotectRepositoryBranches(pid interface{}, branch string, options ...RequestOptionFunc) (*Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/protected_branches\/%s\", pathEscape(project), url.PathEscape(branch))\n\n\treq, err := s.client.NewRequest(http.MethodDelete, u, nil, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.client.Do(req, nil)\n}\n\n\/\/ AllowForcePushOptions represents the available\n\/\/ AllowForcePush() options.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ee\/api\/protected_branches.html#allow_force_push # FIXME: not documented yet but this is what is done by the UI\ntype AllowForcePushOptions struct {\n\tAllowForcePush *bool `url:\"allow_force_push,omitempty\" json:\"allow_force_push,omitempty\"`\n}\n\n\/\/ AllowForcePush updates the allow force push option.\n\/\/\n\/\/ Gitlab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ee\/api\/protected_branches.html#allow_force_push # FIXME: not documented yet but this is what is done by the UI\nfunc (s *ProtectedBranchesService) AllowForcePush(pid interface{}, branch string, opt *AllowForcePushOptions, options ...RequestOptionFunc) (*Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/protected_branches\/%s\", pathEscape(project), url.PathEscape(branch))\n\n\treq, err := s.client.NewRequest(http.MethodPatch, u, opt, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.client.Do(req, nil)\n}\n\n\/\/ RequireCodeOwnerApprovalsOptions represents the available\n\/\/ RequireCodeOwnerApprovals() options.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ee\/api\/protected_branches.html#require-code-owner-approvals-for-a-single-branch\ntype RequireCodeOwnerApprovalsOptions struct {\n\tCodeOwnerApprovalRequired *bool `url:\"code_owner_approval_required,omitempty\" json:\"code_owner_approval_required,omitempty\"`\n}\n\n\/\/ RequireCodeOwnerApprovals updates the code owner approval option.\n\/\/\n\/\/ Gitlab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ee\/api\/protected_branches.html#require-code-owner-approvals-for-a-single-branch\nfunc (s *ProtectedBranchesService) RequireCodeOwnerApprovals(pid interface{}, branch string, opt *RequireCodeOwnerApprovalsOptions, options ...RequestOptionFunc) (*Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/protected_branches\/%s\", pathEscape(project), url.PathEscape(branch))\n\n\treq, err := s.client.NewRequest(http.MethodPatch, u, opt, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.client.Do(req, nil)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright (C) 2014 The Syncthing Authors.\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify it\n\/\/ under the terms of the GNU General Public License as published by the Free\n\/\/ Software Foundation, either version 3 of the License, or (at your option)\n\/\/ any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful, but WITHOUT\n\/\/ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n\/\/ FITNESS FOR A PARTICULAR PURPOSE. 
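// [Illustrative sketch, not part of the original file.] Example usage of
// the AllowForcePush helper added above; as its FIXME notes, the PATCH
// endpoint mirrors what the GitLab UI does rather than documented API.
// The token and project ID are made-up values, assuming the xanzy/go-gitlab
// module:
package main

import (
	"log"

	"github.com/xanzy/go-gitlab"
)

func main() {
	client, err := gitlab.NewClient("glpat-example-token")
	if err != nil {
		log.Fatal(err)
	}
	opt := &gitlab.AllowForcePushOptions{AllowForcePush: gitlab.Bool(true)}
	if _, err := client.ProtectedBranches.AllowForcePush(1234, "main", opt); err != nil {
		log.Fatal(err)
	}
	log.Println("force push enabled on protected branch \"main\"")
}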
See the GNU General Public License for\n\/\/ more details.\n\/\/\n\/\/ You should have received a copy of the GNU General Public License along\n\/\/ with this program. If not, see .\n\npackage versioner\n\nimport \"testing\"\n\nfunc TestTaggedFilename(t *testing.T) {\n\tcases := [][3]string{\n\t\t{\"foo\/bar.baz\", \"tag\", \"foo\/bar~tag.baz\"},\n\t\t{\"bar.baz\", \"tag\", \"bar~tag.baz\"},\n\t\t{\"bar\", \"tag\", \"bar~tag\"},\n\n\t\t\/\/ Parsing test only\n\t\t{\"\", \"tag-only\", \"foo\/bar.baz~tag-only\"},\n\t\t{\"\", \"tag-only\", \"bar.baz~tag-only\"},\n\t}\n\n\tfor _, tc := range cases {\n\t\tif tc[0] != \"\" {\n\t\t\t\/\/ Test tagger\n\t\t\ttf := taggedFilename(tc[0], tc[1])\n\t\t\tif tf != tc[2] {\n\t\t\t\tt.Errorf(\"%s != %s\", tf, tc[2])\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Test parser\n\t\ttag := filenameTag(tc[2])\n\t\tif tag != tc[1] {\n\t\t\tt.Errorf(\"%s != %s\", tag, tc[1])\n\t\t}\n\t}\n}\nFix tests on Windows\/\/ Copyright (C) 2014 The Syncthing Authors.\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify it\n\/\/ under the terms of the GNU General Public License as published by the Free\n\/\/ Software Foundation, either version 3 of the License, or (at your option)\n\/\/ any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful, but WITHOUT\n\/\/ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n\/\/ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for\n\/\/ more details.\n\/\/\n\/\/ You should have received a copy of the GNU General Public License along\n\/\/ with this program. If not, see .\n\npackage versioner\n\nimport (\n\t\"path\/filepath\"\n\t\"testing\"\n)\n\nfunc TestTaggedFilename(t *testing.T) {\n\tcases := [][3]string{\n\t\t{filepath.Join(\"foo\", \"bar.baz\"), \"tag\", filepath.Join(\"foo\", \"bar~tag.baz\")},\n\t\t{\"bar.baz\", \"tag\", \"bar~tag.baz\"},\n\t\t{\"bar\", \"tag\", \"bar~tag\"},\n\n\t\t\/\/ Parsing test only\n\t\t{\"\", \"tag-only\", \"foo\/bar.baz~tag-only\"},\n\t\t{\"\", \"tag-only\", \"bar.baz~tag-only\"},\n\t}\n\n\tfor _, tc := range cases {\n\t\tif tc[0] != \"\" {\n\t\t\t\/\/ Test tagger\n\t\t\ttf := taggedFilename(tc[0], tc[1])\n\t\t\tif tf != tc[2] {\n\t\t\t\tt.Errorf(\"%s != %s\", tf, tc[2])\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Test parser\n\t\ttag := filenameTag(tc[2])\n\t\tif tag != tc[1] {\n\t\t\tt.Errorf(\"%s != %s\", tag, tc[1])\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage images\n\nimport (\n\tgoerrors \"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/errors\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/client-go\/tools\/record\"\n\tstatsapi 
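// [Illustrative sketch, not part of the original test file.] One
// implementation consistent with the test cases above - the tag is inserted
// before the extension, separated by "~" - purely to make the expected
// behaviour concrete; the real versioner package ships its own
// taggedFilename and filenameTag:
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func taggedFilename(name, tag string) string {
	ext := filepath.Ext(name) // ".baz" for "bar.baz", "" for "bar"
	return strings.TrimSuffix(name, ext) + "~" + tag + ext
}

func filenameTag(path string) string {
	base := filepath.Base(path)
	i := strings.LastIndex(base, "~")
	if i < 0 {
		return ""
	}
	rest := base[i+1:]
	if j := strings.Index(rest, "."); j >= 0 {
		return rest[:j] // tag sits between "~" and the extension
	}
	return rest // no extension after the tag
}

func main() {
	fmt.Println(taggedFilename(filepath.Join("foo", "bar.baz"), "tag")) // foo/bar~tag.baz
	fmt.Println(filenameTag("foo/bar~tag.baz"))                        // tag
	fmt.Println(filenameTag("bar.baz~tag-only"))                       // tag-only
}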
\"k8s.io\/kubernetes\/pkg\/kubelet\/apis\/stats\/v1alpha1\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/container\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/events\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/util\/sliceutils\"\n)\n\n\/\/ StatsProvider is an interface for fetching stats used during image garbage\n\/\/ collection.\ntype StatsProvider interface {\n\t\/\/ ImageFsStats returns the stats of the image filesystem.\n\tImageFsStats() (*statsapi.FsStats, error)\n}\n\n\/\/ Manages lifecycle of all images.\n\/\/\n\/\/ Implementation is thread-safe.\ntype ImageGCManager interface {\n\t\/\/ Applies the garbage collection policy. Errors include being unable to free\n\t\/\/ enough space as per the garbage collection policy.\n\tGarbageCollect() error\n\n\t\/\/ Start async garbage collection of images.\n\tStart()\n\n\tGetImageList() ([]container.Image, error)\n\n\t\/\/ Delete all unused images.\n\tDeleteUnusedImages() error\n}\n\n\/\/ A policy for garbage collecting images. Policy defines an allowed band in\n\/\/ which garbage collection will be run.\ntype ImageGCPolicy struct {\n\t\/\/ Any usage above this threshold will always trigger garbage collection.\n\t\/\/ This is the highest usage we will allow.\n\tHighThresholdPercent int\n\n\t\/\/ Any usage below this threshold will never trigger garbage collection.\n\t\/\/ This is the lowest threshold we will try to garbage collect to.\n\tLowThresholdPercent int\n\n\t\/\/ Minimum age at which an image can be garbage collected.\n\tMinAge time.Duration\n}\n\ntype realImageGCManager struct {\n\t\/\/ Container runtime\n\truntime container.Runtime\n\n\t\/\/ Records of images and their use.\n\timageRecords map[string]*imageRecord\n\timageRecordsLock sync.Mutex\n\n\t\/\/ The image garbage collection policy in use.\n\tpolicy ImageGCPolicy\n\n\t\/\/ statsProvider provides stats used during image garbage collection.\n\tstatsProvider StatsProvider\n\n\t\/\/ Recorder for Kubernetes events.\n\trecorder record.EventRecorder\n\n\t\/\/ Reference to this node.\n\tnodeRef *v1.ObjectReference\n\n\t\/\/ Track initialization\n\tinitialized bool\n\n\t\/\/ imageCache is the cache of latest image list.\n\timageCache imageCache\n\n\t\/\/ sandbox image exempted from GC\n\tsandboxImage string\n}\n\n\/\/ imageCache caches latest result of ListImages.\ntype imageCache struct {\n\t\/\/ sync.RWMutex is the mutex protects the image cache.\n\tsync.RWMutex\n\t\/\/ images is the image cache.\n\timages []container.Image\n}\n\n\/\/ set updates image cache.\nfunc (i *imageCache) set(images []container.Image) {\n\ti.Lock()\n\tdefer i.Unlock()\n\ti.images = images\n}\n\n\/\/ get gets a sorted (by image size) image list from image cache.\n\/\/ There is a potentical data race in this function. 
See PR #60448\n\/\/ Because there is no deepcopy function available currently, the sort is\n\/\/ done inside this function.\nfunc (i *imageCache) get() []container.Image {\n\ti.Lock()\n\tdefer i.Unlock()\n\tsort.Sort(sliceutils.ByImageSize(i.images))\n\treturn i.images\n}\n\n\/\/ Information about the images we track.\ntype imageRecord struct {\n\t\/\/ Time when this image was first detected.\n\tfirstDetected time.Time\n\n\t\/\/ Time when we last saw this image being used.\n\tlastUsed time.Time\n\n\t\/\/ Size of the image in bytes.\n\tsize int64\n}\n\nfunc NewImageGCManager(runtime container.Runtime, statsProvider StatsProvider, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy, sandboxImage string) (ImageGCManager, error) {\n\t\/\/ Validate policy.\n\tif policy.HighThresholdPercent < 0 || policy.HighThresholdPercent > 100 {\n\t\treturn nil, fmt.Errorf(\"invalid HighThresholdPercent %d, must be in range [0-100]\", policy.HighThresholdPercent)\n\t}\n\tif policy.LowThresholdPercent < 0 || policy.LowThresholdPercent > 100 {\n\t\treturn nil, fmt.Errorf(\"invalid LowThresholdPercent %d, must be in range [0-100]\", policy.LowThresholdPercent)\n\t}\n\tif policy.LowThresholdPercent > policy.HighThresholdPercent {\n\t\treturn nil, fmt.Errorf(\"LowThresholdPercent %d can not be higher than HighThresholdPercent %d\", policy.LowThresholdPercent, policy.HighThresholdPercent)\n\t}\n\tim := &realImageGCManager{\n\t\truntime: runtime,\n\t\tpolicy: policy,\n\t\timageRecords: make(map[string]*imageRecord),\n\t\tstatsProvider: statsProvider,\n\t\trecorder: recorder,\n\t\tnodeRef: nodeRef,\n\t\tinitialized: false,\n\t\tsandboxImage: sandboxImage,\n\t}\n\n\treturn im, nil\n}\n\nfunc (im *realImageGCManager) Start() {\n\tgo wait.Until(func() {\n\t\t\/\/ Initial detection makes the detected time \"unknown\" in the past.\n\t\tvar ts time.Time\n\t\tif im.initialized {\n\t\t\tts = time.Now()\n\t\t}\n\t\t_, err := im.detectImages(ts)\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"[imageGCManager] Failed to monitor images: %v\", err)\n\t\t} else {\n\t\t\tim.initialized = true\n\t\t}\n\t}, 5*time.Minute, wait.NeverStop)\n\n\t\/\/ Start a goroutine that periodically updates the image cache.\n\t\/\/ TODO(random-liu): Merge this with the previous loop.\n\tgo wait.Until(func() {\n\t\timages, err := im.runtime.ListImages()\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"[imageGCManager] Failed to update image list: %v\", err)\n\t\t} else {\n\t\t\tim.imageCache.set(images)\n\t\t}\n\t}, 30*time.Second, wait.NeverStop)\n\n}\n\n\/\/ Get a list of images on this node\nfunc (im *realImageGCManager) GetImageList() ([]container.Image, error) {\n\treturn im.imageCache.get(), nil\n}\n\nfunc (im *realImageGCManager) detectImages(detectTime time.Time) (sets.String, error) {\n\timagesInUse := sets.NewString()\n\n\t\/\/ Always consider the container runtime pod sandbox image in use\n\timageRef, err := im.runtime.GetImageRef(container.ImageSpec{Image: im.sandboxImage})\n\tif err == nil && imageRef != \"\" {\n\t\timagesInUse.Insert(imageRef)\n\t}\n\n\timages, err := im.runtime.ListImages()\n\tif err != nil {\n\t\treturn imagesInUse, err\n\t}\n\tpods, err := im.runtime.GetPods(true)\n\tif err != nil {\n\t\treturn imagesInUse, err\n\t}\n\n\t\/\/ Make a set of images in use by containers.\n\tfor _, pod := range pods {\n\t\tfor _, container := range pod.Containers {\n\t\t\tglog.V(5).Infof(\"Pod %s\/%s, container %s uses image %s(%s)\", pod.Namespace, pod.Name, container.Name, container.Image, 
container.ImageID)\n\t\t\timagesInUse.Insert(container.ImageID)\n\t\t}\n\t}\n\n\t\/\/ Add new images and record those being used.\n\tnow := time.Now()\n\tcurrentImages := sets.NewString()\n\tim.imageRecordsLock.Lock()\n\tdefer im.imageRecordsLock.Unlock()\n\tfor _, image := range images {\n\t\tglog.V(5).Infof(\"Adding image ID %s to currentImages\", image.ID)\n\t\tcurrentImages.Insert(image.ID)\n\n\t\t\/\/ New image, set it as detected now.\n\t\tif _, ok := im.imageRecords[image.ID]; !ok {\n\t\t\tglog.V(5).Infof(\"Image ID %s is new\", image.ID)\n\t\t\tim.imageRecords[image.ID] = &imageRecord{\n\t\t\t\tfirstDetected: detectTime,\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Set last used time to now if the image is being used.\n\t\tif isImageUsed(image.ID, imagesInUse) {\n\t\t\tglog.V(5).Infof(\"Setting Image ID %s lastUsed to %v\", image.ID, now)\n\t\t\tim.imageRecords[image.ID].lastUsed = now\n\t\t}\n\n\t\tglog.V(5).Infof(\"Image ID %s has size %d\", image.ID, image.Size)\n\t\tim.imageRecords[image.ID].size = image.Size\n\t}\n\n\t\/\/ Remove old images from our records.\n\tfor image := range im.imageRecords {\n\t\tif !currentImages.Has(image) {\n\t\t\tglog.V(5).Infof(\"Image ID %s is no longer present; removing from imageRecords\", image)\n\t\t\tdelete(im.imageRecords, image)\n\t\t}\n\t}\n\n\treturn imagesInUse, nil\n}\n\nfunc (im *realImageGCManager) GarbageCollect() error {\n\t\/\/ Get disk usage on disk holding images.\n\tfsStats, err := im.statsProvider.ImageFsStats()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar capacity, available int64\n\tif fsStats.CapacityBytes != nil {\n\t\tcapacity = int64(*fsStats.CapacityBytes)\n\t}\n\tif fsStats.AvailableBytes != nil {\n\t\tavailable = int64(*fsStats.AvailableBytes)\n\t}\n\n\tif available > capacity {\n\t\tglog.Warningf(\"available %d is larger than capacity %d\", available, capacity)\n\t\tavailable = capacity\n\t}\n\n\t\/\/ Check valid capacity.\n\tif capacity == 0 {\n\t\terr := goerrors.New(\"invalid capacity 0 on image filesystem\")\n\t\tim.recorder.Eventf(im.nodeRef, v1.EventTypeWarning, events.InvalidDiskCapacity, err.Error())\n\t\treturn err\n\t}\n\n\t\/\/ If over the max threshold, free enough to place us at the lower threshold.\n\tusagePercent := 100 - int(available*100\/capacity)\n\tif usagePercent >= im.policy.HighThresholdPercent {\n\t\tamountToFree := capacity*int64(100-im.policy.LowThresholdPercent)\/100 - available\n\t\tglog.Infof(\"[imageGCManager]: Disk usage on image filesystem is at %d%% which is over the high threshold (%d%%). Trying to free %d bytes\", usagePercent, im.policy.HighThresholdPercent, amountToFree)\n\t\tfreed, err := im.freeSpace(amountToFree, time.Now())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif freed < amountToFree {\n\t\t\terr := fmt.Errorf(\"failed to garbage collect required amount of images. Wanted to free %d bytes, but freed %d bytes\", amountToFree, freed)\n\t\t\tim.recorder.Eventf(im.nodeRef, v1.EventTypeWarning, events.FreeDiskSpaceFailed, err.Error())\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (im *realImageGCManager) DeleteUnusedImages() error {\n\tglog.Infof(\"attempting to delete unused images\")\n\t_, err := im.freeSpace(math.MaxInt64, time.Now())\n\treturn err\n}\n\n\/\/ Tries to free bytesToFree worth of images on the disk.\n\/\/\n\/\/ Returns the number of bytes free and an error if any occurred. 
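// [Worked example, not part of the original file.] The threshold arithmetic
// in GarbageCollect above, with made-up numbers: a 100 GiB image filesystem
// with 10 GiB available is 90% used. With HighThresholdPercent=85 and
// LowThresholdPercent=80 that triggers a collection aiming at the low
// threshold:
//   amountToFree = capacity*(100-80)/100 - available = 20 GiB - 10 GiB = 10 GiB
package main

import "fmt"

func main() {
	const gib = int64(1) << 30
	capacity, available := 100*gib, 10*gib
	usagePercent := 100 - int(available*100/capacity)
	fmt.Println(usagePercent) // 90, over a high threshold of 85

	lowThresholdPercent := int64(80)
	amountToFree := capacity*(100-lowThresholdPercent)/100 - available
	fmt.Println(amountToFree / gib) // 10 (GiB), freeing back down to 80% usage
}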
The number of\n\/\/ bytes freed is always returned.\n\/\/ Note that error may be nil and the number of bytes free may be less\n\/\/ than bytesToFree.\nfunc (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) (int64, error) {\n\timagesInUse, err := im.detectImages(freeTime)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tim.imageRecordsLock.Lock()\n\tdefer im.imageRecordsLock.Unlock()\n\n\t\/\/ Get all images in eviction order.\n\timages := make([]evictionInfo, 0, len(im.imageRecords))\n\tfor image, record := range im.imageRecords {\n\t\tif isImageUsed(image, imagesInUse) {\n\t\t\tglog.V(5).Infof(\"Image ID %s is being used\", image)\n\t\t\tcontinue\n\t\t}\n\t\timages = append(images, evictionInfo{\n\t\t\tid: image,\n\t\t\timageRecord: *record,\n\t\t})\n\t}\n\tsort.Sort(byLastUsedAndDetected(images))\n\n\t\/\/ Delete unused images until we've freed up enough space.\n\tvar deletionErrors []error\n\tspaceFreed := int64(0)\n\tfor _, image := range images {\n\t\tglog.V(5).Infof(\"Evaluating image ID %s for possible garbage collection\", image.id)\n\t\t\/\/ Images that are currently in use were given a newer lastUsed.\n\t\tif image.lastUsed.Equal(freeTime) || image.lastUsed.After(freeTime) {\n\t\t\tglog.V(5).Infof(\"Image ID %s has lastUsed=%v which is >= freeTime=%v, not eligible for garbage collection\", image.id, image.lastUsed, freeTime)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Avoid garbage collecting the image if the image is not old enough.\n\t\t\/\/ In such a case, the image may have just been pulled down, and will be used by a container right away.\n\n\t\tif freeTime.Sub(image.firstDetected) < im.policy.MinAge {\n\t\t\tglog.V(5).Infof(\"Image ID %s has age %v which is less than the policy's minAge of %v, not eligible for garbage collection\", image.id, freeTime.Sub(image.firstDetected), im.policy.MinAge)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Remove image. 
Continue despite errors.\n\t\tglog.Infof(\"[imageGCManager]: Removing image %q to free %d bytes\", image.id, image.size)\n\t\terr := im.runtime.RemoveImage(container.ImageSpec{Image: image.id})\n\t\tif err != nil {\n\t\t\tdeletionErrors = append(deletionErrors, err)\n\t\t\tcontinue\n\t\t}\n\t\tdelete(im.imageRecords, image.id)\n\t\tspaceFreed += image.size\n\n\t\tif spaceFreed >= bytesToFree {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif len(deletionErrors) > 0 {\n\t\treturn spaceFreed, fmt.Errorf(\"wanted to free %d bytes, but freed %d bytes space with errors in image deletion: %v\", bytesToFree, spaceFreed, errors.NewAggregate(deletionErrors))\n\t}\n\treturn spaceFreed, nil\n}\n\ntype evictionInfo struct {\n\tid string\n\timageRecord\n}\n\ntype byLastUsedAndDetected []evictionInfo\n\nfunc (ev byLastUsedAndDetected) Len() int { return len(ev) }\nfunc (ev byLastUsedAndDetected) Swap(i, j int) { ev[i], ev[j] = ev[j], ev[i] }\nfunc (ev byLastUsedAndDetected) Less(i, j int) bool {\n\t\/\/ Sort by last used, break ties by detected.\n\tif ev[i].lastUsed.Equal(ev[j].lastUsed) {\n\t\treturn ev[i].firstDetected.Before(ev[j].firstDetected)\n\t} else {\n\t\treturn ev[i].lastUsed.Before(ev[j].lastUsed)\n\t}\n}\n\nfunc isImageUsed(imageID string, imagesInUse sets.String) bool {\n\t\/\/ Check the image ID.\n\tif _, ok := imagesInUse[imageID]; ok {\n\t\treturn true\n\t}\n\treturn false\n}\nindent error flow\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage images\n\nimport (\n\tgoerrors \"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/errors\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/client-go\/tools\/record\"\n\tstatsapi \"k8s.io\/kubernetes\/pkg\/kubelet\/apis\/stats\/v1alpha1\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/container\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/events\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/util\/sliceutils\"\n)\n\n\/\/ StatsProvider is an interface for fetching stats used during image garbage\n\/\/ collection.\ntype StatsProvider interface {\n\t\/\/ ImageFsStats returns the stats of the image filesystem.\n\tImageFsStats() (*statsapi.FsStats, error)\n}\n\n\/\/ Manages lifecycle of all images.\n\/\/\n\/\/ Implementation is thread-safe.\ntype ImageGCManager interface {\n\t\/\/ Applies the garbage collection policy. Errors include being unable to free\n\t\/\/ enough space as per the garbage collection policy.\n\tGarbageCollect() error\n\n\t\/\/ Start async garbage collection of images.\n\tStart()\n\n\tGetImageList() ([]container.Image, error)\n\n\t\/\/ Delete all unused images.\n\tDeleteUnusedImages() error\n}\n\n\/\/ A policy for garbage collecting images. 
Policy defines an allowed band in\n\/\/ which garbage collection will be run.\ntype ImageGCPolicy struct {\n\t\/\/ Any usage above this threshold will always trigger garbage collection.\n\t\/\/ This is the highest usage we will allow.\n\tHighThresholdPercent int\n\n\t\/\/ Any usage below this threshold will never trigger garbage collection.\n\t\/\/ This is the lowest threshold we will try to garbage collect to.\n\tLowThresholdPercent int\n\n\t\/\/ Minimum age at which an image can be garbage collected.\n\tMinAge time.Duration\n}\n\ntype realImageGCManager struct {\n\t\/\/ Container runtime\n\truntime container.Runtime\n\n\t\/\/ Records of images and their use.\n\timageRecords map[string]*imageRecord\n\timageRecordsLock sync.Mutex\n\n\t\/\/ The image garbage collection policy in use.\n\tpolicy ImageGCPolicy\n\n\t\/\/ statsProvider provides stats used during image garbage collection.\n\tstatsProvider StatsProvider\n\n\t\/\/ Recorder for Kubernetes events.\n\trecorder record.EventRecorder\n\n\t\/\/ Reference to this node.\n\tnodeRef *v1.ObjectReference\n\n\t\/\/ Track initialization\n\tinitialized bool\n\n\t\/\/ imageCache is the cache of latest image list.\n\timageCache imageCache\n\n\t\/\/ sandbox image exempted from GC\n\tsandboxImage string\n}\n\n\/\/ imageCache caches the latest result of ListImages.\ntype imageCache struct {\n\t\/\/ sync.RWMutex is the mutex that protects the image cache.\n\tsync.RWMutex\n\t\/\/ images is the image cache.\n\timages []container.Image\n}\n\n\/\/ set updates image cache.\nfunc (i *imageCache) set(images []container.Image) {\n\ti.Lock()\n\tdefer i.Unlock()\n\ti.images = images\n}\n\n\/\/ get gets a sorted (by image size) image list from image cache.\n\/\/ There is a potential data race in this function. See PR #60448.\n\/\/ Because there is no deepcopy function available currently, the sort\n\/\/ function is moved inside this function.\nfunc (i *imageCache) get() []container.Image {\n\ti.Lock()\n\tdefer i.Unlock()\n\tsort.Sort(sliceutils.ByImageSize(i.images))\n\treturn i.images\n}\n\n\/\/ Information about the images we track.\ntype imageRecord struct {\n\t\/\/ Time when this image was first detected.\n\tfirstDetected time.Time\n\n\t\/\/ Time when we last saw this image being used.\n\tlastUsed time.Time\n\n\t\/\/ Size of the image in bytes.\n\tsize int64\n}\n\nfunc NewImageGCManager(runtime container.Runtime, statsProvider StatsProvider, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy, sandboxImage string) (ImageGCManager, error) {\n\t\/\/ Validate policy.\n\tif policy.HighThresholdPercent < 0 || policy.HighThresholdPercent > 100 {\n\t\treturn nil, fmt.Errorf(\"invalid HighThresholdPercent %d, must be in range [0-100]\", policy.HighThresholdPercent)\n\t}\n\tif policy.LowThresholdPercent < 0 || policy.LowThresholdPercent > 100 {\n\t\treturn nil, fmt.Errorf(\"invalid LowThresholdPercent %d, must be in range [0-100]\", policy.LowThresholdPercent)\n\t}\n\tif policy.LowThresholdPercent > policy.HighThresholdPercent {\n\t\treturn nil, fmt.Errorf(\"LowThresholdPercent %d can not be higher than HighThresholdPercent %d\", policy.LowThresholdPercent, policy.HighThresholdPercent)\n\t}\n\tim := &realImageGCManager{\n\t\truntime: runtime,\n\t\tpolicy: policy,\n\t\timageRecords: make(map[string]*imageRecord),\n\t\tstatsProvider: statsProvider,\n\t\trecorder: recorder,\n\t\tnodeRef: nodeRef,\n\t\tinitialized: false,\n\t\tsandboxImage: sandboxImage,\n\t}\n\n\treturn im, nil\n}\n\nfunc (im *realImageGCManager) Start() {\n\tgo wait.Until(func() {\n\t\t\/\/ The initial detection leaves the detected time as the zero value, i.e. \"unknown\", in the past.\n\t\tvar ts time.Time\n\t\tif im.initialized {\n\t\t\tts = time.Now()\n\t\t}\n\t\t_, err := im.detectImages(ts)\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"[imageGCManager] Failed to monitor images: %v\", err)\n\t\t} else {\n\t\t\tim.initialized = true\n\t\t}\n\t}, 5*time.Minute, wait.NeverStop)\n\n\t\/\/ Start a goroutine that periodically updates the image cache.\n\t\/\/ TODO(random-liu): Merge this with the previous loop.\n\tgo wait.Until(func() {\n\t\timages, err := im.runtime.ListImages()\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"[imageGCManager] Failed to update image list: %v\", err)\n\t\t} else {\n\t\t\tim.imageCache.set(images)\n\t\t}\n\t}, 30*time.Second, wait.NeverStop)\n\n}\n\n\/\/ Get a list of images on this node\nfunc (im *realImageGCManager) GetImageList() ([]container.Image, error) {\n\treturn im.imageCache.get(), nil\n}\n\nfunc (im *realImageGCManager) detectImages(detectTime time.Time) (sets.String, error) {\n\timagesInUse := sets.NewString()\n\n\t\/\/ Always consider the container runtime pod sandbox image in use\n\timageRef, err := im.runtime.GetImageRef(container.ImageSpec{Image: im.sandboxImage})\n\tif err == nil && imageRef != \"\" {\n\t\timagesInUse.Insert(imageRef)\n\t}\n\n\timages, err := im.runtime.ListImages()\n\tif err != nil {\n\t\treturn imagesInUse, err\n\t}\n\tpods, err := im.runtime.GetPods(true)\n\tif err != nil {\n\t\treturn imagesInUse, err\n\t}\n\n\t\/\/ Make a set of images in use by containers.\n\tfor _, pod := range pods {\n\t\tfor _, container := range pod.Containers {\n\t\t\tglog.V(5).Infof(\"Pod %s\/%s, container %s uses image %s(%s)\", pod.Namespace, pod.Name, container.Name, container.Image, container.ImageID)\n\t\t\timagesInUse.Insert(container.ImageID)\n\t\t}\n\t}\n\n\t\/\/ Add new images and record those being used.\n\tnow := time.Now()\n\tcurrentImages := sets.NewString()\n\tim.imageRecordsLock.Lock()\n\tdefer im.imageRecordsLock.Unlock()\n\tfor _, image := range images {\n\t\tglog.V(5).Infof(\"Adding image ID %s to currentImages\", image.ID)\n\t\tcurrentImages.Insert(image.ID)\n\n\t\t\/\/ New image, set it as detected now.\n\t\tif _, ok := im.imageRecords[image.ID]; !ok {\n\t\t\tglog.V(5).Infof(\"Image ID %s is new\", image.ID)\n\t\t\tim.imageRecords[image.ID] = &imageRecord{\n\t\t\t\tfirstDetected: detectTime,\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Set last used time to now if the image is being used.\n\t\tif isImageUsed(image.ID, imagesInUse) {\n\t\t\tglog.V(5).Infof(\"Setting Image ID %s lastUsed to %v\", image.ID, now)\n\t\t\tim.imageRecords[image.ID].lastUsed = now\n\t\t}\n\n\t\tglog.V(5).Infof(\"Image ID %s has size %d\", image.ID, image.Size)\n\t\tim.imageRecords[image.ID].size = image.Size\n\t}\n\n\t\/\/ Remove old images from our records.\n\tfor image := range im.imageRecords {\n\t\tif !currentImages.Has(image) {\n\t\t\tglog.V(5).Infof(\"Image ID %s is no longer present; removing from imageRecords\", image)\n\t\t\tdelete(im.imageRecords, image)\n\t\t}\n\t}\n\n\treturn imagesInUse, nil\n}\n\nfunc (im *realImageGCManager) GarbageCollect() error {\n\t\/\/ Get disk usage on disk holding images.\n\tfsStats, err := im.statsProvider.ImageFsStats()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar capacity, available int64\n\tif fsStats.CapacityBytes != nil {\n\t\tcapacity = int64(*fsStats.CapacityBytes)\n\t}\n\tif fsStats.AvailableBytes != nil {\n\t\tavailable = int64(*fsStats.AvailableBytes)\n\t}\n\n\tif available > capacity {\n\t\tglog.Warningf(\"available %d is larger than capacity %d\", available, capacity)\n\t\tavailable = capacity\n\t}\n\n\t\/\/ Check valid capacity.\n\tif capacity == 0 {\n\t\terr := goerrors.New(\"invalid capacity 0 on image filesystem\")\n\t\tim.recorder.Eventf(im.nodeRef, v1.EventTypeWarning, events.InvalidDiskCapacity, err.Error())\n\t\treturn err\n\t}\n\n\t\/\/ If over the max threshold, free enough to place us at the lower threshold.\n\tusagePercent := 100 - int(available*100\/capacity)\n\tif usagePercent >= im.policy.HighThresholdPercent {\n\t\t\/\/ Current usage is (capacity - available) and the target usage is\n\t\t\/\/ LowThresholdPercent of capacity, so the amount to free is\n\t\t\/\/ (capacity - available) - capacity*Low\/100 = capacity*(100-Low)\/100 - available.\n\t\tamountToFree := capacity*int64(100-im.policy.LowThresholdPercent)\/100 - available\n\t\tglog.Infof(\"[imageGCManager]: Disk usage on image filesystem is at %d%% which is over the high threshold (%d%%). Trying to free %d bytes\", usagePercent, im.policy.HighThresholdPercent, amountToFree)\n\t\tfreed, err := im.freeSpace(amountToFree, time.Now())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif freed < amountToFree {\n\t\t\terr := fmt.Errorf(\"failed to garbage collect required amount of images. Wanted to free %d bytes, but freed %d bytes\", amountToFree, freed)\n\t\t\tim.recorder.Eventf(im.nodeRef, v1.EventTypeWarning, events.FreeDiskSpaceFailed, err.Error())\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (im *realImageGCManager) DeleteUnusedImages() error {\n\tglog.Infof(\"attempting to delete unused images\")\n\t_, err := im.freeSpace(math.MaxInt64, time.Now())\n\treturn err\n}\n\n\/\/ Tries to free bytesToFree worth of images on the disk.\n\/\/\n\/\/ Returns the number of bytes freed and an error if any occurred. The number of\n\/\/ bytes freed is always returned.\n\/\/ Note that the error may be nil and the number of bytes freed may be less\n\/\/ than bytesToFree.\nfunc (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) (int64, error) {\n\timagesInUse, err := im.detectImages(freeTime)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tim.imageRecordsLock.Lock()\n\tdefer im.imageRecordsLock.Unlock()\n\n\t\/\/ Get all images in eviction order.\n\timages := make([]evictionInfo, 0, len(im.imageRecords))\n\tfor image, record := range im.imageRecords {\n\t\tif isImageUsed(image, imagesInUse) {\n\t\t\tglog.V(5).Infof(\"Image ID %s is being used\", image)\n\t\t\tcontinue\n\t\t}\n\t\timages = append(images, evictionInfo{\n\t\t\tid: image,\n\t\t\timageRecord: *record,\n\t\t})\n\t}\n\tsort.Sort(byLastUsedAndDetected(images))\n\n\t\/\/ Delete unused images until we've freed up enough space.\n\tvar deletionErrors []error\n\tspaceFreed := int64(0)\n\tfor _, image := range images {\n\t\tglog.V(5).Infof(\"Evaluating image ID %s for possible garbage collection\", image.id)\n\t\t\/\/ Images that are currently in use were given a newer lastUsed.\n\t\tif image.lastUsed.Equal(freeTime) || image.lastUsed.After(freeTime) {\n\t\t\tglog.V(5).Infof(\"Image ID %s has lastUsed=%v which is >= freeTime=%v, not eligible for garbage collection\", image.id, image.lastUsed, freeTime)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Avoid garbage collecting the image if it is not old enough.\n\t\t\/\/ In such a case, the image may have just been pulled down, and will be used by a container right away.\n\t\tif freeTime.Sub(image.firstDetected) < im.policy.MinAge {\n\t\t\tglog.V(5).Infof(\"Image ID %s has age %v which is less than the policy's minAge of %v, not eligible for garbage collection\", image.id, freeTime.Sub(image.firstDetected), im.policy.MinAge)\n\t\t\tcontinue\n\t\t}\n\n
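\t\t\/\/ The loop below stops as soon as spaceFreed reaches bytesToFree; sizes are\n\t\t\/\/ the runtime-reported image sizes recorded in detectImages.\n\t\t\/\/ Remove image. 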
Continue despite errors.\n\t\tglog.Infof(\"[imageGCManager]: Removing image %q to free %d bytes\", image.id, image.size)\n\t\terr := im.runtime.RemoveImage(container.ImageSpec{Image: image.id})\n\t\tif err != nil {\n\t\t\tdeletionErrors = append(deletionErrors, err)\n\t\t\tcontinue\n\t\t}\n\t\tdelete(im.imageRecords, image.id)\n\t\tspaceFreed += image.size\n\n\t\tif spaceFreed >= bytesToFree {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif len(deletionErrors) > 0 {\n\t\treturn spaceFreed, fmt.Errorf(\"wanted to free %d bytes, but freed %d bytes space with errors in image deletion: %v\", bytesToFree, spaceFreed, errors.NewAggregate(deletionErrors))\n\t}\n\treturn spaceFreed, nil\n}\n\ntype evictionInfo struct {\n\tid string\n\timageRecord\n}\n\ntype byLastUsedAndDetected []evictionInfo\n\nfunc (ev byLastUsedAndDetected) Len() int { return len(ev) }\nfunc (ev byLastUsedAndDetected) Swap(i, j int) { ev[i], ev[j] = ev[j], ev[i] }\nfunc (ev byLastUsedAndDetected) Less(i, j int) bool {\n\t\/\/ Sort by last used, break ties by detected.\n\tif ev[i].lastUsed.Equal(ev[j].lastUsed) {\n\t\treturn ev[i].firstDetected.Before(ev[j].firstDetected)\n\t}\n\treturn ev[i].lastUsed.Before(ev[j].lastUsed)\n}\n\nfunc isImageUsed(imageID string, imagesInUse sets.String) bool {\n\t\/\/ Check the image ID.\n\tif _, ok := imagesInUse[imageID]; ok {\n\t\treturn true\n\t}\n\treturn false\n}\n
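\n\/\/ exampleEvictionOrder is an illustrative sketch, not part of the original\n\/\/ file, showing how byLastUsedAndDetected orders candidates: least recently\n\/\/ used first, with ties broken by the oldest firstDetected. The timestamps\n\/\/ are arbitrary values chosen for the demonstration.\nfunc exampleEvictionOrder() []evictionInfo {\n\tt0 := time.Now()\n\tinfos := []evictionInfo{\n\t\t{id: \"recently-used\", imageRecord: imageRecord{lastUsed: t0, firstDetected: t0.Add(-time.Hour)}},\n\t\t{id: \"stale\", imageRecord: imageRecord{lastUsed: t0.Add(-2 * time.Hour), firstDetected: t0.Add(-time.Hour)}},\n\t}\n\tsort.Sort(byLastUsedAndDetected(infos))\n\t\/\/ infos[0].id == \"stale\": it was used least recently, so it would be evicted first.\n\treturn infos\n}\n<|endoftext|>"} {"text":"package wire\n\nimport (\n\t\"bytes\"\n\t\"crypto\/rand\"\n\n\t\"github.com\/lucas-clemente\/quic-go\/internal\/protocol\"\n\t\"github.com\/lucas-clemente\/quic-go\/internal\/utils\"\n)\n\n\/\/ ComposeGQUICVersionNegotiation composes a Version Negotiation Packet for gQUIC\nfunc ComposeGQUICVersionNegotiation(connID protocol.ConnectionID, versions []protocol.VersionNumber) []byte {\n\tbuf := bytes.NewBuffer(make([]byte, 0, 1+8+len(versions)*4))\n\tbuf.Write([]byte{0x1 | 0x8}) \/\/ type byte\n\tbuf.Write(connID)\n\tfor _, v := range versions {\n\t\tutils.BigEndian.WriteUint32(buf, uint32(v))\n\t}\n\treturn buf.Bytes()\n}\n\n\/\/ ComposeVersionNegotiation composes a Version Negotiation according to the IETF draft\nfunc ComposeVersionNegotiation(destConnID, srcConnID protocol.ConnectionID, versions []protocol.VersionNumber) ([]byte, error) {\n\tgreasedVersions := protocol.GetGreasedVersions(versions)\n\tbuf := bytes.NewBuffer(make([]byte, 0, 1+8+4+len(greasedVersions)*4))\n\tr := make([]byte, 1)\n\t_, _ = rand.Read(r) \/\/ ignore the error here. 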
It is not critical to have perfect random here.\n\tbuf.WriteByte(r[0] | 0x80)\n\tutils.BigEndian.WriteUint32(buf, 0) \/\/ version 0\n\tconnIDLen, err := encodeConnIDLen(destConnID, srcConnID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbuf.WriteByte(connIDLen)\n\tbuf.Write(destConnID)\n\tbuf.Write(srcConnID)\n\tfor _, v := range greasedVersions {\n\t\tutils.BigEndian.WriteUint32(buf, uint32(v))\n\t}\n\treturn buf.Bytes(), nil\n}\nfix buffer size for version negotiation packetspackage wire\n\nimport (\n\t\"bytes\"\n\t\"crypto\/rand\"\n\n\t\"github.com\/lucas-clemente\/quic-go\/internal\/protocol\"\n\t\"github.com\/lucas-clemente\/quic-go\/internal\/utils\"\n)\n\n\/\/ ComposeGQUICVersionNegotiation composes a Version Negotiation Packet for gQUIC\nfunc ComposeGQUICVersionNegotiation(connID protocol.ConnectionID, versions []protocol.VersionNumber) []byte {\n\tbuf := bytes.NewBuffer(make([]byte, 0, 1+8+len(versions)*4))\n\tbuf.Write([]byte{0x1 | 0x8}) \/\/ type byte\n\tbuf.Write(connID)\n\tfor _, v := range versions {\n\t\tutils.BigEndian.WriteUint32(buf, uint32(v))\n\t}\n\treturn buf.Bytes()\n}\n\n\/\/ ComposeVersionNegotiation composes a Version Negotiation according to the IETF draft\nfunc ComposeVersionNegotiation(destConnID, srcConnID protocol.ConnectionID, versions []protocol.VersionNumber) ([]byte, error) {\n\tgreasedVersions := protocol.GetGreasedVersions(versions)\n\texpectedLen := 1 \/* type byte *\/ + 4 \/* version field *\/ + 1 \/* connection ID length field *\/ + destConnID.Len() + srcConnID.Len() + len(greasedVersions)*4\n\tbuf := bytes.NewBuffer(make([]byte, 0, expectedLen))\n\tr := make([]byte, 1)\n\t_, _ = rand.Read(r) \/\/ ignore the error here. It is not critical to have perfect random here.\n\tbuf.WriteByte(r[0] | 0x80)\n\tutils.BigEndian.WriteUint32(buf, 0) \/\/ version 0\n\tconnIDLen, err := encodeConnIDLen(destConnID, srcConnID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbuf.WriteByte(connIDLen)\n\tbuf.Write(destConnID)\n\tbuf.Write(srcConnID)\n\tfor _, v := range greasedVersions {\n\t\tutils.BigEndian.WriteUint32(buf, uint32(v))\n\t}\n\treturn buf.Bytes(), nil\n}\n
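\n\/\/ Worked example (an illustrative gloss, not upstream code): for an 8-byte\n\/\/ destination connection ID, an 8-byte source connection ID, and N versions\n\/\/ after greasing, expectedLen is 1 + 4 + 1 + 8 + 8 + 4*N bytes, e.g. 34 bytes\n\/\/ for N = 3. The old fixed 1+8+4 sizing under-sized the initial allocation\n\/\/ once both connection IDs were written (the buffer still grew correctly, at\n\/\/ the cost of a reallocation), which is what the commit above fixes.\n<|endoftext|>"} {"text":"\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage hairpin\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n\t\"strconv\"\n\n\t\"github.com\/golang\/glog\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/exec\"\n)\n\nconst (\n\tsysfsNetPath = \"\/sys\/devices\/virtual\/net\"\n\tbrportRelativePath = \"brport\"\n\thairpinModeRelativePath = \"hairpin_mode\"\n\thairpinEnable = \"1\"\n)\n\nvar (\n\tethtoolOutputRegex = regexp.MustCompile(\"peer_ifindex: (\\\\d+)\")\n)\n\nfunc SetUpContainerPid(containerPid int, containerInterfaceName string) error {\n\tpidStr := fmt.Sprintf(\"%d\", containerPid)\n\tnsenterArgs := []string{\"-t\", pidStr, \"-n\"}\n\treturn setUpContainerInternal(containerInterfaceName, pidStr, nsenterArgs)\n}\n\nfunc SetUpContainerPath(netnsPath string, containerInterfaceName string) error {\n\tif 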
netnsPath[0] != '\/' {\n\t\treturn fmt.Errorf(\"netnsPath path '%s' was invalid\", netnsPath)\n\t}\n\tnsenterArgs := []string{\"-n\", netnsPath}\n\treturn setUpContainerInternal(containerInterfaceName, netnsPath, nsenterArgs)\n}\n\nfunc setUpContainerInternal(containerInterfaceName, containerDesc string, nsenterArgs []string) error {\n\te := exec.New()\n\thostIfName, err := findPairInterfaceOfContainerInterface(e, containerInterfaceName, containerDesc, nsenterArgs)\n\tif err != nil {\n\t\tglog.Infof(\"Unable to find pair interface, setting up all interfaces: %v\", err)\n\t\treturn setUpAllInterfaces()\n\t}\n\treturn setUpInterface(hostIfName)\n}\n\nfunc findPairInterfaceOfContainerInterface(e exec.Interface, containerInterfaceName, containerDesc string, nsenterArgs []string) (string, error) {\n\tnsenterPath, err := e.LookPath(\"nsenter\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tethtoolPath, err := e.LookPath(\"ethtool\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tnsenterArgs = append(nsenterArgs, \"-F\", \"--\", ethtoolPath, \"--statistics\", containerInterfaceName)\n\toutput, err := e.Command(nsenterPath, nsenterArgs...).CombinedOutput()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Unable to query interface %s of container %s: %v: %s\", containerInterfaceName, containerDesc, err, string(output))\n\t}\n\t\/\/ look for peer_ifindex\n\tmatch := ethtoolOutputRegex.FindSubmatch(output)\n\tif match == nil {\n\t\treturn \"\", fmt.Errorf(\"No peer_ifindex in interface statistics for %s of container %s\", containerInterfaceName, containerDesc)\n\t}\n\tpeerIfIndex, err := strconv.Atoi(string(match[1]))\n\tif err != nil { \/\/ seems impossible (\\d+ not numeric)\n\t\treturn \"\", fmt.Errorf(\"peer_ifindex wasn't numeric: %s: %v\", match[1], err)\n\t}\n\tiface, err := net.InterfaceByIndex(peerIfIndex)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn iface.Name, nil\n}\n\nfunc setUpAllInterfaces() error {\n\tinterfaces, err := net.Interfaces()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, netIf := range interfaces {\n\t\tsetUpInterface(netIf.Name) \/\/ ignore errors\n\t}\n\treturn nil\n}\n\nfunc setUpInterface(ifName string) error {\n\tglog.V(3).Infof(\"Enabling hairpin on interface %s\", ifName)\n\tifPath := path.Join(sysfsNetPath, ifName)\n\tif _, err := os.Stat(ifPath); err != nil {\n\t\treturn err\n\t}\n\tbrportPath := path.Join(ifPath, brportRelativePath)\n\tif _, err := os.Stat(brportPath); err != nil && os.IsNotExist(err) {\n\t\t\/\/ Device is not on a bridge, so doesn't need hairpin mode\n\t\treturn nil\n\t}\n\thairpinModeFile := path.Join(brportPath, hairpinModeRelativePath)\n\treturn ioutil.WriteFile(hairpinModeFile, []byte(hairpinEnable), 0644)\n}\nAbandon setting hairpin mode if finding the peer interface fails\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage hairpin\n\nimport 
(\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n\t\"strconv\"\n\n\t\"github.com\/golang\/glog\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/exec\"\n)\n\nconst (\n\tsysfsNetPath = \"\/sys\/devices\/virtual\/net\"\n\tbrportRelativePath = \"brport\"\n\thairpinModeRelativePath = \"hairpin_mode\"\n\thairpinEnable = \"1\"\n)\n\nvar (\n\tethtoolOutputRegex = regexp.MustCompile(\"peer_ifindex: (\\\\d+)\")\n)\n\nfunc SetUpContainerPid(containerPid int, containerInterfaceName string) error {\n\tpidStr := fmt.Sprintf(\"%d\", containerPid)\n\tnsenterArgs := []string{\"-t\", pidStr, \"-n\"}\n\treturn setUpContainerInternal(containerInterfaceName, pidStr, nsenterArgs)\n}\n\nfunc SetUpContainerPath(netnsPath string, containerInterfaceName string) error {\n\tif netnsPath[0] != '\/' {\n\t\treturn fmt.Errorf(\"netnsPath path '%s' was invalid\", netnsPath)\n\t}\n\tnsenterArgs := []string{\"-n\", netnsPath}\n\treturn setUpContainerInternal(containerInterfaceName, netnsPath, nsenterArgs)\n}\n\nfunc setUpContainerInternal(containerInterfaceName, containerDesc string, nsenterArgs []string) error {\n\te := exec.New()\n\thostIfName, err := findPairInterfaceOfContainerInterface(e, containerInterfaceName, containerDesc, nsenterArgs)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn setUpInterface(hostIfName)\n}\n\nfunc findPairInterfaceOfContainerInterface(e exec.Interface, containerInterfaceName, containerDesc string, nsenterArgs []string) (string, error) {\n\tnsenterPath, err := e.LookPath(\"nsenter\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tethtoolPath, err := e.LookPath(\"ethtool\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tnsenterArgs = append(nsenterArgs, \"-F\", \"--\", ethtoolPath, \"--statistics\", containerInterfaceName)\n\toutput, err := e.Command(nsenterPath, nsenterArgs...).CombinedOutput()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Unable to query interface %s of container %s: %v: %s\", containerInterfaceName, containerDesc, err, string(output))\n\t}\n\t\/\/ look for peer_ifindex\n\tmatch := ethtoolOutputRegex.FindSubmatch(output)\n\tif match == nil {\n\t\treturn \"\", fmt.Errorf(\"No peer_ifindex in interface statistics for %s of container %s\", containerInterfaceName, containerDesc)\n\t}\n\tpeerIfIndex, err := strconv.Atoi(string(match[1]))\n\tif err != nil { \/\/ seems impossible (\\d+ not numeric)\n\t\treturn \"\", fmt.Errorf(\"peer_ifindex wasn't numeric: %s: %v\", match[1], err)\n\t}\n\tiface, err := net.InterfaceByIndex(peerIfIndex)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn iface.Name, nil\n}\n\nfunc setUpInterface(ifName string) error {\n\tglog.V(3).Infof(\"Enabling hairpin on interface %s\", ifName)\n\tifPath := path.Join(sysfsNetPath, ifName)\n\tif _, err := os.Stat(ifPath); err != nil {\n\t\treturn err\n\t}\n\tbrportPath := path.Join(ifPath, brportRelativePath)\n\tif _, err := os.Stat(brportPath); err != nil && os.IsNotExist(err) {\n\t\t\/\/ Device is not on a bridge, so doesn't need hairpin mode\n\t\treturn nil\n\t}\n\thairpinModeFile := path.Join(brportPath, hairpinModeRelativePath)\n\treturn ioutil.WriteFile(hairpinModeFile, []byte(hairpinEnable), 0644)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright (c) 2018 - The Event Horizon authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law 
or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage local\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\n\teh \"github.com\/looplab\/eventhorizon\"\n\t\"github.com\/looplab\/eventhorizon\/codec\/json\"\n)\n\n\/\/ DefaultQueueSize is the default queue size per handler for publishing events.\nvar DefaultQueueSize = 10\n\n\/\/ EventBus is a local event bus that delegates handling of published events\n\/\/ to all matching registered handlers, in order of registration.\ntype EventBus struct {\n\tgroup *Group\n\tregistered map[eh.EventHandlerType]struct{}\n\tregisteredMu sync.RWMutex\n\terrCh chan eh.EventBusError\n\twg sync.WaitGroup\n\tcodec eh.EventCodec\n}\n\n\/\/ NewEventBus creates a EventBus.\nfunc NewEventBus(options ...Option) *EventBus {\n\tb := &EventBus{\n\t\tgroup: NewGroup(),\n\t\tregistered: map[eh.EventHandlerType]struct{}{},\n\t\terrCh: make(chan eh.EventBusError, 100),\n\t\tcodec: &json.EventCodec{},\n\t}\n\n\t\/\/ Apply configuration options.\n\tfor _, option := range options {\n\t\tif option == nil {\n\t\t\tcontinue\n\t\t}\n\t\toption(b)\n\t}\n\n\treturn b\n}\n\n\/\/ Option is an option setter used to configure creation.\ntype Option func(*EventBus)\n\n\/\/ WithCodec uses the specified codec for encoding events.\nfunc WithCodec(codec eh.EventCodec) Option {\n\treturn func(b *EventBus) {\n\t\tb.codec = codec\n\t}\n}\n\n\/\/ WithGroup uses a specified group for transmitting events.\nfunc WithGroup(g *Group) Option {\n\treturn func(b *EventBus) {\n\t\tb.group = g\n\t}\n}\n\n\/\/ HandlerType implements the HandlerType method of the eventhorizon.EventHandler interface.\nfunc (b *EventBus) HandlerType() eh.EventHandlerType {\n\treturn \"eventbus\"\n}\n\n\/\/ HandleEvent implements the HandleEvent method of the eventhorizon.EventHandler interface.\nfunc (b *EventBus) HandleEvent(ctx context.Context, event eh.Event) error {\n\tdata, err := b.codec.MarshalEvent(ctx, event)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not marshal event: %w\", err)\n\t}\n\n\treturn b.group.publish(ctx, data)\n}\n\n\/\/ AddHandler implements the AddHandler method of the eventhorizon.EventBus interface.\nfunc (b *EventBus) AddHandler(ctx context.Context, m eh.EventMatcher, h eh.EventHandler) error {\n\tif m == nil {\n\t\treturn eh.ErrMissingMatcher\n\t}\n\tif h == nil {\n\t\treturn eh.ErrMissingHandler\n\t}\n\n\t\/\/ Check handler existence.\n\tb.registeredMu.Lock()\n\tdefer b.registeredMu.Unlock()\n\tif _, ok := b.registered[h.HandlerType()]; ok {\n\t\treturn eh.ErrHandlerAlreadyAdded\n\t}\n\n\t\/\/ Get or create the channel.\n\tid := h.HandlerType().String()\n\tch := b.group.channel(id)\n\n\t\/\/ Register handler.\n\tb.registered[h.HandlerType()] = struct{}{}\n\n\t\/\/ Handle until context is cancelled.\n\tb.wg.Add(1)\n\tgo b.handle(ctx, m, h, ch)\n\n\treturn nil\n}\n\n\/\/ Errors implements the Errors method of the eventhorizon.EventBus interface.\nfunc (b *EventBus) Errors() <-chan eh.EventBusError {\n\treturn b.errCh\n}\n\n\/\/ Wait for all channels to close in the event bus group\nfunc (b *EventBus) Wait() {\n\tb.wg.Wait()\n\tb.group.close()\n}\n\ntype evt struct {\n\tctxVals map[string]interface{}\n\tevent eh.Event\n}\n\n\/\/ Handles all events coming in on the channel.\nfunc (b *EventBus) handle(ctx context.Context, m 
eh.EventMatcher, h eh.EventHandler, ch <-chan []byte) {\n\tdefer b.wg.Done()\n\n\tfor {\n\t\tselect {\n\t\tcase data := <-ch:\n\t\t\tevent, ctx, err := b.codec.UnmarshalEvent(ctx, data)\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"could not unmarshal event: %w\", err)\n\t\t\t\tselect {\n\t\t\t\tcase b.errCh <- eh.EventBusError{Err: err, Ctx: ctx}:\n\t\t\t\tdefault:\n\t\t\t\t\tlog.Printf(\"eventhorizon: missed error in local event bus: %s\", err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ Ignore non-matching events.\n\t\t\tif !m.Match(event) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Handle the event if it did match.\n\t\t\tif err := h.HandleEvent(ctx, event); err != nil {\n\t\t\t\terr = fmt.Errorf(\"could not handle event (%s): %s\", h.HandlerType(), err.Error())\n\t\t\t\tselect {\n\t\t\t\tcase b.errCh <- eh.EventBusError{Err: err, Ctx: ctx, Event: event}:\n\t\t\t\tdefault:\n\t\t\t\t\tlog.Printf(\"eventhorizon: missed error in local event bus: %s\", err)\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Group is a publishing group shared by multiple event busses locally, if needed.\ntype Group struct {\n\tbus map[string]chan []byte\n\tbusMu sync.RWMutex\n}\n\n\/\/ NewGroup creates a Group.\nfunc NewGroup() *Group {\n\treturn &Group{\n\t\tbus: map[string]chan []byte{},\n\t}\n}\n\nfunc (g *Group) channel(id string) <-chan []byte {\n\tg.busMu.Lock()\n\tdefer g.busMu.Unlock()\n\n\tif ch, ok := g.bus[id]; ok {\n\t\treturn ch\n\t}\n\n\tch := make(chan []byte, DefaultQueueSize)\n\tg.bus[id] = ch\n\treturn ch\n}\n\nfunc (g *Group) publish(ctx context.Context, b []byte) error {\n\tg.busMu.RLock()\n\tdefer g.busMu.RUnlock()\n\n\tfor _, ch := range g.bus {\n\t\t\/\/ Marshal and unmarshal the context to both simulate only sending data\n\t\t\/\/ that would be sent over a network bus and also break any relationship\n\t\t\/\/ with the old context.\n\t\tselect {\n\t\tcase ch <- b:\n\t\tdefault:\n\t\t\tlog.Printf(\"eventhorizon: publish queue full in local event bus\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Closes all the open channels after handling is done.\nfunc (g *Group) close() {\n\tfor _, ch := range g.bus {\n\t\tclose(ch)\n\t}\n\tg.bus = nil\n}\nfix: Simulate network delay in local event bus\/\/ Copyright (c) 2018 - The Event Horizon authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage local\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\teh \"github.com\/looplab\/eventhorizon\"\n\t\"github.com\/looplab\/eventhorizon\/codec\/json\"\n)\n\n\/\/ DefaultQueueSize is the default queue size per handler for publishing events.\nvar DefaultQueueSize = 10\n\n\/\/ EventBus is a local event bus that delegates handling of published events\n\/\/ to all matching registered handlers, in order of registration.\ntype EventBus struct {\n\tgroup *Group\n\tregistered map[eh.EventHandlerType]struct{}\n\tregisteredMu sync.RWMutex\n\terrCh chan eh.EventBusError\n\twg sync.WaitGroup\n\tcodec 
eh.EventCodec\n}\n\n\/\/ NewEventBus creates a EventBus.\nfunc NewEventBus(options ...Option) *EventBus {\n\tb := &EventBus{\n\t\tgroup: NewGroup(),\n\t\tregistered: map[eh.EventHandlerType]struct{}{},\n\t\terrCh: make(chan eh.EventBusError, 100),\n\t\tcodec: &json.EventCodec{},\n\t}\n\n\t\/\/ Apply configuration options.\n\tfor _, option := range options {\n\t\tif option == nil {\n\t\t\tcontinue\n\t\t}\n\t\toption(b)\n\t}\n\n\treturn b\n}\n\n\/\/ Option is an option setter used to configure creation.\ntype Option func(*EventBus)\n\n\/\/ WithCodec uses the specified codec for encoding events.\nfunc WithCodec(codec eh.EventCodec) Option {\n\treturn func(b *EventBus) {\n\t\tb.codec = codec\n\t}\n}\n\n\/\/ WithGroup uses a specified group for transmitting events.\nfunc WithGroup(g *Group) Option {\n\treturn func(b *EventBus) {\n\t\tb.group = g\n\t}\n}\n\n\/\/ HandlerType implements the HandlerType method of the eventhorizon.EventHandler interface.\nfunc (b *EventBus) HandlerType() eh.EventHandlerType {\n\treturn \"eventbus\"\n}\n\n\/\/ HandleEvent implements the HandleEvent method of the eventhorizon.EventHandler interface.\nfunc (b *EventBus) HandleEvent(ctx context.Context, event eh.Event) error {\n\tdata, err := b.codec.MarshalEvent(ctx, event)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not marshal event: %w\", err)\n\t}\n\n\treturn b.group.publish(ctx, data)\n}\n\n\/\/ AddHandler implements the AddHandler method of the eventhorizon.EventBus interface.\nfunc (b *EventBus) AddHandler(ctx context.Context, m eh.EventMatcher, h eh.EventHandler) error {\n\tif m == nil {\n\t\treturn eh.ErrMissingMatcher\n\t}\n\tif h == nil {\n\t\treturn eh.ErrMissingHandler\n\t}\n\n\t\/\/ Check handler existence.\n\tb.registeredMu.Lock()\n\tdefer b.registeredMu.Unlock()\n\tif _, ok := b.registered[h.HandlerType()]; ok {\n\t\treturn eh.ErrHandlerAlreadyAdded\n\t}\n\n\t\/\/ Get or create the channel.\n\tid := h.HandlerType().String()\n\tch := b.group.channel(id)\n\n\t\/\/ Register handler.\n\tb.registered[h.HandlerType()] = struct{}{}\n\n\t\/\/ Handle until context is cancelled.\n\tb.wg.Add(1)\n\tgo b.handle(ctx, m, h, ch)\n\n\treturn nil\n}\n\n\/\/ Errors implements the Errors method of the eventhorizon.EventBus interface.\nfunc (b *EventBus) Errors() <-chan eh.EventBusError {\n\treturn b.errCh\n}\n\n\/\/ Wait for all channels to close in the event bus group\nfunc (b *EventBus) Wait() {\n\tb.wg.Wait()\n\tb.group.close()\n}\n\ntype evt struct {\n\tctxVals map[string]interface{}\n\tevent eh.Event\n}\n\n\/\/ Handles all events coming in on the channel.\nfunc (b *EventBus) handle(ctx context.Context, m eh.EventMatcher, h eh.EventHandler, ch <-chan []byte) {\n\tdefer b.wg.Done()\n\n\tfor {\n\t\tselect {\n\t\tcase data := <-ch:\n\t\t\t\/\/ Artificial delay to simulate network.\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\n\t\t\tevent, ctx, err := b.codec.UnmarshalEvent(ctx, data)\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"could not unmarshal event: %w\", err)\n\t\t\t\tselect {\n\t\t\t\tcase b.errCh <- eh.EventBusError{Err: err, Ctx: ctx}:\n\t\t\t\tdefault:\n\t\t\t\t\tlog.Printf(\"eventhorizon: missed error in local event bus: %s\", err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ Ignore non-matching events.\n\t\t\tif !m.Match(event) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Handle the event if it did match.\n\t\t\tif err := h.HandleEvent(ctx, event); err != nil {\n\t\t\t\terr = fmt.Errorf(\"could not handle event (%s): %s\", h.HandlerType(), err.Error())\n\t\t\t\tselect {\n\t\t\t\tcase b.errCh <- 
eh.EventBusError{Err: err, Ctx: ctx, Event: event}:\n\t\t\t\tdefault:\n\t\t\t\t\tlog.Printf(\"eventhorizon: missed error in local event bus: %s\", err)\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Group is a publishing group shared by multiple event busses locally, if needed.\ntype Group struct {\n\tbus map[string]chan []byte\n\tbusMu sync.RWMutex\n}\n\n\/\/ NewGroup creates a Group.\nfunc NewGroup() *Group {\n\treturn &Group{\n\t\tbus: map[string]chan []byte{},\n\t}\n}\n\nfunc (g *Group) channel(id string) <-chan []byte {\n\tg.busMu.Lock()\n\tdefer g.busMu.Unlock()\n\n\tif ch, ok := g.bus[id]; ok {\n\t\treturn ch\n\t}\n\n\tch := make(chan []byte, DefaultQueueSize)\n\tg.bus[id] = ch\n\treturn ch\n}\n\nfunc (g *Group) publish(ctx context.Context, b []byte) error {\n\tg.busMu.RLock()\n\tdefer g.busMu.RUnlock()\n\n\tfor _, ch := range g.bus {\n\t\t\/\/ The event was already marshaled in HandleEvent; sending only the encoded\n\t\t\/\/ bytes both simulates a network bus and breaks any relationship with the\n\t\t\/\/ publisher's context.\n\t\tselect {\n\t\tcase ch <- b:\n\t\tdefault:\n\t\t\tlog.Printf(\"eventhorizon: publish queue full in local event bus\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Closes all the open channels after handling is done.\nfunc (g *Group) close() {\n\tfor _, ch := range g.bus {\n\t\tclose(ch)\n\t}\n\tg.bus = nil\n}\n
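\n\/\/ Minimal usage sketch (illustrative only; ctx, matcher, handler and event\n\/\/ are assumed to exist elsewhere):\n\/\/\n\/\/\tbus := NewEventBus()\n\/\/\tif err := bus.AddHandler(ctx, matcher, handler); err != nil {\n\/\/\t\tlog.Fatal(err)\n\/\/\t}\n\/\/\tif err := bus.HandleEvent(ctx, event); err != nil {\n\/\/\t\tlog.Fatal(err)\n\/\/\t}\n\/\/\n\/\/ HandleEvent encodes the event once via the configured codec and fans the\n\/\/ bytes out to every registered handler's queue; the artificial delay in\n\/\/ handle above stands in for network latency.\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"v.io\/jiri\/collect\"\n\t\"v.io\/jiri\/jiri\"\n\t\"v.io\/x\/devtools\/internal\/test\"\n)\n\nvar (\n\tjenkinsHost = \"http:\/\/localhost:8001\/jenkins\"\n)\n\n\/\/ requireEnv makes sure that the given environment variables are set.\nfunc requireEnv(names []string) error {\n\tfor _, name := range names {\n\t\tif os.Getenv(name) == \"\" {\n\t\t\treturn fmt.Errorf(\"environment variable %q is not set\", name)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ vanadiumPresubmitPoll polls vanadium projects for new patchsets for\n\/\/ which to run presubmit tests.\nfunc vanadiumPresubmitPoll(jirix *jiri.X, testName string, _ ...Opt) (_ *test.Result, e error) {\n\t\/\/ Initialize the test.\n\tcleanup, err := initTestImpl(jirix, false, false, testName, nil, \"\")\n\tif err != nil {\n\t\treturn nil, newInternalError(err, \"Init\")\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\t\/\/ Use the \"presubmit query\" command to poll for new changes.\n\tlogfile := filepath.Join(jirix.Root, \".presubmit_log\")\n\targs := []string{}\n\tif jirix.Verbose() {\n\t\targs = append(args, \"-v\")\n\t} else {\n\t\t\/\/ append this for testing this CL only - remove on checkin.\n\t\targs = append(args, \"-v\")\n\t}\n\targs = append(args,\n\t\t\"-host\", jenkinsHost,\n\t\t\"query\",\n\t\t\"-log-file\", logfile,\n\t\t\"-manifest\", \"tools\",\n\t)\n\tif err := jirix.NewSeq().Capture(jirix.Stdout(), jirix.Stderr()).Last(\"presubmit\", args...); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &test.Result{Status: test.Passed}, nil\n}\n\n\/\/ vanadiumPresubmitTest runs presubmit tests for a given project specified\n\/\/ in the TEST environment variable.\nfunc vanadiumPresubmitTest(jirix *jiri.X, testName string, _ ...Opt) (_ *test.Result, e error) {\n\tif err := requireEnv([]string{\"BUILD_NUMBER\", \"REFS\", \"PROJECTS\", \"TEST\", \"WORKSPACE\"}); err != nil {\n\t\treturn nil, 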
err\n\t}\n\n\tif err := cleanupProfiles(jirix); err != nil {\n\t\treturn nil, newInternalError(err, \"Init\")\n\t}\n\n\t\/\/ Initialize the test.\n\tcleanup, err := initTestImpl(jirix, false, false, testName, nil, \"\")\n\tif err != nil {\n\t\treturn nil, newInternalError(err, \"Init\")\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\tdisplayProfiles(jirix, \"presubmit\")\n\n\ts := jirix.NewSeq()\n\n\t\/\/ Use the \"presubmit test\" command to run the presubmit test.\n\targs := []string{}\n\tif jirix.Verbose() {\n\t\targs = append(args, \"-v\")\n\t}\n\tname := os.Getenv(\"TEST\")\n\targs = append(args,\n\t\t\"-host\", jenkinsHost,\n\t\t\"test\",\n\t\t\"-build-number\", os.Getenv(\"BUILD_NUMBER\"),\n\t\t\"-manifest\", \"tools\",\n\t\t\"-projects\", os.Getenv(\"PROJECTS\"),\n\t\t\"-refs\", os.Getenv(\"REFS\"),\n\t\t\"-test\", name,\n\t)\n\tif err := s.Capture(jirix.Stdout(), jirix.Stderr()).Last(\"presubmit\", args...); err != nil {\n\t\treturn nil, newInternalError(err, \"Presubmit\")\n\t}\n\n\t\/\/ Remove any test result files that are empty.\n\ttestResultFiles, err := findTestResultFiles(jirix, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, file := range testResultFiles {\n\t\tfileInfo, err := s.Stat(file)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif fileInfo.Size() == 0 {\n\t\t\tif err := s.RemoveAll(file).Done(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &test.Result{Status: test.Passed}, nil\n}\n\n\/\/ vanadiumPresubmitResult runs \"presubmit result\" command to process and post test results.\nfunc vanadiumPresubmitResult(jirix *jiri.X, testName string, _ ...Opt) (_ *test.Result, e error) {\n\tif err := requireEnv([]string{\"BUILD_NUMBER\", \"REFS\", \"PROJECTS\", \"WORKSPACE\"}); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Initialize the test.\n\tcleanup, err := initTest(jirix, testName, nil)\n\tif err != nil {\n\t\treturn nil, newInternalError(err, \"Init\")\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\t\/\/ Run \"presubmit result\".\n\targs := []string{}\n\tif jirix.Verbose() {\n\t\targs = append(args, \"-v\")\n\t}\n\targs = append(args,\n\t\t\"-host\", jenkinsHost,\n\t\t\"result\",\n\t\t\"-build-number\", os.Getenv(\"BUILD_NUMBER\"),\n\t\t\"-manifest\", \"tools\",\n\t\t\"-refs\", os.Getenv(\"REFS\"),\n\t\t\"-projects\", os.Getenv(\"PROJECTS\"),\n\t)\n\tif err := jirix.NewSeq().Capture(jirix.Stdout(), jirix.Stderr()).Last(\"presubmit\", args...); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &test.Result{Status: test.Passed}, nil\n}\nTBR: Disable profile cleanup before presubmit tests.\/\/ Copyright 2015 The Vanadium Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"v.io\/jiri\/collect\"\n\t\"v.io\/jiri\/jiri\"\n\t\"v.io\/x\/devtools\/internal\/test\"\n)\n\nvar (\n\tjenkinsHost = \"http:\/\/localhost:8001\/jenkins\"\n)\n\n\/\/ requireEnv makes sure that the given environment variables are set.\nfunc requireEnv(names []string) error {\n\tfor _, name := range names {\n\t\tif os.Getenv(name) == \"\" {\n\t\t\treturn fmt.Errorf(\"environment variable %q is not set\", name)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ vanadiumPresubmitPoll polls vanadium projects for new patchsets for\n\/\/ which to run presubmit tests.\nfunc vanadiumPresubmitPoll(jirix *jiri.X, testName string, _ ...Opt) (_ *test.Result, e error) {\n\t\/\/ Initialize the test.\n\tcleanup, err := initTestImpl(jirix, false, false, testName, nil, \"\")\n\tif err != nil {\n\t\treturn nil, newInternalError(err, \"Init\")\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\t\/\/ Use the \"presubmit query\" command to poll for new changes.\n\tlogfile := filepath.Join(jirix.Root, \".presubmit_log\")\n\targs := []string{}\n\tif jirix.Verbose() {\n\t\targs = append(args, \"-v\")\n\t} else {\n\t\t\/\/ append this for testing this CL only - remove on checkin.\n\t\targs = append(args, \"-v\")\n\t}\n\targs = append(args,\n\t\t\"-host\", jenkinsHost,\n\t\t\"query\",\n\t\t\"-log-file\", logfile,\n\t\t\"-manifest\", \"tools\",\n\t)\n\tif err := jirix.NewSeq().Capture(jirix.Stdout(), jirix.Stderr()).Last(\"presubmit\", args...); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &test.Result{Status: test.Passed}, nil\n}\n\n\/\/ vanadiumPresubmitTest runs presubmit tests for a given project specified\n\/\/ in TEST environment variable.\nfunc vanadiumPresubmitTest(jirix *jiri.X, testName string, _ ...Opt) (_ *test.Result, e error) {\n\tif err := requireEnv([]string{\"BUILD_NUMBER\", \"REFS\", \"PROJECTS\", \"TEST\", \"WORKSPACE\"}); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Initialize the test.\n\tcleanup, err := initTestImpl(jirix, false, false, testName, nil, \"\")\n\tif err != nil {\n\t\treturn nil, newInternalError(err, \"Init\")\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\tdisplayProfiles(jirix, \"presubmit\")\n\n\ts := jirix.NewSeq()\n\n\t\/\/ Use the \"presubmit test\" command to run the presubmit test.\n\targs := []string{}\n\tif jirix.Verbose() {\n\t\targs = append(args, \"-v\")\n\t}\n\tname := os.Getenv(\"TEST\")\n\targs = append(args,\n\t\t\"-host\", jenkinsHost,\n\t\t\"test\",\n\t\t\"-build-number\", os.Getenv(\"BUILD_NUMBER\"),\n\t\t\"-manifest\", \"tools\",\n\t\t\"-projects\", os.Getenv(\"PROJECTS\"),\n\t\t\"-refs\", os.Getenv(\"REFS\"),\n\t\t\"-test\", name,\n\t)\n\tif err := s.Capture(jirix.Stdout(), jirix.Stderr()).Last(\"presubmit\", args...); err != nil {\n\t\treturn nil, newInternalError(err, \"Presubmit\")\n\t}\n\n\t\/\/ Remove any test result files that are empty.\n\ttestResultFiles, err := findTestResultFiles(jirix, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, file := range testResultFiles {\n\t\tfileInfo, err := s.Stat(file)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif fileInfo.Size() == 0 {\n\t\t\tif err := s.RemoveAll(file).Done(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &test.Result{Status: test.Passed}, nil\n}\n\n\/\/ vanadiumPresubmitResult runs \"presubmit result\" 
command to process and post test results.\nfunc vanadiumPresubmitResult(jirix *jiri.X, testName string, _ ...Opt) (_ *test.Result, e error) {\n\tif err := requireEnv([]string{\"BUILD_NUMBER\", \"REFS\", \"PROJECTS\", \"WORKSPACE\"}); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Initialize the test.\n\tcleanup, err := initTest(jirix, testName, nil)\n\tif err != nil {\n\t\treturn nil, newInternalError(err, \"Init\")\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\t\/\/ Run \"presubmit result\".\n\targs := []string{}\n\tif jirix.Verbose() {\n\t\targs = append(args, \"-v\")\n\t}\n\targs = append(args,\n\t\t\"-host\", jenkinsHost,\n\t\t\"result\",\n\t\t\"-build-number\", os.Getenv(\"BUILD_NUMBER\"),\n\t\t\"-manifest\", \"tools\",\n\t\t\"-refs\", os.Getenv(\"REFS\"),\n\t\t\"-projects\", os.Getenv(\"PROJECTS\"),\n\t)\n\tif err := jirix.NewSeq().Capture(jirix.Stdout(), jirix.Stderr()).Last(\"presubmit\", args...); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &test.Result{Status: test.Passed}, nil\n}\n<|endoftext|>"} {"text":"package transit\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/vault\/logical\"\n)\n\n\/\/ Case1: If batch decryption input is not base64 encoded, it should fail.\nfunc TestTransit_BatchDecryptionCase1(t *testing.T) {\n\tvar resp *logical.Response\n\tvar err error\n\n\tb, s := createBackendWithStorage(t)\n\n\tbatchEncryptionInput := []interface{}{\n\t\tmap[string]interface{}{\"plaintext\": \"dGhlIHF1aWNrIGJyb3duIGZveA==\"},\n\t\tmap[string]interface{}{\"plaintext\": \"Cg==\"},\n\t}\n\n\tbatchEncryptionData := map[string]interface{}{\n\t\t\"batch_input\": batchEncryptionInput,\n\t}\n\n\tbatchEncryptionReq := &logical.Request{\n\t\tOperation: logical.CreateOperation,\n\t\tPath: \"encrypt\/upserted_key\",\n\t\tStorage: s,\n\t\tData: batchEncryptionData,\n\t}\n\tresp, err = b.HandleRequest(context.Background(), batchEncryptionReq)\n\tif err != nil || (resp != nil && resp.IsError()) {\n\t\tt.Fatalf(\"err:%v resp:%#v\", err, resp)\n\t}\n\n\tbatchDecryptionData := map[string]interface{}{\n\t\t\"batch_input\": resp.Data[\"batch_results\"],\n\t}\n\n\tbatchDecryptionReq := &logical.Request{\n\t\tOperation: logical.UpdateOperation,\n\t\tPath: \"decrypt\/upserted_key\",\n\t\tStorage: s,\n\t\tData: batchDecryptionData,\n\t}\n\tresp, err = b.HandleRequest(context.Background(), batchDecryptionReq)\n\tif err == nil {\n\t\tt.Fatalf(\"expected an error\")\n\t}\n}\n\n\/\/ Case2: Normal case of batch decryption\nfunc TestTransit_BatchDecryptionCase2(t *testing.T) {\n\tvar resp *logical.Response\n\tvar err error\n\n\tb, s := createBackendWithStorage(t)\n\n\tbatchEncryptionInput := []interface{}{\n\t\tmap[string]interface{}{\"plaintext\": \"Cg==\"},\n\t\tmap[string]interface{}{\"plaintext\": \"dGhlIHF1aWNrIGJyb3duIGZveA==\"},\n\t}\n\tbatchEncryptionData := map[string]interface{}{\n\t\t\"batch_input\": batchEncryptionInput,\n\t}\n\n\tbatchEncryptionReq := &logical.Request{\n\t\tOperation: logical.CreateOperation,\n\t\tPath: \"encrypt\/upserted_key\",\n\t\tStorage: s,\n\t\tData: batchEncryptionData,\n\t}\n\tresp, err = b.HandleRequest(context.Background(), batchEncryptionReq)\n\tif err != nil || (resp != nil && resp.IsError()) {\n\t\tt.Fatalf(\"err:%v resp:%#v\", err, resp)\n\t}\n\n\tbatchResponseItems := resp.Data[\"batch_results\"].([]BatchResponseItem)\n\tbatchDecryptionInput := make([]interface{}, len(batchResponseItems))\n\tfor i, item := range batchResponseItems {\n\t\tbatchDecryptionInput[i] = map[string]interface{}{\"ciphertext\": 
item.Ciphertext}\n\t}\n\tbatchDecryptionData := map[string]interface{}{\n\t\t\"batch_input\": batchDecryptionInput,\n\t}\n\n\tbatchDecryptionReq := &logical.Request{\n\t\tOperation: logical.UpdateOperation,\n\t\tPath: \"decrypt\/upserted_key\",\n\t\tStorage: s,\n\t\tData: batchDecryptionData,\n\t}\n\tresp, err = b.HandleRequest(context.Background(), batchDecryptionReq)\n\tif err != nil || (resp != nil && resp.IsError()) {\n\t\tt.Fatalf(\"err:%v resp:%#v\", err, resp)\n\t}\n\n\tbatchDecryptionResponseItems := resp.Data[\"batch_results\"].([]BatchResponseItem)\n\n\tplaintext1 := \"dGhlIHF1aWNrIGJyb3duIGZveA==\"\n\tplaintext2 := \"Cg==\"\n\tfor _, item := range batchDecryptionResponseItems {\n\t\tif item.Plaintext != plaintext1 && item.Plaintext != plaintext2 {\n\t\t\tt.Fatalf(\"bad: plaintext: %q\", item.Plaintext)\n\t\t}\n\t}\n}\n\n\/\/ Case3: Test batch decryption with a derived key\nfunc TestTransit_BatchDecryptionCase3(t *testing.T) {\n\tvar resp *logical.Response\n\tvar err error\n\n\tb, s := createBackendWithStorage(t)\n\n\tpolicyData := map[string]interface{}{\n\t\t\"derived\": true,\n\t}\n\n\tpolicyReq := &logical.Request{\n\t\tOperation: logical.UpdateOperation,\n\t\tPath: \"keys\/existing_key\",\n\t\tStorage: s,\n\t\tData: policyData,\n\t}\n\n\tresp, err = b.HandleRequest(context.Background(), policyReq)\n\tif err != nil || (resp != nil && resp.IsError()) {\n\t\tt.Fatalf(\"err:%v resp:%#v\", err, resp)\n\t}\n\n\tbatchInput := []interface{}{\n\t\tmap[string]interface{}{\"plaintext\": \"dGhlIHF1aWNrIGJyb3duIGZveA==\", \"context\": \"dGVzdGNvbnRleHQ=\"},\n\t\tmap[string]interface{}{\"plaintext\": \"dGhlIHF1aWNrIGJyb3duIGZveA==\", \"context\": \"dGVzdGNvbnRleHQ=\"},\n\t}\n\n\tbatchData := map[string]interface{}{\n\t\t\"batch_input\": batchInput,\n\t}\n\tbatchReq := &logical.Request{\n\t\tOperation: logical.UpdateOperation,\n\t\tPath: \"encrypt\/existing_key\",\n\t\tStorage: s,\n\t\tData: batchData,\n\t}\n\tresp, err = b.HandleRequest(context.Background(), batchReq)\n\tif err != nil || (resp != nil && resp.IsError()) {\n\t\tt.Fatalf(\"err:%v resp:%#v\", err, resp)\n\t}\n\n\tbatchDecryptionInputItems := resp.Data[\"batch_results\"].([]BatchResponseItem)\n\n\tbatchDecryptionInput := make([]interface{}, len(batchDecryptionInputItems))\n\tfor i, item := range batchDecryptionInputItems {\n\t\tbatchDecryptionInput[i] = map[string]interface{}{\"ciphertext\": item.Ciphertext, \"context\": \"dGVzdGNvbnRleHQ=\"}\n\t}\n\n\tbatchDecryptionData := map[string]interface{}{\n\t\t\"batch_input\": batchDecryptionInput,\n\t}\n\n\tbatchDecryptionReq := &logical.Request{\n\t\tOperation: logical.UpdateOperation,\n\t\tPath: \"decrypt\/existing_key\",\n\t\tStorage: s,\n\t\tData: batchDecryptionData,\n\t}\n\tresp, err = b.HandleRequest(context.Background(), batchDecryptionReq)\n\tif err != nil || (resp != nil && resp.IsError()) {\n\t\tt.Fatalf(\"err:%v resp:%#v\", err, resp)\n\t}\n\n\tbatchDecryptionResponseItems := resp.Data[\"batch_results\"].([]BatchResponseItem)\n\n\tplaintext := \"dGhlIHF1aWNrIGJyb3duIGZveA==\"\n\tfor _, item := range batchDecryptionResponseItems {\n\t\tif item.Plaintext != plaintext {\n\t\t\tt.Fatalf(\"bad: plaintext. 
Expected: %q, Actual: %q\", plaintext, item.Plaintext)\n\t\t}\n\t}\n}\nRemove unnecessary test (#5483)package transit\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/vault\/logical\"\n)\n\nfunc TestTransit_BatchDecryption(t *testing.T) {\n\tvar resp *logical.Response\n\tvar err error\n\n\tb, s := createBackendWithStorage(t)\n\n\tbatchEncryptionInput := []interface{}{\n\t\tmap[string]interface{}{\"plaintext\": \"Cg==\"},\n\t\tmap[string]interface{}{\"plaintext\": \"dGhlIHF1aWNrIGJyb3duIGZveA==\"},\n\t}\n\tbatchEncryptionData := map[string]interface{}{\n\t\t\"batch_input\": batchEncryptionInput,\n\t}\n\n\tbatchEncryptionReq := &logical.Request{\n\t\tOperation: logical.CreateOperation,\n\t\tPath: \"encrypt\/upserted_key\",\n\t\tStorage: s,\n\t\tData: batchEncryptionData,\n\t}\n\tresp, err = b.HandleRequest(context.Background(), batchEncryptionReq)\n\tif err != nil || (resp != nil && resp.IsError()) {\n\t\tt.Fatalf(\"err:%v resp:%#v\", err, resp)\n\t}\n\n\tbatchResponseItems := resp.Data[\"batch_results\"].([]BatchResponseItem)\n\tbatchDecryptionInput := make([]interface{}, len(batchResponseItems))\n\tfor i, item := range batchResponseItems {\n\t\tbatchDecryptionInput[i] = map[string]interface{}{\"ciphertext\": item.Ciphertext}\n\t}\n\tbatchDecryptionData := map[string]interface{}{\n\t\t\"batch_input\": batchDecryptionInput,\n\t}\n\n\tbatchDecryptionReq := &logical.Request{\n\t\tOperation: logical.UpdateOperation,\n\t\tPath: \"decrypt\/upserted_key\",\n\t\tStorage: s,\n\t\tData: batchDecryptionData,\n\t}\n\tresp, err = b.HandleRequest(context.Background(), batchDecryptionReq)\n\tif err != nil || (resp != nil && resp.IsError()) {\n\t\tt.Fatalf(\"err:%v resp:%#v\", err, resp)\n\t}\n\n\tbatchDecryptionResponseItems := resp.Data[\"batch_results\"].([]BatchResponseItem)\n\n\tplaintext1 := \"dGhlIHF1aWNrIGJyb3duIGZveA==\"\n\tplaintext2 := \"Cg==\"\n\tfor _, item := range batchDecryptionResponseItems {\n\t\tif item.Plaintext != plaintext1 && item.Plaintext != plaintext2 {\n\t\t\tt.Fatalf(\"bad: plaintext: %q\", item.Plaintext)\n\t\t}\n\t}\n}\n\nfunc TestTransit_BatchDecryption_DerivedKey(t *testing.T) {\n\tvar resp *logical.Response\n\tvar err error\n\n\tb, s := createBackendWithStorage(t)\n\n\tpolicyData := map[string]interface{}{\n\t\t\"derived\": true,\n\t}\n\n\tpolicyReq := &logical.Request{\n\t\tOperation: logical.UpdateOperation,\n\t\tPath: \"keys\/existing_key\",\n\t\tStorage: s,\n\t\tData: policyData,\n\t}\n\n\tresp, err = b.HandleRequest(context.Background(), policyReq)\n\tif err != nil || (resp != nil && resp.IsError()) {\n\t\tt.Fatalf(\"err:%v resp:%#v\", err, resp)\n\t}\n\n\tbatchInput := []interface{}{\n\t\tmap[string]interface{}{\"plaintext\": \"dGhlIHF1aWNrIGJyb3duIGZveA==\", \"context\": \"dGVzdGNvbnRleHQ=\"},\n\t\tmap[string]interface{}{\"plaintext\": \"dGhlIHF1aWNrIGJyb3duIGZveA==\", \"context\": \"dGVzdGNvbnRleHQ=\"},\n\t}\n\n\tbatchData := map[string]interface{}{\n\t\t\"batch_input\": batchInput,\n\t}\n\tbatchReq := &logical.Request{\n\t\tOperation: logical.UpdateOperation,\n\t\tPath: \"encrypt\/existing_key\",\n\t\tStorage: s,\n\t\tData: batchData,\n\t}\n\tresp, err = b.HandleRequest(context.Background(), batchReq)\n\tif err != nil || (resp != nil && resp.IsError()) {\n\t\tt.Fatalf(\"err:%v resp:%#v\", err, resp)\n\t}\n\n\tbatchDecryptionInputItems := resp.Data[\"batch_results\"].([]BatchResponseItem)\n\n\tbatchDecryptionInput := make([]interface{}, len(batchDecryptionInputItems))\n\tfor i, item := range batchDecryptionInputItems 
{\n\t\tbatchDecryptionInput[i] = map[string]interface{}{\"ciphertext\": item.Ciphertext, \"context\": \"dGVzdGNvbnRleHQ=\"}\n\t}\n\n\tbatchDecryptionData := map[string]interface{}{\n\t\t\"batch_input\": batchDecryptionInput,\n\t}\n\n\tbatchDecryptionReq := &logical.Request{\n\t\tOperation: logical.UpdateOperation,\n\t\tPath: \"decrypt\/existing_key\",\n\t\tStorage: s,\n\t\tData: batchDecryptionData,\n\t}\n\tresp, err = b.HandleRequest(context.Background(), batchDecryptionReq)\n\tif err != nil || (resp != nil && resp.IsError()) {\n\t\tt.Fatalf(\"err:%v resp:%#v\", err, resp)\n\t}\n\n\tbatchDecryptionResponseItems := resp.Data[\"batch_results\"].([]BatchResponseItem)\n\n\tplaintext := \"dGhlIHF1aWNrIGJyb3duIGZveA==\"\n\tfor _, item := range batchDecryptionResponseItems {\n\t\tif item.Plaintext != plaintext {\n\t\t\tt.Fatalf(\"bad: plaintext. Expected: %q, Actual: %q\", plaintext, item.Plaintext)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright © 2016 The Things Network\n\/\/ Use of this source code is governed by the MIT license that can be found in the LICENSE file.\n\npackage account\n\nimport (\n\t\"time\"\n\n\t\"github.com\/TheThingsNetwork\/ttn\/core\/types\"\n)\n\n\/\/ Application represents an application on The Things Network\ntype Application struct {\n\tID string `json:\"id\" valid:\"required\"`\n\tName string `json:\"name\" valid:\"required\"`\n\tEUIs []types.AppEUI `json:\"euis,omitempty\"`\n\tAccessKeys []types.AccessKey `json:\"access_keys,omitempty\"`\n\tCreated time.Time `json:\"created,omitempty\"`\n\tCollaborators []Collaborator `json:\"collaborators,omitempty\"`\n}\n\n\/\/ Collaborator is a user that has rights to a certain application\ntype Collaborator struct {\n\tUsername string `json:\"username\" valid:\"required\"`\n\tRights []types.Right `json:\"rights\" valid:\"required\"`\n}\n\n\/\/ HasRight checks if the collaborator has a specific right\nfunc (c *Collaborator) HasRight(right types.Right) bool {\n\tfor _, r := range c.Rights {\n\t\tif r == right {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Profile represents the profile of a user\ntype Profile struct {\n\tUsername string `json:\"username\"`\n\tEmail string `json:\"email\"`\n\tName *Name `json:\"name\"`\n}\n\n\/\/ Name represents the full name of a user\ntype Name struct {\n\tFirst string `json:\"first\"`\n\tLast string `json:\"last\"`\n}\n\n\/\/ Component represents a component on the network\ntype Component struct {\n\tType string `json:\"type\"`\n\tID string `json:\"id\"`\n\tCreated time.Time `json:\"created,omitempty\"`\n}\n\n\/\/ String implements the Stringer interface for Name\nfunc (n *Name) String() string {\n\treturn n.First + \" \" + n.Last\n}\n\n\/\/ Gateway represents a gateway on the account server\ntype Gateway struct {\n\t\/\/ ID is the id of the gateway\n\tID string `json:\"id\"`\n\n\t\/\/ EUI is the eui of the gateway\n\tEUI []types.GatewayEUI `json:\"eui,omitempty\"`\n\n\t\/\/ Location is the location of the gateway\n\tLocation *Location `json:\"location,omitempty\"`\n\n\t\/\/ Country is the country code where the gateway is located\n\tCountry string `json:\"country\"`\n\n\t\/\/ Activated denotes whether or not the gateway has been activated yet\n\tActivated bool `json:\"activated\"`\n\n\t\/\/ Owner is the user that owns the gateway\n\tOwner string `json:\"owner,omitempty\"`\n\n\t\/\/ PublicRights are the rights that are publicly available for the gateway\n\tPublicRights []types.Right `json:\"public_rights\"`\n}\n\n\/\/ Location represents a geo location\ntype Location struct 
{\n\tLng float64 `json:\"lng\"`\n\tLat float64 `json:\"lat\"`\n}\nUpdate gateway type for new gateway endpoint\/\/ Copyright © 2016 The Things Network\n\/\/ Use of this source code is governed by the MIT license that can be found in the LICENSE file.\n\npackage account\n\nimport (\n\t\"time\"\n\n\t\"github.com\/TheThingsNetwork\/ttn\/core\/types\"\n)\n\n\/\/ Application represents an application on The Things Network\ntype Application struct {\n\tID string `json:\"id\" valid:\"required\"`\n\tName string `json:\"name\" valid:\"required\"`\n\tEUIs []types.AppEUI `json:\"euis,omitempty\"`\n\tAccessKeys []types.AccessKey `json:\"access_keys,omitempty\"`\n\tCreated time.Time `json:\"created,omitempty\"`\n\tCollaborators []Collaborator `json:\"collaborators,omitempty\"`\n}\n\n\/\/ Collaborator is a user that has rights to a certain application\ntype Collaborator struct {\n\tUsername string `json:\"username\" valid:\"required\"`\n\tRights []types.Right `json:\"rights\" valid:\"required\"`\n}\n\n\/\/ HasRight checks if the collaborator has a specific right\nfunc (c *Collaborator) HasRight(right types.Right) bool {\n\tfor _, r := range c.Rights {\n\t\tif r == right {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Profile represents the profile of a user\ntype Profile struct {\n\tUsername string `json:\"username\"`\n\tEmail string `json:\"email\"`\n\tName *Name `json:\"name\"`\n}\n\n\/\/ Name represents the full name of a user\ntype Name struct {\n\tFirst string `json:\"first\"`\n\tLast string `json:\"last\"`\n}\n\n\/\/ Component represents a component on the network\ntype Component struct {\n\tType string `json:\"type\"`\n\tID string `json:\"id\"`\n\tCreated time.Time `json:\"created,omitempty\"`\n}\n\n\/\/ String implements the Stringer interface for Name\nfunc (n *Name) String() string {\n\treturn n.First + \" \" + n.Last\n}\n\n\/\/ Gateway represents a gateway on the account server\ntype Gateway struct {\n\tID string `json:\"id\" valid:\"required\"`\n\tEUI types.GatewayEUI `json:\"eui\" valid:\"required\"`\n\tActivated bool `json:\"activated\"`\n\tFrequencyPlan string `json:\"frequency_plan\"`\n\tFrequencyPlanURL string `json:\"frequency_plan_url\"`\n\tPublicLocation bool `json:\"location_public\"`\n\tStatusPublic bool `json:\"status_public\"`\n\tLocation *Location `json:\"location\"`\n\tCollaborators []Collaborator `json:\"collaborator\"`\n\tKey string `json:\"key\"`\n}\n\ntype Location struct {\n\tLng float64 `json:\"lng\"`\n\tLat float64 `json:\"lat\"`\n}\n<|endoftext|>"} {"text":"\/\/ The MIT License (MIT)\n\/\/\n\/\/ Copyright (c) 2013-2015 Oryx(ossrs)\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy of\n\/\/ this software and associated documentation files (the \"Software\"), to deal in\n\/\/ the Software without restriction, including without limitation the rights to\n\/\/ use, copy, modify, merge, publish, distribute, sublicense, and\/or sell copies of\n\/\/ the Software, and to permit persons to whom the Software is furnished to do so,\n\/\/ subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in all\n\/\/ copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n\/\/ FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR\n\/\/ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n\/\/ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n\/\/ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\/\/ +build darwin dragonfly freebsd nacl netbsd openbsd solaris linux\n\npackage protocol\n\nimport (\n\t\"github.com\/ossrs\/go-oryx\/core\"\n\t\"net\"\n)\n\nfunc (v *RtmpStack) fastSendMessages(iovs ...[]byte) (err error) {\n\t\/\/ we can force it to not use writev.\n\tif !core.Conf.Go.Writev {\n\t\treturn v.slowSendMessages(iovs...)\n\t}\n\n\t\/\/ wait for golang to implement the writev.\n\t\/\/ @see https:\/\/github.com\/golang\/go\/issues\/13451\n\t\/\/ private writev, @see https:\/\/github.com\/winlinvip\/go\/pull\/1.\n\tif c, ok := v.out.(*net.TCPConn); ok {\n\t\tif _, err = c.Writev(iovs); err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\treturn v.slowSendMessages(iovs...)\n}\nwait for golang to provide writev.\/\/ The MIT License (MIT)\n\/\/\n\/\/ Copyright (c) 2013-2015 Oryx(ossrs)\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy of\n\/\/ this software and associated documentation files (the \"Software\"), to deal in\n\/\/ the Software without restriction, including without limitation the rights to\n\/\/ use, copy, modify, merge, publish, distribute, sublicense, and\/or sell copies of\n\/\/ the Software, and to permit persons to whom the Software is furnished to do so,\n\/\/ subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in all\n\/\/ copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n\/\/ FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR\n\/\/ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n\/\/ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n\/\/ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\/\/ +build darwin dragonfly freebsd nacl netbsd openbsd solaris linux\n\npackage protocol\n\nimport (\n\t\"github.com\/ossrs\/go-oryx\/core\"\n\t\"net\"\n)\n\nfunc (v *RtmpStack) fastSendMessages(iovs ...[]byte) (err error) {\n\t\/\/ we can force it to not use writev.\n\tif !core.Conf.Go.Writev {\n\t\treturn v.slowSendMessages(iovs...)\n\t}\n\n\t\/\/ wait for golang to implement the writev.\n\t\/\/ @see https:\/\/github.com\/golang\/go\/issues\/13451\n\t\/\/ private writev, @see https:\/\/github.com\/winlinvip\/go\/pull\/1.\n\t\/\/if c, ok := v.out.(*net.TCPConn); ok {\n\t\/\/\tif _, err = c.Writev(iovs); err != nil {\n\t\/\/\t\treturn\n\t\/\/\t}\n\t\/\/\treturn\n\t\/\/}\n\n\treturn v.slowSendMessages(iovs...)\n}\n<|endoftext|>"} {"text":"package koding\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"koding\/db\/mongodb\/modelhelper\"\n\t\"koding\/migrators\/useroverlay\/token\"\n)\n\nvar (\n\t\/\/ funcMap contains easy to use template functions\n\tfuncMap = template.FuncMap{\n\t\t\"user_keys\": func(keys []string) string {\n\t\t\tif len(keys) == 0 {\n\t\t\t\treturn \"\"\n\t\t\t}\n\n\t\t\tc := \"ssh_authorized_keys:\\n\"\n\t\t\tfor _, key := range keys {\n\t\t\t\tc += fmt.Sprintf(\" - %s\\n\", strings.TrimSpace(key))\n\t\t\t}\n\t\t\treturn c\n\t\t},\n\t}\n\n\tcloudInitTemplate = template.Must(template.New(\"cloudinit\").Funcs(funcMap).Parse(cloudInit))\n\n\tcloudInit = `\n#cloud-config\noutput : { all : '| tee -a \/var\/log\/cloud-init-output.log' }\ndisable_root: false\ndisable_ec2_metadata: true\nhostname: {{.Hostname}}\n\nbootcmd:\n - [sh, -c, 'echo \"127.0.0.1 {{.Hostname}}\" >> \/etc\/hosts']\n\nusers:\n - default\n - name: {{.Username}}\n groups: sudo\n shell: \/bin\/bash\n gecos: koding user\n lock-password: true\n sudo: ALL=(ALL) NOPASSWD:ALL\n\n\n{{ user_keys .UserSSHKeys }}\n\nwrite_files:\n # Create kite.key\n - content: |\n {{.KiteKey}}\n path: \/etc\/kite\/kite.key\n\n # Apache configuration (\/etc\/apache2\/sites-available\/000-default.conf)\n - content: |\n \n ServerAdmin webmaster@localhost\n\n # Rewrite scheme to ws otherwise apache can't do a websocket proxy\n RewriteEngine on\n RewriteCond %{HTTP:UPGRADE} ^WebSocket$ [NC]\n RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC]\n RewriteRule .* ws:\/\/localhost:{{.KitePort}}%{REQUEST_URI} [P]\n\n # Proxy \/kite path to our klient kite\n ProxyRequests Off\n ProxyPass \/kite http:\/\/localhost:{{.KitePort}}\/kite keepalive=On\n ProxyPassReverse \/kite http:\/\/localhost:{{.KitePort}}\/kite\n\n DocumentRoot \/var\/www\n \n Options +FollowSymLinks\n AllowOverride None\n <\/Directory>\n \n Options +Indexes +FollowSymLinks +MultiViews +ExecCGI\n AddHandler cgi-script .cgi .pl .rb .py\n AllowOverride All\n Order allow,deny\n Allow from all\n <\/Directory>\n\n ScriptAlias \/cgi-bin\/ \/usr\/lib\/cgi-bin\/\n \n AllowOverride None\n Options +ExecCGI -MultiViews +SymLinksIfOwnerMatch\n Order allow,deny\n Allow from all\n <\/Directory>\n\n ErrorLog ${APACHE_LOG_DIR}\/error.log\n\n # Possible values include: debug, info, notice, warn, error, crit,\n # alert, emerg.\n LogLevel warn\n\n CustomLog ${APACHE_LOG_DIR}\/access.log combined\n <\/VirtualHost>\n path: \/etc\/apache2\/sites-available\/000-default.conf\n\n{{if .ShouldMigrate }}\n # User 
migration script (~\/migrate.sh)\n - content: |\n #!\/bin\/bash\n username={{ .Username }}\n credentials=({{ .Passwords }})\n vm_names=({{ .VmNames }})\n vm_ids=({{ .VmIds }})\n count=$((${#credentials[@]} - 1))\n counter=0\n clear\n if [ -f \/etc\/koding\/.kodingart.txt ]; then\n cat \/etc\/koding\/.kodingart.txt\n fi\n echo\n echo 'This migration assistant will help you move your VMs from the old Koding'\n echo 'environment to the new one. For each VM that you have, we will copy your'\n echo 'home directory (and any files you have changed) from the old VM into a'\n echo 'Backup directory on the new one.'\n echo\n echo 'Please note:'\n echo ' - This script will copy changed files on the old VM and place them in '\n echo ' the Backup directory of the new VM'\n echo ' - This script will NOT install or configure any software'\n echo ' - This script will NOT place any files outside your home directory.'\n echo ' You will need to move those files yourself.'\n echo ' - This script will NOT start any servers or configure any ports.'\n echo\n echo \"Your VMs:\"\n echo\n for vm in \"${vm_names[@]}\"; do\n echo \" - [$counter] $vm\"\n let counter=counter+1\n done\n echo\n index=''\n while [[ ! $index =~ ^[0-9]+$ || $index -ge $counter ]]; do\n echo -n \"Which vm would you like to migrate? (0-$count) \"\n read index\n done\n vm_name=\"${vm_names[$index]}\"\n echo\n echo \"Downloading files from $vm_name (this could take a while)...\"\n echo\n archive=\"$vm_name.tgz\"\n echo \"-XPOST -u $username:${credentials[$index]} -d vm=${vm_ids[$index]} --insecure https:\/\/kontainer12.sj.koding.com:3000\/export-files\" | xargs curl > $archive\n echo\n echo \"Extracting your files to directory $(pwd)\/$vm_name...\"\n mkdir -p Backup\/$vm_name\n tar -xzvf $archive -C $vm_name --strip-components 1 > \/dev\/null\n rm $archive\n echo\n echo \"You have successfully migrated $vm_name to the new Koding environment.\"\n echo \"The files have been placed in \/home\/$username\/Backup\/$vm_name. 
Please use\"\n echo 'the unzip command to access the files and then move or copy them into the'\n echo 'appropriate directories in your new VM.'\n echo\n path: \/home\/{{.Username}}\/migrate.sh\n permissions: '0755'\n owner: {{.Username}}:{{.Username}}\n{{end}}\n\nruncmd:\n # Install & Configure klient\n - [wget, \"{{.LatestKlientURL}}\", -O, \/tmp\/latest-klient.deb]\n - [dpkg, -i, \/tmp\/latest-klient.deb]\n - [chown, -R, '{{.Username}}:{{.Username}}', \/opt\/kite\/klient]\n - service klient stop\n - [sed, -i, 's\/\\.\\\/klient\/sudo -E -u {{.Username}} \\.\\\/klient\/g', \/etc\/init\/klient.conf]\n - service klient start\n - [rm, -f, \/tmp\/latest-klient.deb]\n\n # Configure user's home directory\n - [sh, -c, 'cp -r \/opt\/koding\/userdata\/* \/home\/{{.Username}}\/']\n - [chown, -R, '{{.Username}}:{{.Username}}', \/home\/{{.Username}}\/]\n - [chmod, +x, \/home\/{{.Username}}\/Web\/perl.pl]\n - [chmod, +x, \/home\/{{.Username}}\/Web\/python.py]\n - [chmod, +x, \/home\/{{.Username}}\/Web\/ruby.rb]\n - [rm, -rf, \/opt\/koding\/userdata]\n\n # Configure Apache to serve user's web content\n - [rm, -rf, \/var\/www]\n - [ln, -s, \/home\/{{.Username}}\/Web, \/var\/www]\n - a2enmod cgi\n - service apache2 restart\n\n\nfinal_message: \"All done!\"\n`\n)\n\ntype CloudInitConfig struct {\n\tUsername string\n\tUserSSHKeys []string\n\tHostname string\n\tKiteKey string\n\tLatestKlientURL string \/\/ URL of the latest version of the Klient package\n\tApachePort int \/\/ Defines the base apache running port, should be 80 or 443\n\tKitePort int \/\/ Defines the running kite port, like 3000\n\n\t\/\/ Needed for migrate.sh script\n\tPasswords string\n\tVmNames string\n\tVmIds string\n\tShouldMigrate bool\n\n\tTest bool\n}\n\nfunc (c *CloudInitConfig) setupMigrateScript() {\n\t\/\/ FIXME: Hack. 
Revise here.\n\tif c.Test {\n\t\treturn\n\t}\n\tvms, err := modelhelper.GetUserVMs(c.Username)\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(vms) == 0 {\n\t\treturn\n\t}\n\n\tpasswords := make([]string, 0, len(vms))\n\tvmIds := make([]string, 0, len(vms))\n\tvmNames := make([]string, 0, len(vms))\n\n\tfor _, vm := range vms {\n\t\tid := vm.Id.Hex()\n\t\tpasswords = append(passwords, token.StringToken(c.Username, id))\n\t\tvmIds = append(vmIds, id)\n\t\tvmNames = append(vmNames, vm.HostnameAlias)\n\t}\n\n\tc.Passwords = strings.Join(passwords, \" \")\n\tc.VmIds = strings.Join(vmIds, \" \")\n\tc.VmNames = strings.Join(vmNames, \" \")\n\n\tc.ShouldMigrate = true\n}\nmigrate: use more general subdomain for migration endpointpackage koding\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"koding\/db\/mongodb\/modelhelper\"\n\t\"koding\/migrators\/useroverlay\/token\"\n)\n\nvar (\n\t\/\/ funcMap contains easy to use template functions\n\tfuncMap = template.FuncMap{\n\t\t\"user_keys\": func(keys []string) string {\n\t\t\tif len(keys) == 0 {\n\t\t\t\treturn \"\"\n\t\t\t}\n\n\t\t\tc := \"ssh_authorized_keys:\\n\"\n\t\t\tfor _, key := range keys {\n\t\t\t\tc += fmt.Sprintf(\" - %s\\n\", strings.TrimSpace(key))\n\t\t\t}\n\t\t\treturn c\n\t\t},\n\t}\n\n\tcloudInitTemplate = template.Must(template.New(\"cloudinit\").Funcs(funcMap).Parse(cloudInit))\n\n\tcloudInit = `\n#cloud-config\noutput : { all : '| tee -a \/var\/log\/cloud-init-output.log' }\ndisable_root: false\ndisable_ec2_metadata: true\nhostname: {{.Hostname}}\n\nbootcmd:\n - [sh, -c, 'echo \"127.0.0.1 {{.Hostname}}\" >> \/etc\/hosts']\n\nusers:\n - default\n - name: {{.Username}}\n groups: sudo\n shell: \/bin\/bash\n gecos: koding user\n lock-password: true\n sudo: ALL=(ALL) NOPASSWD:ALL\n\n\n{{ user_keys .UserSSHKeys }}\n\nwrite_files:\n # Create kite.key\n - content: |\n {{.KiteKey}}\n path: \/etc\/kite\/kite.key\n\n # Apache configuration (\/etc\/apache2\/sites-available\/000-default.conf)\n - content: |\n \n ServerAdmin webmaster@localhost\n\n # Rewrite scheme to ws otherwise apache can't do a websocket proxy\n RewriteEngine on\n RewriteCond %{HTTP:UPGRADE} ^WebSocket$ [NC]\n RewriteCond %{HTTP:CONNECTION} ^Upgrade$ [NC]\n RewriteRule .* ws:\/\/localhost:{{.KitePort}}%{REQUEST_URI} [P]\n\n # Proxy \/kite path to our klient kite\n ProxyRequests Off\n ProxyPass \/kite http:\/\/localhost:{{.KitePort}}\/kite keepalive=On\n ProxyPassReverse \/kite http:\/\/localhost:{{.KitePort}}\/kite\n\n DocumentRoot \/var\/www\n \n Options +FollowSymLinks\n AllowOverride None\n <\/Directory>\n \n Options +Indexes +FollowSymLinks +MultiViews +ExecCGI\n AddHandler cgi-script .cgi .pl .rb .py\n AllowOverride All\n Order allow,deny\n Allow from all\n <\/Directory>\n\n ScriptAlias \/cgi-bin\/ \/usr\/lib\/cgi-bin\/\n \n AllowOverride None\n Options +ExecCGI -MultiViews +SymLinksIfOwnerMatch\n Order allow,deny\n Allow from all\n <\/Directory>\n\n ErrorLog ${APACHE_LOG_DIR}\/error.log\n\n # Possible values include: debug, info, notice, warn, error, crit,\n # alert, emerg.\n LogLevel warn\n\n CustomLog ${APACHE_LOG_DIR}\/access.log combined\n <\/VirtualHost>\n path: \/etc\/apache2\/sites-available\/000-default.conf\n\n{{if .ShouldMigrate }}\n # User migration script (~\/migrate.sh)\n - content: |\n #!\/bin\/bash\n username={{ .Username }}\n credentials=({{ .Passwords }})\n vm_names=({{ .VmNames }})\n vm_ids=({{ .VmIds }})\n count=$((${#credentials[@]} - 1))\n counter=0\n clear\n if [ -f \/etc\/koding\/.kodingart.txt ]; then\n cat 
\/etc\/koding\/.kodingart.txt\n fi\n echo\n echo 'This migration assistant will help you move your VMs from the old Koding'\n echo 'environment to the new one. For each VM that you have, we will copy your'\n echo 'home directory (and any files you have changed) from the old VM into a'\n echo 'Backup directory on the new one.'\n echo\n echo 'Please note:'\n echo ' - This script will copy changed files on the old VM and place them in '\n echo ' the Backup directory of the new VM'\n echo ' - This script will NOT install or configure any software'\n echo ' - This script will NOT place any files outside your home directory.'\n echo ' You will need to move those files yourself.'\n echo ' - This script will NOT start any servers or configure any ports.'\n echo\n echo \"Your VMs:\"\n echo\n for vm in \"${vm_names[@]}\"; do\n echo \" - [$counter] $vm\"\n let counter=counter+1\n done\n echo\n index=''\n while [[ ! $index =~ ^[0-9]+$ || $index -ge $counter ]]; do\n echo -n \"Which vm would you like to migrate? (0-$count) \"\n read index\n done\n vm_name=\"${vm_names[$index]}\"\n echo\n echo \"Downloading files from $vm_name (this could take a while)...\"\n echo\n archive=\"$vm_name.tgz\"\n echo \"-XPOST -u $username:${credentials[$index]} -d vm=${vm_ids[$index]} --insecure https:\/\/migrate.sj.koding.com:3000\/export-files\" | xargs curl > $archive\n echo\n echo \"Extracting your files to directory $(pwd)\/$vm_name...\"\n mkdir -p Backup\/$vm_name\n tar -xzvf $archive -C $vm_name --strip-components 1 > \/dev\/null\n rm $archive\n echo\n echo \"You have successfully migrated $vm_name to the new Koding environment.\"\n echo \"The files have been placed in \/home\/$username\/Backup\/$vm_name. Please use\"\n echo 'the unzip command to access the files and then move or copy them into the'\n echo 'appropriate directories in your new VM.'\n echo\n path: \/home\/{{.Username}}\/migrate.sh\n permissions: '0755'\n owner: {{.Username}}:{{.Username}}\n{{end}}\n\nruncmd:\n # Install & Configure klient\n - [wget, \"{{.LatestKlientURL}}\", -O, \/tmp\/latest-klient.deb]\n - [dpkg, -i, \/tmp\/latest-klient.deb]\n - [chown, -R, '{{.Username}}:{{.Username}}', \/opt\/kite\/klient]\n - service klient stop\n - [sed, -i, 's\/\\.\\\/klient\/sudo -E -u {{.Username}} \\.\\\/klient\/g', \/etc\/init\/klient.conf]\n - service klient start\n - [rm, -f, \/tmp\/latest-klient.deb]\n\n # Configure user's home directory\n - [sh, -c, 'cp -r \/opt\/koding\/userdata\/* \/home\/{{.Username}}\/']\n - [chown, -R, '{{.Username}}:{{.Username}}', \/home\/{{.Username}}\/]\n - [chmod, +x, \/home\/{{.Username}}\/Web\/perl.pl]\n - [chmod, +x, \/home\/{{.Username}}\/Web\/python.py]\n - [chmod, +x, \/home\/{{.Username}}\/Web\/ruby.rb]\n - [rm, -rf, \/opt\/koding\/userdata]\n\n # Configure Apache to serve user's web content\n - [rm, -rf, \/var\/www]\n - [ln, -s, \/home\/{{.Username}}\/Web, \/var\/www]\n - a2enmod cgi\n - service apache2 restart\n\n\nfinal_message: \"All done!\"\n`\n)\n\ntype CloudInitConfig struct {\n\tUsername string\n\tUserSSHKeys []string\n\tHostname string\n\tKiteKey string\n\tLatestKlientURL string \/\/ URL of the latest version of the Klient package\n\tApachePort int \/\/ Defines the base apache running port, should be 80 or 443\n\tKitePort int \/\/ Defines the running kite port, like 3000\n\n\t\/\/ Needed for migrate.sh script\n\tPasswords string\n\tVmNames string\n\tVmIds string\n\tShouldMigrate bool\n\n\tTest bool\n}\n\nfunc (c *CloudInitConfig) setupMigrateScript() {\n\t\/\/ FIXME: Hack. 
Revise here.\n\tif c.Test {\n\t\treturn\n\t}\n\tvms, err := modelhelper.GetUserVMs(c.Username)\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(vms) == 0 {\n\t\treturn\n\t}\n\n\tpasswords := make([]string, 0, len(vms))\n\tvmIds := make([]string, 0, len(vms))\n\tvmNames := make([]string, 0, len(vms))\n\n\tfor _, vm := range vms {\n\t\tid := vm.Id.Hex()\n\t\tpasswords = append(passwords, token.StringToken(c.Username, id))\n\t\tvmIds = append(vmIds, id)\n\t\tvmNames = append(vmNames, vm.HostnameAlias)\n\t}\n\n\tc.Passwords = strings.Join(passwords, \" \")\n\tc.VmIds = strings.Join(vmIds, \" \")\n\tc.VmNames = strings.Join(vmNames, \" \")\n\n\tc.ShouldMigrate = true\n}\n<|endoftext|>"} {"text":"package models\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\tsocialmodels \"socialapi\/models\"\n\t\"socialapi\/request\"\n\t\"socialapi\/workers\/notification\/models\"\n\t\"time\"\n)\n\ntype MailerContainer struct {\n\tActivity *models.NotificationActivity\n\tContent *models.NotificationContent\n\tAccountId int64\n\tMessage string\n\tSlug string\n\tActivityMessage string\n\tObjectType string\n\tGroup GroupContent\n\tCreatedAt time.Time\n}\n\nfunc NewMailerContainer() *MailerContainer {\n\treturn &MailerContainer{}\n}\n\nfunc (mc *MailerContainer) PrepareContainer() error {\n\tif err := mc.validateContainer(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ if content type not valid return\n\tcontentType, err := mc.Content.GetContentType()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar target *socialmodels.ChannelMessage\n\n\tswitch mc.Content.TypeConstant {\n\tcase models.NotificationContent_TYPE_PM:\n\t\ttarget, err = fetchChannelTarget(mc.Content.TargetId)\n\tdefault:\n\t\ttarget, err = fetchMessageTarget(mc.Content.TargetId)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmc.prepareGroup(target)\n\tmc.prepareSlug(target)\n\tmc.prepareObjectType(target)\n\tmc.Message = mc.fetchContentBody(target)\n\tcontentType.SetActorId(target.AccountId)\n\tcontentType.SetListerId(mc.AccountId)\n\n\tmc.ActivityMessage = contentType.GetActivity()\n\n\treturn nil\n}\n\nfunc fetchChannelTarget(channelId int64) (*socialmodels.ChannelMessage, error) {\n\tcml := socialmodels.NewChannelMessageList()\n\tq := request.NewQuery()\n\tq.Limit = 1\n\tmessageIds, err := cml.FetchMessageIdsByChannelId(channelId, q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(messageIds) == 0 {\n\t\treturn nil, fmt.Errorf(\"private message not found\")\n\t}\n\n\treturn fetchMessageTarget(messageIds[0])\n}\n\nfunc fetchMessageTarget(messageId int64) (*socialmodels.ChannelMessage, error) {\n\ttarget := socialmodels.NewChannelMessage()\n\tif err := target.ById(messageId); err != nil {\n\t\treturn nil, fmt.Errorf(\"target message not found\")\n\t}\n\n\treturn target, nil\n}\n\nfunc (mc *MailerContainer) validateContainer() error {\n\tif mc.AccountId == 0 {\n\t\treturn errors.New(\"account id is not set\")\n\t}\n\tif mc.Activity == nil {\n\t\treturn errors.New(\"activity is not set\")\n\t}\n\tif mc.Content == nil {\n\t\treturn errors.New(\"content is not set\")\n\t}\n\n\treturn nil\n}\n\nfunc (mc *MailerContainer) prepareGroup(cm *socialmodels.ChannelMessage) {\n\tc := socialmodels.NewChannel()\n\tif err := c.ById(cm.InitialChannelId); err != nil {\n\t\treturn\n\t}\n\t\/\/ TODO fix these Slug and Name\n\tmc.Group = GroupContent{\n\t\tSlug: c.GroupName,\n\t\tName: c.GroupName,\n\t}\n}\n\nfunc (mc *MailerContainer) prepareSlug(cm *socialmodels.ChannelMessage) {\n\tswitch cm.TypeConstant {\n\tcase socialmodels.ChannelMessage_TYPE_REPLY:\n\t\t\/\/ TODO we 
need append something like comment id to parent message slug\n\t\tmc.Slug = fetchRepliedMessage(cm.Id).Slug\n\tcase socialmodels.ChannelMessage_TYPE_PRIVATE_MESSAGE:\n\t\tmc.Slug = fetchPrivateChannelSlug(cm.Id)\n\tdefault:\n\t\tmc.Slug = cm.Slug\n\t}\n}\n\nfunc (mc *MailerContainer) prepareObjectType(cm *socialmodels.ChannelMessage) {\n\tswitch cm.TypeConstant {\n\tcase socialmodels.ChannelMessage_TYPE_POST:\n\t\tmc.ObjectType = \"status update\"\n\tcase socialmodels.ChannelMessage_TYPE_REPLY:\n\t\tmc.ObjectType = \"comment\"\n\tcase socialmodels.ChannelMessage_TYPE_PRIVATE_MESSAGE:\n\t\tmc.ObjectType = \"private message\"\n\t}\n}\n\nfunc (mc *MailerContainer) fetchContentBody(cm *socialmodels.ChannelMessage) string {\n\tif cm == nil {\n\t\treturn \"\"\n\t}\n\n\tswitch mc.Content.TypeConstant {\n\tcase models.NotificationContent_TYPE_COMMENT:\n\t\treturn fetchLastReplyBody(cm.Id)\n\tdefault:\n\t\treturn cm.Body\n\t}\n}\n\nfunc fetchPrivateChannelSlug(messageId int64) string {\n\tcml := socialmodels.NewChannelMessageList()\n\tids, err := cml.FetchMessageChannelIds(messageId)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\tif len(ids) == 0 {\n\t\treturn \"\"\n\t}\n\n\treturn fmt.Sprintf(\"Message\/%d\", ids[0])\n}\n\nfunc fetchRepliedMessage(replyId int64) *socialmodels.ChannelMessage {\n\tmr := socialmodels.NewMessageReply()\n\tmr.ReplyId = replyId\n\n\tparent, err := mr.FetchParent()\n\tif err != nil {\n\t\tparent = socialmodels.NewChannelMessage()\n\t}\n\n\treturn parent\n}\n\nfunc fetchLastReplyBody(targetId int64) string {\n\tmr := socialmodels.NewMessageReply()\n\tmr.MessageId = targetId\n\tquery := request.NewQuery()\n\tquery.Limit = 1\n\tmessages, err := mr.List(query)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\tif len(messages) == 0 {\n\t\treturn \"\"\n\t}\n\n\treturn messages[0].Body\n}\nEmailNotifier: Remove fetchContentBody functionpackage models\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\tsocialmodels \"socialapi\/models\"\n\t\"socialapi\/request\"\n\t\"socialapi\/workers\/notification\/models\"\n\t\"time\"\n)\n\ntype MailerContainer struct {\n\tActivity *models.NotificationActivity\n\tContent *models.NotificationContent\n\tAccountId int64\n\tMessage string\n\tSlug string\n\tActivityMessage string\n\tObjectType string\n\tGroup GroupContent\n\tCreatedAt time.Time\n}\n\nfunc NewMailerContainer() *MailerContainer {\n\treturn &MailerContainer{}\n}\n\nfunc (mc *MailerContainer) PrepareContainer() error {\n\tif err := mc.validateContainer(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ if content type not valid return\n\tcontentType, err := mc.Content.GetContentType()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar target *socialmodels.ChannelMessage\n\n\tswitch mc.Content.TypeConstant {\n\tcase models.NotificationContent_TYPE_PM:\n\t\ttarget, err = fetchChannelTarget(mc.Content.TargetId)\n\tdefault:\n\t\ttarget, err = fetchMessageTarget(mc.Content.TargetId)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ read the target body only after the error check; a failed fetch\n\t\/\/ leaves target nil and reading it earlier would panic\n\tmc.Message = target.Body\n\n\tmc.prepareGroup(target)\n\tmc.prepareSlug(target)\n\tmc.prepareObjectType(target)\n\tcontentType.SetActorId(target.AccountId)\n\tcontentType.SetListerId(mc.AccountId)\n\n\tmc.ActivityMessage = contentType.GetActivity()\n\n\treturn nil\n}\n\nfunc fetchChannelTarget(channelId int64) (*socialmodels.ChannelMessage, error) {\n\tcml := socialmodels.NewChannelMessageList()\n\tq := request.NewQuery()\n\tq.Limit = 1\n\tmessageIds, err := cml.FetchMessageIdsByChannelId(channelId, q)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\n\tif len(messageIds) == 0 {\n\t\treturn nil, fmt.Errorf(\"private message not found\")\n\t}\n\n\treturn fetchMessageTarget(messageIds[0])\n}\n\nfunc fetchMessageTarget(messageId int64) (*socialmodels.ChannelMessage, error) {\n\ttarget := socialmodels.NewChannelMessage()\n\tif err := target.ById(messageId); err != nil {\n\t\treturn nil, fmt.Errorf(\"target message not found\")\n\t}\n\n\treturn target, nil\n}\n\nfunc (mc *MailerContainer) validateContainer() error {\n\tif mc.AccountId == 0 {\n\t\treturn errors.New(\"account id is not set\")\n\t}\n\tif mc.Activity == nil {\n\t\treturn errors.New(\"activity is not set\")\n\t}\n\tif mc.Content == nil {\n\t\treturn errors.New(\"content is not set\")\n\t}\n\n\treturn nil\n}\n\nfunc (mc *MailerContainer) prepareGroup(cm *socialmodels.ChannelMessage) {\n\tc := socialmodels.NewChannel()\n\tif err := c.ById(cm.InitialChannelId); err != nil {\n\t\treturn\n\t}\n\t\/\/ TODO fix these Slug and Name\n\tmc.Group = GroupContent{\n\t\tSlug: c.GroupName,\n\t\tName: c.GroupName,\n\t}\n}\n\nfunc (mc *MailerContainer) prepareSlug(cm *socialmodels.ChannelMessage) {\n\tswitch cm.TypeConstant {\n\tcase socialmodels.ChannelMessage_TYPE_REPLY:\n\t\t\/\/ TODO we need append something like comment id to parent message slug\n\t\tmc.Slug = fetchRepliedMessage(cm.Id).Slug\n\tcase socialmodels.ChannelMessage_TYPE_PRIVATE_MESSAGE:\n\t\tmc.Slug = fetchPrivateChannelSlug(cm.Id)\n\tdefault:\n\t\tmc.Slug = cm.Slug\n\t}\n}\n\nfunc (mc *MailerContainer) prepareObjectType(cm *socialmodels.ChannelMessage) {\n\tswitch cm.TypeConstant {\n\tcase socialmodels.ChannelMessage_TYPE_POST:\n\t\tmc.ObjectType = \"status update\"\n\tcase socialmodels.ChannelMessage_TYPE_REPLY:\n\t\tmc.ObjectType = \"comment\"\n\tcase socialmodels.ChannelMessage_TYPE_PRIVATE_MESSAGE:\n\t\tmc.ObjectType = \"private message\"\n\t}\n}\n\nfunc fetchPrivateChannelSlug(messageId int64) string {\n\tcml := socialmodels.NewChannelMessageList()\n\tids, err := cml.FetchMessageChannelIds(messageId)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\tif len(ids) == 0 {\n\t\treturn \"\"\n\t}\n\n\treturn fmt.Sprintf(\"Message\/%d\", ids[0])\n}\n\nfunc fetchRepliedMessage(replyId int64) *socialmodels.ChannelMessage {\n\tmr := socialmodels.NewMessageReply()\n\tmr.ReplyId = replyId\n\n\tparent, err := mr.FetchParent()\n\tif err != nil {\n\t\tparent = socialmodels.NewChannelMessage()\n\t}\n\n\treturn parent\n}\n\nfunc fetchLastReplyBody(targetId int64) string {\n\tmr := socialmodels.NewMessageReply()\n\tmr.MessageId = targetId\n\tquery := request.NewQuery()\n\tquery.Limit = 1\n\tmessages, err := mr.List(query)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\tif len(messages) == 0 {\n\t\treturn \"\"\n\t}\n\n\treturn messages[0].Body\n}\n<|endoftext|>"} {"text":"package networkcommands\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/jrperritt\/rack\/handler\"\n\t\"github.com\/jrperritt\/rack\/util\"\n\t\"github.com\/rackspace\/gophercloud\/rackspace\/networking\/v2\/networks\"\n)\n\nvar remove = cli.Command{\n\tName: \"delete\",\n\tUsage: util.Usage(commandPrefix, \"delete\", \"\"),\n\tDescription: \"Deletes an existing network\",\n\tAction: actionDelete,\n\tFlags: util.CommandFlags(flagsDelete, keysDelete),\n\tBashComplete: func(c *cli.Context) {\n\t\tutil.CompleteFlags(util.CommandFlags(flagsDelete, keysDelete))\n\t},\n}\n\nfunc flagsDelete() []cli.Flag {\n\treturn []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"id\",\n\t\t\tUsage: \"[optional; required if `name` or `stdin` isn't provided] The ID 
of the network\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"name\",\n\t\t\tUsage: \"[optional; required if `id` or `stdin` isn't provided] The name of the network.\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"stdin\",\n\t\t\tUsage: \"[optional; required if `name` or `id` isn't provided] The field being piped into STDIN. Valid values are: id\",\n\t\t},\n\t}\n}\n\nvar keysDelete = []string{\"ID\", \"Name\", \"Up\", \"Status\", \"Shared\", \"Tenant ID\"}\n\ntype paramsDelete struct {\n\tnetworkID string\n}\n\ntype commandDelete handler.Command\n\nfunc actionDelete(c *cli.Context) {\n\tcommand := &commandDelete{\n\t\tCtx: &handler.Context{\n\t\t\tCLIContext: c,\n\t\t},\n\t}\n\thandler.Handle(command)\n}\n\nfunc (command *commandDelete) Context() *handler.Context {\n\treturn command.Ctx\n}\n\nfunc (command *commandDelete) Keys() []string {\n\treturn keysDelete\n}\n\nfunc (command *commandDelete) ServiceClientType() string {\n\treturn serviceClientType\n}\n\nfunc (command *commandDelete) HandleFlags(resource *handler.Resource) error {\n\tresource.Params = ¶msDelete{}\n\treturn nil\n}\n\nfunc (command *commandDelete) HandlePipe(resource *handler.Resource, item string) error {\n\tresource.Params.(*paramsDelete).networkID = item\n\treturn nil\n}\n\nfunc (command *commandDelete) HandleSingle(resource *handler.Resource) error {\n\tid := command.Ctx.CLIContext.String(\"id\")\n\tresource.Params.(*paramsDelete).networkID = id\n\treturn nil\n}\n\nfunc (command *commandDelete) Execute(resource *handler.Resource) {\n\tnetworkID := resource.Params.(*paramsDelete).networkID\n\terr := networks.Delete(command.Ctx.ServiceClient, networkID).ExtractErr()\n\tif err != nil {\n\t\tresource.Err = err\n\t\treturn\n\t}\n\tresource.Result = fmt.Sprintf(\"Successfully deleted network [%s] \\n\", networkID)\n}\n\nfunc (command *commandDelete) StdinField() string {\n\treturn \"id\"\n}\nremove keys from 'network delete'package networkcommands\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/jrperritt\/rack\/handler\"\n\t\"github.com\/jrperritt\/rack\/util\"\n\t\"github.com\/rackspace\/gophercloud\/rackspace\/networking\/v2\/networks\"\n)\n\nvar remove = cli.Command{\n\tName: \"delete\",\n\tUsage: util.Usage(commandPrefix, \"delete\", \"\"),\n\tDescription: \"Deletes an existing network\",\n\tAction: actionDelete,\n\tFlags: util.CommandFlags(flagsDelete, keysDelete),\n\tBashComplete: func(c *cli.Context) {\n\t\tutil.CompleteFlags(util.CommandFlags(flagsDelete, keysDelete))\n\t},\n}\n\nfunc flagsDelete() []cli.Flag {\n\treturn []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"id\",\n\t\t\tUsage: \"[optional; required if `name` or `stdin` isn't provided] The ID of the network\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"name\",\n\t\t\tUsage: \"[optional; required if `id` or `stdin` isn't provided] The name of the network.\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"stdin\",\n\t\t\tUsage: \"[optional; required if `name` or `id` isn't provided] The field being piped into STDIN. 
Valid values are: id\",\n\t\t},\n\t}\n}\n\nvar keysDelete = []string{}\n\ntype paramsDelete struct {\n\tnetworkID string\n}\n\ntype commandDelete handler.Command\n\nfunc actionDelete(c *cli.Context) {\n\tcommand := &commandDelete{\n\t\tCtx: &handler.Context{\n\t\t\tCLIContext: c,\n\t\t},\n\t}\n\thandler.Handle(command)\n}\n\nfunc (command *commandDelete) Context() *handler.Context {\n\treturn command.Ctx\n}\n\nfunc (command *commandDelete) Keys() []string {\n\treturn keysDelete\n}\n\nfunc (command *commandDelete) ServiceClientType() string {\n\treturn serviceClientType\n}\n\nfunc (command *commandDelete) HandleFlags(resource *handler.Resource) error {\n\tresource.Params = ¶msDelete{}\n\treturn nil\n}\n\nfunc (command *commandDelete) HandlePipe(resource *handler.Resource, item string) error {\n\tresource.Params.(*paramsDelete).networkID = item\n\treturn nil\n}\n\nfunc (command *commandDelete) HandleSingle(resource *handler.Resource) error {\n\tid := command.Ctx.CLIContext.String(\"id\")\n\tresource.Params.(*paramsDelete).networkID = id\n\treturn nil\n}\n\nfunc (command *commandDelete) Execute(resource *handler.Resource) {\n\tnetworkID := resource.Params.(*paramsDelete).networkID\n\terr := networks.Delete(command.Ctx.ServiceClient, networkID).ExtractErr()\n\tif err != nil {\n\t\tresource.Err = err\n\t\treturn\n\t}\n\tresource.Result = fmt.Sprintf(\"Successfully deleted network [%s] \\n\", networkID)\n}\n\nfunc (command *commandDelete) StdinField() string {\n\treturn \"id\"\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2017-present The Hugo Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package releaser implements a set of utilities and a wrapper around Goreleaser\n\/\/ to help automate the Hugo release process.\npackage releaser\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n)\n\nconst (\n\tissueLinkTemplate = \"[#%d](https:\/\/github.com\/gohugoio\/hugo\/issues\/%d)\"\n\tlinkTemplate = \"[%s](%s)\"\n\treleaseNotesMarkdownTemplatePatchRelease = `\n{{ if eq (len .All) 1 }}\nThis is a bug-fix release with one important fix.\n{{ else }}\nThis is a bug-fix release with a couple of important fixes.\n{{ end }}\n{{ range .All }}\n{{- if .GitHubCommit -}}\n* {{ .Subject }} {{ . | commitURL }} {{ . | authorURL }} {{ range .Issues }}{{ . | issue }}{{ end }}\n{{ else -}}\n* {{ .Subject }} {{ range .Issues }}{{ . 
| issue }}{{ end }}\n{{ end -}}\n{{- end }}\n\n\n`\n\treleaseNotesMarkdownTemplate = `\n{{- $contribsPerAuthor := .All.ContribCountPerAuthor -}}\n{{- $docsContribsPerAuthor := .Docs.ContribCountPerAuthor -}}\n\nThis release represents **{{ len .All }} contributions by {{ len $contribsPerAuthor }} contributors** to the main Hugo code base.\n\n{{- if gt (len $contribsPerAuthor) 3 -}}\n{{- $u1 := index $contribsPerAuthor 0 -}}\n{{- $u2 := index $contribsPerAuthor 1 -}}\n{{- $u3 := index $contribsPerAuthor 2 -}}\n{{- $u4 := index $contribsPerAuthor 3 -}}\n{{- $u1.AuthorLink }} leads the Hugo development with a significant amount of contributions, but also a big shoutout to {{ $u2.AuthorLink }}, {{ $u3.AuthorLink }}, and {{ $u4.AuthorLink }} for their ongoing contributions.\nAnd a big thanks to [@digitalcraftsman](https:\/\/github.com\/digitalcraftsman) and [@onedrawingperday](https:\/\/github.com\/onedrawingperday) for their relentless work on keeping the themes site in pristine condition and to [@davidsneighbour](https:\/\/github.com\/davidsneighbour) and [@kaushalmodi](https:\/\/github.com\/kaushalmodi) for all the great work on the documentation site.\n{{ end }}\nMany have also been busy writing and fixing the documentation in [hugoDocs](https:\/\/github.com\/gohugoio\/hugoDocs), \nwhich has received **{{ len .Docs }} contributions by {{ len $docsContribsPerAuthor }} contributors**.\n{{- if gt (len $docsContribsPerAuthor) 3 -}}\n{{- $u1 := index $docsContribsPerAuthor 0 -}}\n{{- $u2 := index $docsContribsPerAuthor 1 -}}\n{{- $u3 := index $docsContribsPerAuthor 2 -}}\n{{- $u4 := index $docsContribsPerAuthor 3 }} A special thanks to {{ $u1.AuthorLink }}, {{ $u2.AuthorLink }}, {{ $u3.AuthorLink }}, and {{ $u4.AuthorLink }} for their work on the documentation site.\n{{ end }}\n\nHugo now has:\n\n{{ with .Repo -}}\n* {{ .Stars }}+ [stars](https:\/\/github.com\/gohugoio\/hugo\/stargazers)\n* {{ len .Contributors }}+ [contributors](https:\/\/github.com\/gohugoio\/hugo\/graphs\/contributors)\n{{- end -}}\n{{ with .ThemeCount }}\n* {{ . }}+ [themes](http:\/\/themes.gohugo.io\/)\n{{ end }}\n{{ with .Notes }}\n## Notes\n{{ template \"change-section\" . }}\n{{- end -}}\n## Enhancements\n{{ template \"change-headers\" .Enhancements -}}\n## Fixes\n{{ template \"change-headers\" .Fixes -}}\n\n{{ define \"change-headers\" }}\n{{ $tmplChanges := index . \"templateChanges\" -}}\n{{- $outChanges := index . \"outChanges\" -}}\n{{- $coreChanges := index . \"coreChanges\" -}}\n{{- $otherChanges := index . \"otherChanges\" -}}\n{{- with $tmplChanges -}}\n### Templates\n{{ template \"change-section\" . }}\n{{- end -}}\n{{- with $outChanges -}}\n### Output\n{{ template \"change-section\" . }}\n{{- end -}}\n{{- with $coreChanges -}}\n### Core\n{{ template \"change-section\" . }}\n{{- end -}}\n{{- with $otherChanges -}}\n### Other\n{{ template \"change-section\" . }}\n{{- end -}}\n{{ end }}\n\n\n{{ define \"change-section\" }}\n{{ range . }}\n{{- if .GitHubCommit -}}\n* {{ .Subject }} {{ . | commitURL }} {{ . | authorURL }} {{ range .Issues }}{{ . | issue }}{{ end }}\n{{ else -}}\n* {{ .Subject }} {{ range .Issues }}{{ . 
| issue }}{{ end }}\n{{ end -}}\n{{- end }}\n{{ end }}\n`\n)\n\nvar templateFuncs = template.FuncMap{\n\t\"isPatch\": func(c changeLog) bool {\n\t\treturn !strings.HasSuffix(c.Version, \"0\")\n\t},\n\t\"issue\": func(id int) string {\n\t\treturn fmt.Sprintf(issueLinkTemplate, id, id)\n\t},\n\t\"commitURL\": func(info gitInfo) string {\n\t\tif info.GitHubCommit.HTMLURL == \"\" {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn fmt.Sprintf(linkTemplate, info.Hash, info.GitHubCommit.HTMLURL)\n\t},\n\t\"authorURL\": func(info gitInfo) string {\n\t\tif info.GitHubCommit.Author.Login == \"\" {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn fmt.Sprintf(linkTemplate, \"@\"+info.GitHubCommit.Author.Login, info.GitHubCommit.Author.HTMLURL)\n\t},\n}\n\nfunc writeReleaseNotes(version string, infosMain, infosDocs gitInfos, to io.Writer) error {\n\tclient := newGitHubAPI(\"hugo\")\n\tchanges := gitInfosToChangeLog(infosMain, infosDocs)\n\tchanges.Version = version\n\trepo, err := client.fetchRepo()\n\tif err == nil {\n\t\tchanges.Repo = &repo\n\t}\n\tthemeCount, err := fetchThemeCount()\n\tif err == nil {\n\t\tchanges.ThemeCount = themeCount\n\t}\n\n\tmtempl := releaseNotesMarkdownTemplate\n\n\tif !strings.HasSuffix(version, \"0\") {\n\t\tmtempl = releaseNotesMarkdownTemplatePatchRelease\n\t}\n\n\ttmpl, err := template.New(\"\").Funcs(templateFuncs).Parse(mtempl)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = tmpl.Execute(to, changes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n\n}\n\nfunc fetchThemeCount() (int, error) {\n\tresp, err := http.Get(\"https:\/\/raw.githubusercontent.com\/gohugoio\/hugoThemes\/master\/.gitmodules\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer resp.Body.Close()\n\n\tb, _ := ioutil.ReadAll(resp.Body)\n\treturn bytes.Count(b, []byte(\"submodule\")), nil\n}\n\nfunc writeReleaseNotesToTmpFile(version string, infosMain, infosDocs gitInfos) (string, error) {\n\tf, err := ioutil.TempFile(\"\", \"hugorelease\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer f.Close()\n\n\tif err := writeReleaseNotes(version, infosMain, infosDocs, f); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn f.Name(), nil\n}\n\nfunc getReleaseNotesDocsTempDirAndName(version string, final bool) (string, string) {\n\tif final {\n\t\treturn hugoFilepath(\"temp\"), fmt.Sprintf(\"%s-relnotes-ready.md\", version)\n\t}\n\treturn hugoFilepath(\"temp\"), fmt.Sprintf(\"%s-relnotes.md\", version)\n}\n\nfunc getReleaseNotesDocsTempFilename(version string, final bool) string {\n\treturn filepath.Join(getReleaseNotesDocsTempDirAndName(version, final))\n}\n\nfunc (r *ReleaseHandler) releaseNotesState(version string) (releaseNotesState, error) {\n\tdocsTempPath, name := getReleaseNotesDocsTempDirAndName(version, false)\n\t_, err := os.Stat(filepath.Join(docsTempPath, name))\n\n\tif err == nil {\n\t\treturn releaseNotesCreated, nil\n\t}\n\n\tdocsTempPath, name = getReleaseNotesDocsTempDirAndName(version, true)\n\t_, err = os.Stat(filepath.Join(docsTempPath, name))\n\n\tif err == nil {\n\t\treturn releaseNotesReady, nil\n\t}\n\n\tif !os.IsNotExist(err) {\n\t\treturn releaseNotesNone, err\n\t}\n\n\treturn releaseNotesNone, nil\n\n}\n\nfunc (r *ReleaseHandler) writeReleaseNotesToTemp(version string, isPatch bool, infosMain, infosDocs gitInfos) (string, error) {\n\n\tdocsTempPath, name := getReleaseNotesDocsTempDirAndName(version, isPatch)\n\n\tvar (\n\t\tw io.WriteCloser\n\t)\n\n\tif !r.try {\n\t\tos.Mkdir(docsTempPath, os.ModePerm)\n\n\t\tf, err := os.Create(filepath.Join(docsTempPath, name))\n\t\tif err != 
nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tname = f.Name()\n\n\t\tdefer f.Close()\n\n\t\tw = f\n\n\t} else {\n\t\tw = os.Stdout\n\t}\n\n\tif err := writeReleaseNotes(version, infosMain, infosDocs, w); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn name, nil\n\n}\n\nfunc (r *ReleaseHandler) writeReleaseNotesToDocs(title, description, sourceFilename string) (string, error) {\n\ttargetFilename := \"index.md\"\n\tbundleDir := strings.TrimSuffix(filepath.Base(sourceFilename), \"-ready.md\")\n\tcontentDir := hugoFilepath(\"docs\/content\/en\/news\/\" + bundleDir)\n\ttargetFullFilename := filepath.Join(contentDir, targetFilename)\n\n\tif r.try {\n\t\tfmt.Printf(\"Write release notes to \/docs: Bundle %q Dir: %q\\n\", bundleDir, contentDir)\n\t\treturn targetFullFilename, nil\n\t}\n\n\tif err := os.MkdirAll(contentDir, os.ModePerm); err != nil {\n\t\treturn \"\", nil\n\t}\n\n\tb, err := ioutil.ReadFile(sourceFilename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tf, err := os.Create(targetFullFilename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer f.Close()\n\n\tfmTail := \"\"\n\tif !strings.HasSuffix(title, \".0\") {\n\t\t\/\/ Bug fix release\n\t\tfmTail = `\nimages:\n- images\/blog\/hugo-bug-poster.png\n`\n\t}\n\n\tif _, err := f.WriteString(fmt.Sprintf(`\n---\ndate: %s\ntitle: %q\ndescription: %q\ncategories: [\"Releases\"]%s\n---\n\n\t`, time.Now().Format(\"2006-01-02\"), title, description, fmTail)); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif _, err := f.Write(b); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn targetFullFilename, nil\n\n}\nreleaser: Adjust the \"thanks\" section\/\/ Copyright 2017-present The Hugo Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package releaser implements a set of utilities and a wrapper around Goreleaser\n\/\/ to help automate the Hugo release process.\npackage releaser\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n)\n\nconst (\n\tissueLinkTemplate = \"[#%d](https:\/\/github.com\/gohugoio\/hugo\/issues\/%d)\"\n\tlinkTemplate = \"[%s](%s)\"\n\treleaseNotesMarkdownTemplatePatchRelease = `\n{{ if eq (len .All) 1 }}\nThis is a bug-fix release with one important fix.\n{{ else }}\nThis is a bug-fix release with a couple of important fixes.\n{{ end }}\n{{ range .All }}\n{{- if .GitHubCommit -}}\n* {{ .Subject }} {{ . | commitURL }} {{ . | authorURL }} {{ range .Issues }}{{ . | issue }}{{ end }}\n{{ else -}}\n* {{ .Subject }} {{ range .Issues }}{{ . 
| issue }}{{ end }}\n{{ end -}}\n{{- end }}\n\n\n`\n\treleaseNotesMarkdownTemplate = `\n{{- $contribsPerAuthor := .All.ContribCountPerAuthor -}}\n{{- $docsContribsPerAuthor := .Docs.ContribCountPerAuthor -}}\n\nThis release represents **{{ len .All }} contributions by {{ len $contribsPerAuthor }} contributors** to the main Hugo code base.\n\n{{- if gt (len $contribsPerAuthor) 3 -}}\n{{- $u1 := index $contribsPerAuthor 0 -}}\n{{- $u2 := index $contribsPerAuthor 1 -}}\n{{- $u3 := index $contribsPerAuthor 2 -}}\n{{- $u4 := index $contribsPerAuthor 3 -}}\n{{- $u1.AuthorLink }} leads the Hugo development with a significant amount of contributions, but also a big shoutout to {{ $u2.AuthorLink }}, {{ $u3.AuthorLink }}, and {{ $u4.AuthorLink }} for their ongoing contributions.\nAnd a big thanks to [@digitalcraftsman](https:\/\/github.com\/digitalcraftsman) and [@onedrawingperday](https:\/\/github.com\/onedrawingperday) for their relentless work on keeping the themes site in pristine condition and to [@davidsneighbour](https:\/\/github.com\/davidsneighbour), [@coliff](https:\/\/github.com\/coliff) and [@kaushalmodi](https:\/\/github.com\/kaushalmodi) for all the great work on the documentation site.\n{{ end }}\nMany have also been busy writing and fixing the documentation in [hugoDocs](https:\/\/github.com\/gohugoio\/hugoDocs), \nwhich has received **{{ len .Docs }} contributions by {{ len $docsContribsPerAuthor }} contributors**.\n{{- if gt (len $docsContribsPerAuthor) 3 -}}\n{{- $u1 := index $docsContribsPerAuthor 0 -}}\n{{- $u2 := index $docsContribsPerAuthor 1 -}}\n{{- $u3 := index $docsContribsPerAuthor 2 -}}\n{{- $u4 := index $docsContribsPerAuthor 3 }} A special thanks to {{ $u1.AuthorLink }}, {{ $u2.AuthorLink }}, {{ $u3.AuthorLink }}, and {{ $u4.AuthorLink }} for their work on the documentation site.\n{{ end }}\n\nHugo now has:\n\n{{ with .Repo -}}\n* {{ .Stars }}+ [stars](https:\/\/github.com\/gohugoio\/hugo\/stargazers)\n* {{ len .Contributors }}+ [contributors](https:\/\/github.com\/gohugoio\/hugo\/graphs\/contributors)\n{{- end -}}\n{{ with .ThemeCount }}\n* {{ . }}+ [themes](http:\/\/themes.gohugo.io\/)\n{{ end }}\n{{ with .Notes }}\n## Notes\n{{ template \"change-section\" . }}\n{{- end -}}\n## Enhancements\n{{ template \"change-headers\" .Enhancements -}}\n## Fixes\n{{ template \"change-headers\" .Fixes -}}\n\n{{ define \"change-headers\" }}\n{{ $tmplChanges := index . \"templateChanges\" -}}\n{{- $outChanges := index . \"outChanges\" -}}\n{{- $coreChanges := index . \"coreChanges\" -}}\n{{- $otherChanges := index . \"otherChanges\" -}}\n{{- with $tmplChanges -}}\n### Templates\n{{ template \"change-section\" . }}\n{{- end -}}\n{{- with $outChanges -}}\n### Output\n{{ template \"change-section\" . }}\n{{- end -}}\n{{- with $coreChanges -}}\n### Core\n{{ template \"change-section\" . }}\n{{- end -}}\n{{- with $otherChanges -}}\n### Other\n{{ template \"change-section\" . }}\n{{- end -}}\n{{ end }}\n\n\n{{ define \"change-section\" }}\n{{ range . }}\n{{- if .GitHubCommit -}}\n* {{ .Subject }} {{ . | commitURL }} {{ . | authorURL }} {{ range .Issues }}{{ . | issue }}{{ end }}\n{{ else -}}\n* {{ .Subject }} {{ range .Issues }}{{ . 
| issue }}{{ end }}\n{{ end -}}\n{{- end }}\n{{ end }}\n`\n)\n\nvar templateFuncs = template.FuncMap{\n\t\"isPatch\": func(c changeLog) bool {\n\t\treturn !strings.HasSuffix(c.Version, \"0\")\n\t},\n\t\"issue\": func(id int) string {\n\t\treturn fmt.Sprintf(issueLinkTemplate, id, id)\n\t},\n\t\"commitURL\": func(info gitInfo) string {\n\t\tif info.GitHubCommit.HTMLURL == \"\" {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn fmt.Sprintf(linkTemplate, info.Hash, info.GitHubCommit.HTMLURL)\n\t},\n\t\"authorURL\": func(info gitInfo) string {\n\t\tif info.GitHubCommit.Author.Login == \"\" {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn fmt.Sprintf(linkTemplate, \"@\"+info.GitHubCommit.Author.Login, info.GitHubCommit.Author.HTMLURL)\n\t},\n}\n\nfunc writeReleaseNotes(version string, infosMain, infosDocs gitInfos, to io.Writer) error {\n\tclient := newGitHubAPI(\"hugo\")\n\tchanges := gitInfosToChangeLog(infosMain, infosDocs)\n\tchanges.Version = version\n\trepo, err := client.fetchRepo()\n\tif err == nil {\n\t\tchanges.Repo = &repo\n\t}\n\tthemeCount, err := fetchThemeCount()\n\tif err == nil {\n\t\tchanges.ThemeCount = themeCount\n\t}\n\n\tmtempl := releaseNotesMarkdownTemplate\n\n\tif !strings.HasSuffix(version, \"0\") {\n\t\tmtempl = releaseNotesMarkdownTemplatePatchRelease\n\t}\n\n\ttmpl, err := template.New(\"\").Funcs(templateFuncs).Parse(mtempl)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = tmpl.Execute(to, changes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n\n}\n\nfunc fetchThemeCount() (int, error) {\n\tresp, err := http.Get(\"https:\/\/raw.githubusercontent.com\/gohugoio\/hugoThemes\/master\/.gitmodules\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer resp.Body.Close()\n\n\tb, _ := ioutil.ReadAll(resp.Body)\n\treturn bytes.Count(b, []byte(\"submodule\")), nil\n}\n\nfunc writeReleaseNotesToTmpFile(version string, infosMain, infosDocs gitInfos) (string, error) {\n\tf, err := ioutil.TempFile(\"\", \"hugorelease\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer f.Close()\n\n\tif err := writeReleaseNotes(version, infosMain, infosDocs, f); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn f.Name(), nil\n}\n\nfunc getReleaseNotesDocsTempDirAndName(version string, final bool) (string, string) {\n\tif final {\n\t\treturn hugoFilepath(\"temp\"), fmt.Sprintf(\"%s-relnotes-ready.md\", version)\n\t}\n\treturn hugoFilepath(\"temp\"), fmt.Sprintf(\"%s-relnotes.md\", version)\n}\n\nfunc getReleaseNotesDocsTempFilename(version string, final bool) string {\n\treturn filepath.Join(getReleaseNotesDocsTempDirAndName(version, final))\n}\n\nfunc (r *ReleaseHandler) releaseNotesState(version string) (releaseNotesState, error) {\n\tdocsTempPath, name := getReleaseNotesDocsTempDirAndName(version, false)\n\t_, err := os.Stat(filepath.Join(docsTempPath, name))\n\n\tif err == nil {\n\t\treturn releaseNotesCreated, nil\n\t}\n\n\tdocsTempPath, name = getReleaseNotesDocsTempDirAndName(version, true)\n\t_, err = os.Stat(filepath.Join(docsTempPath, name))\n\n\tif err == nil {\n\t\treturn releaseNotesReady, nil\n\t}\n\n\tif !os.IsNotExist(err) {\n\t\treturn releaseNotesNone, err\n\t}\n\n\treturn releaseNotesNone, nil\n\n}\n\nfunc (r *ReleaseHandler) writeReleaseNotesToTemp(version string, isPatch bool, infosMain, infosDocs gitInfos) (string, error) {\n\n\tdocsTempPath, name := getReleaseNotesDocsTempDirAndName(version, isPatch)\n\n\tvar (\n\t\tw io.WriteCloser\n\t)\n\n\tif !r.try {\n\t\tos.Mkdir(docsTempPath, os.ModePerm)\n\n\t\tf, err := os.Create(filepath.Join(docsTempPath, name))\n\t\tif err != 
nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tname = f.Name()\n\n\t\tdefer f.Close()\n\n\t\tw = f\n\n\t} else {\n\t\tw = os.Stdout\n\t}\n\n\tif err := writeReleaseNotes(version, infosMain, infosDocs, w); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn name, nil\n\n}\n\nfunc (r *ReleaseHandler) writeReleaseNotesToDocs(title, description, sourceFilename string) (string, error) {\n\ttargetFilename := \"index.md\"\n\tbundleDir := strings.TrimSuffix(filepath.Base(sourceFilename), \"-ready.md\")\n\tcontentDir := hugoFilepath(\"docs\/content\/en\/news\/\" + bundleDir)\n\ttargetFullFilename := filepath.Join(contentDir, targetFilename)\n\n\tif r.try {\n\t\tfmt.Printf(\"Write release notes to \/docs: Bundle %q Dir: %q\\n\", bundleDir, contentDir)\n\t\treturn targetFullFilename, nil\n\t}\n\n\tif err := os.MkdirAll(contentDir, os.ModePerm); err != nil {\n\t\treturn \"\", nil\n\t}\n\n\tb, err := ioutil.ReadFile(sourceFilename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tf, err := os.Create(targetFullFilename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer f.Close()\n\n\tfmTail := \"\"\n\tif !strings.HasSuffix(title, \".0\") {\n\t\t\/\/ Bug fix release\n\t\tfmTail = `\nimages:\n- images\/blog\/hugo-bug-poster.png\n`\n\t}\n\n\tif _, err := f.WriteString(fmt.Sprintf(`\n---\ndate: %s\ntitle: %q\ndescription: %q\ncategories: [\"Releases\"]%s\n---\n\n\t`, time.Now().Format(\"2006-01-02\"), title, description, fmTail)); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif _, err := f.Write(b); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn targetFullFilename, nil\n\n}\n<|endoftext|>"} {"text":"package agent\n\nimport (\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\ntype stdioAddr struct{}\n\nfunc (_ *stdioAddr) Network() string {\n\treturn \"stdio\"\n}\n\nfunc (_ *stdioAddr) String() string {\n\treturn \"stdio\"\n}\n\ntype stdioConn struct{}\n\nfunc (_ *stdioConn) Read(b []byte) (int, error) {\n\treturn os.Stdin.Read(b)\n}\n\nfunc (_ *stdioConn) Write(b []byte) (int, error) {\n\treturn os.Stdout.Write(b)\n}\n\n\/\/ Close does NOT implement the net.Conn.Close method. This is unfortunately not\n\/\/ possible with standard input\/output because calling Close on those files\n\/\/ might block if they are being read to or written from. This can very easily\n\/\/ lead to a deadlock if no more input is coming or no more output is going to\n\/\/ be processed. Unfortunately there is no way to implement net.Conn.Close\n\/\/ semantics (which are supposed to unblock Read\/Write operations) with standard\n\/\/ input\/output. 
For this connection, which is effectively a singleton and will\n\/\/ only be used once and for the lifetime of the process, it's best to just\n\/\/ \"close\" it by simply exiting the process.\nfunc (_ *stdioConn) Close() error {\n\tpanic(\"standard input\/output connections don't support closing\")\n}\n\nfunc (_ *stdioConn) LocalAddr() net.Addr {\n\treturn &stdioAddr{}\n}\n\nfunc (_ *stdioConn) RemoteAddr() net.Addr {\n\treturn &stdioAddr{}\n}\n\nfunc (_ *stdioConn) SetDeadline(_ time.Time) error {\n\treturn errors.New(\"deadlines not supported\")\n}\n\nfunc (_ *stdioConn) SetReadDeadline(_ time.Time) error {\n\treturn errors.New(\"deadlines not supported\")\n}\n\nfunc (_ *stdioConn) SetWriteDeadline(_ time.Time) error {\n\treturn errors.New(\"deadlines not supported\")\n}\n\ntype stdioListener struct {\n\tconns chan net.Conn\n}\n\nfunc NewStdioListener() net.Listener {\n\t\/\/ Create a connections channel, with enough space for our lone connection.\n\tconns := make(chan net.Conn, 1)\n\n\t\/\/ Populate the connections.\n\tconns <- &stdioConn{}\n\n\t\/\/ Create the listener.\n\treturn &stdioListener{\n\t\tconns: conns,\n\t}\n}\n\nfunc (l *stdioListener) Accept() (net.Conn, error) {\n\t\/\/ Grab the next connection.\n\tconn, ok := <-l.conns\n\n\t\/\/ If it was already consumed, we've probably been triggered due to close.\n\tif !ok {\n\t\treturn nil, errors.New(\"listener closed\")\n\t}\n\n\t\/\/ Success.\n\treturn conn, nil\n}\n\nfunc (l *stdioListener) Close() error {\n\t\/\/ Close the connections channel, terminating any Accept calls.\n\tclose(l.conns)\n\n\t\/\/ Success.\n\treturn nil\n}\n\nfunc (l *stdioListener) Addr() net.Addr {\n\treturn &stdioAddr{}\n}\n\ntype agentConn struct {\n\tprocess *exec.Cmd\n\tstdin io.WriteCloser\n\tstdout io.Reader\n}\n\nfunc (c *agentConn) Read(p []byte) (int, error) {\n\treturn c.stdout.Read(p)\n}\n\nfunc (c *agentConn) Write(p []byte) (int, error) {\n\treturn c.stdin.Write(p)\n}\n\nfunc (c *agentConn) Close() error {\n\t\/\/ Close the process' standard input.\n\tif err := c.stdin.Close(); err != nil {\n\t\tc.process.Wait()\n\t\treturn err\n\t}\n\n\t\/\/ Wait for the process to terminate.\n\treturn c.process.Wait()\n}\n\nfunc (c *agentConn) LocalAddr() net.Addr {\n\treturn &stdioAddr{}\n}\n\nfunc (c *agentConn) RemoteAddr() net.Addr {\n\treturn &stdioAddr{}\n}\n\nfunc (c *agentConn) SetDeadline(_ time.Time) error {\n\treturn errors.New(\"deadlines not supported\")\n}\n\nfunc (c *agentConn) SetReadDeadline(_ time.Time) error {\n\treturn errors.New(\"deadlines not supported\")\n}\n\nfunc (c *agentConn) SetWriteDeadline(_ time.Time) error {\n\treturn errors.New(\"deadlines not supported\")\n}\nChanged parameter names for consistency with interface definition.package agent\n\nimport (\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\ntype stdioAddr struct{}\n\nfunc (_ *stdioAddr) Network() string {\n\treturn \"stdio\"\n}\n\nfunc (_ *stdioAddr) String() string {\n\treturn \"stdio\"\n}\n\ntype stdioConn struct{}\n\nfunc (_ *stdioConn) Read(b []byte) (int, error) {\n\treturn os.Stdin.Read(b)\n}\n\nfunc (_ *stdioConn) Write(b []byte) (int, error) {\n\treturn os.Stdout.Write(b)\n}\n\n\/\/ Close does NOT implement the net.Conn.Close method. This is unfortunately not\n\/\/ possible with standard input\/output because calling Close on those files\n\/\/ might block if they are being read to or written from. 
This can very easily\n\/\/ lead to a deadlock if no more input is coming or no more output is going to\n\/\/ be processed. Unfortunately there is no way to implement net.Conn.Close\n\/\/ semantics (which are supposed to unblock Read\/Write operations) with standard\n\/\/ input\/output. For this connection, which is effectively a singleton and will\n\/\/ only be used once and for the lifetime of the process, it's best to just\n\/\/ \"close\" it by simply exiting the process.\nfunc (_ *stdioConn) Close() error {\n\tpanic(\"standard input\/output connections don't support closing\")\n}\n\nfunc (_ *stdioConn) LocalAddr() net.Addr {\n\treturn &stdioAddr{}\n}\n\nfunc (_ *stdioConn) RemoteAddr() net.Addr {\n\treturn &stdioAddr{}\n}\n\nfunc (_ *stdioConn) SetDeadline(_ time.Time) error {\n\treturn errors.New(\"deadlines not supported\")\n}\n\nfunc (_ *stdioConn) SetReadDeadline(_ time.Time) error {\n\treturn errors.New(\"deadlines not supported\")\n}\n\nfunc (_ *stdioConn) SetWriteDeadline(_ time.Time) error {\n\treturn errors.New(\"deadlines not supported\")\n}\n\ntype stdioListener struct {\n\tconns chan net.Conn\n}\n\nfunc NewStdioListener() net.Listener {\n\t\/\/ Create a connections channel, with enough space for our lone connection.\n\tconns := make(chan net.Conn, 1)\n\n\t\/\/ Populate the connections.\n\tconns <- &stdioConn{}\n\n\t\/\/ Create the listener.\n\treturn &stdioListener{\n\t\tconns: conns,\n\t}\n}\n\nfunc (l *stdioListener) Accept() (net.Conn, error) {\n\t\/\/ Grab the next connection.\n\tconn, ok := <-l.conns\n\n\t\/\/ If it was already consumed, we've probably been triggered due to close.\n\tif !ok {\n\t\treturn nil, errors.New(\"listener closed\")\n\t}\n\n\t\/\/ Success.\n\treturn conn, nil\n}\n\nfunc (l *stdioListener) Close() error {\n\t\/\/ Close the connections channel, terminating any Accept calls.\n\tclose(l.conns)\n\n\t\/\/ Success.\n\treturn nil\n}\n\nfunc (l *stdioListener) Addr() net.Addr {\n\treturn &stdioAddr{}\n}\n\ntype agentConn struct {\n\tprocess *exec.Cmd\n\tstdin io.WriteCloser\n\tstdout io.Reader\n}\n\nfunc (c *agentConn) Read(b []byte) (int, error) {\n\treturn c.stdout.Read(b)\n}\n\nfunc (c *agentConn) Write(b []byte) (int, error) {\n\treturn c.stdin.Write(b)\n}\n\nfunc (c *agentConn) Close() error {\n\t\/\/ Close the process' standard input.\n\tif err := c.stdin.Close(); err != nil {\n\t\tc.process.Wait()\n\t\treturn err\n\t}\n\n\t\/\/ Wait for the process to terminate.\n\treturn c.process.Wait()\n}\n\nfunc (c *agentConn) LocalAddr() net.Addr {\n\treturn &stdioAddr{}\n}\n\nfunc (c *agentConn) RemoteAddr() net.Addr {\n\treturn &stdioAddr{}\n}\n\nfunc (c *agentConn) SetDeadline(_ time.Time) error {\n\treturn errors.New(\"deadlines not supported\")\n}\n\nfunc (c *agentConn) SetReadDeadline(_ time.Time) error {\n\treturn errors.New(\"deadlines not supported\")\n}\n\nfunc (c *agentConn) SetWriteDeadline(_ time.Time) error {\n\treturn errors.New(\"deadlines not supported\")\n}\n<|endoftext|>"} {"text":"package adaptor\n\nimport (\n\t\"log\"\n\t\"os\/exec\"\n)\n\nfunc CleanRunc() {\n\tcmd := exec.Command(\"\/bin\/bash\", \"-c\", \"kill -9 `ps -ef|grep runc|grep -v grep|awk '{print $2}'`\")\n\t_, err := cmd.Output()\n\tif err != nil {\n\t\tlog.Fatalf(\"[clean runc] kill process error , %v\", err)\n\t}\n\tcmd = exec.Command(\"\/bin\/bash\", \"-c\", \"rm -r \/run\/oci\/specsValidator\")\n\t_, err = cmd.Output()\n\tif err != nil {\n\t\tlog.Fatalf(\"[clean runc] delete folder error , %v\", err)\n\t}\n\tlog.Println(\"clean runc success\")\n\n}\ndivide clean runc into 
2 partspackage adaptor\n\nimport (\n\t\"log\"\n\t\"os\/exec\"\n)\n\nfunc CleanRunc() {\n\tKillRunc()\n\tDeleteRun()\n}\n\nfunc KillRunc() {\n\tcmd := exec.Command(\"\/bin\/bash\", \"-c\", \"kill -9 `ps -ef|grep runc|grep -v grep|awk '{print $2}'`\")\n\t_, err := cmd.Output()\n\tif err != nil {\n\t\tlog.Printf(\"[clean runc] kill process error , %v\", err)\n\t}\n}\n\nfunc DeleteRun() {\n\t_, err := exec.Command(\"\/bin\/bash\", \"-c\", \"ls \/run\/oci\/specsValidator\").Output()\n\tif err == nil {\n\t\tcmd := exec.Command(\"\/bin\/bash\", \"-c\", \"rm -r \/run\/oci\/specsValidator\")\n\t\t_, err = cmd.Output()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[clean runc] delete folder error , %v\", err)\n\t\t}\n\t}\n\n}\n<|endoftext|>"} {"text":"package acceptance_test\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\tacceptance \"github.com\/cloudfoundry\/bosh-bootloader\/acceptance-tests\"\n\t\"github.com\/cloudfoundry\/bosh-bootloader\/testhelpers\"\n\n\t\"github.com\/cloudfoundry\/bosh-bootloader\/acceptance-tests\/actors\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n)\n\nvar _ = Describe(\"concourse deployment test\", func() {\n\tvar (\n\t\tbbl actors.BBL\n\t\tstate acceptance.State\n\t\tlbURL string\n\t\tconfiguration acceptance.Config\n\t\tboshCLI actors.BOSHCLI\n\t\tsshSession *gexec.Session\n\t\tusername string\n\t\tpassword string\n\t\taddress string\n\t\tcaCertPath string\n\t)\n\n\tBeforeEach(func() {\n\t\tvar err error\n\t\tconfiguration, err = acceptance.LoadConfig()\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tbbl = actors.NewBBL(configuration.StateFileDir, pathToBBL, configuration, \"concourse-env\")\n\t\tstate = acceptance.NewState(configuration.StateFileDir)\n\n\t\tsession := bbl.Up(\"--name\", bbl.PredefinedEnvID())\n\t\tEventually(session, 40*time.Minute).Should(gexec.Exit(0))\n\n\t\tcertPath, err := testhelpers.WriteContentsToTempFile(testhelpers.BBL_CERT)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tkeyPath, err := testhelpers.WriteContentsToTempFile(testhelpers.BBL_KEY)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tsession = bbl.CreateLB(\"concourse\", certPath, keyPath, \"\")\n\t\tEventually(session, 10*time.Minute).Should(gexec.Exit(0))\n\n\t\tlbURL, err = actors.LBURL(configuration, bbl, state)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tboshCLI = actors.NewBOSHCLI()\n\n\t\tusername = bbl.DirectorUsername()\n\t\tpassword = bbl.DirectorPassword()\n\t\taddress = bbl.DirectorAddress()\n\t\tcaCertPath = bbl.DirectorCACert()\n\t})\n\n\tAfterEach(func() {\n\t\tif sshSession != nil {\n\t\t\tboshCLI.DeleteDeployment(address, caCertPath, username, password, \"concourse\")\n\t\t\tsshSession.Interrupt()\n\t\t\tEventually(sshSession, \"10s\").Should(gexec.Exit())\n\t\t}\n\t\tsession := bbl.Destroy()\n\t\tEventually(session, 10*time.Minute).Should(gexec.Exit())\n\t})\n\n\tIt(\"is able to deploy concourse and teardown infrastructure\", func() {\n\t\tBy(\"creating an ssh tunnel to the director in print-env\", func() {\n\t\t\tsshSession = bbl.StartSSHTunnel()\n\t\t})\n\n\t\tBy(\"uploading stemcell\", func() {\n\t\t\terr := boshCLI.UploadStemcell(address, caCertPath, username, password, configuration.StemcellPath)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t})\n\n\t\tBy(\"running bosh deploy and checking all the vms are running\", func() {\n\t\t\terr := boshCLI.Deploy(address, caCertPath, username, password, 
\"concourse\",\n\t\t\t\tfmt.Sprintf(\"%s\/concourse-deployment.yml\", configuration.ConcourseDeploymentPath),\n\t\t\t\t\"concourse-vars.yml\",\n\t\t\t\t[]string{fmt.Sprintf(\"%s\/operations\/%s.yml\", configuration.ConcourseDeploymentPath, configuration.IAAS)},\n\t\t\t\tmap[string]string{\"domain\": lbURL},\n\t\t\t)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tEventually(func() int {\n\t\t\t\tvms, err := boshCLI.VMs(address, caCertPath, username, password, \"concourse\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 0\n\t\t\t\t}\n\n\t\t\t\treturn strings.Count(vms, \"running\")\n\t\t\t}, \"1m\", \"10s\").Should(Equal(4))\n\t\t})\n\n\t\tBy(\"testing the deployment\", func() {\n\t\t\ttr := &http.Transport{\n\t\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t\t}\n\t\t\tclient := &http.Client{Transport: tr}\n\n\t\t\tresp, err := client.Get(lbURL)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tExpect(resp.StatusCode).To(Equal(http.StatusOK))\n\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tExpect(string(body)).To(ContainSubstring(\"Concourse<\/title>\"))\n\t\t})\n\n\t\tBy(\"deleting the deployment\", func() {\n\t\t\terr := boshCLI.DeleteDeployment(address, caCertPath, username, password, \"concourse\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t})\n\n\t\tBy(\"deleting load balancers\", func() {\n\t\t\tsession := bbl.DeleteLBs()\n\t\t\tEventually(session, 15*time.Minute).Should(gexec.Exit(0))\n\t\t})\n\t})\n})\n<commit_msg>Use system_domain instead of domain for concourse<commit_after>package acceptance_test\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\tacceptance \"github.com\/cloudfoundry\/bosh-bootloader\/acceptance-tests\"\n\t\"github.com\/cloudfoundry\/bosh-bootloader\/testhelpers\"\n\n\t\"github.com\/cloudfoundry\/bosh-bootloader\/acceptance-tests\/actors\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. 
\"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n)\n\nvar _ = Describe(\"concourse deployment test\", func() {\n\tvar (\n\t\tbbl actors.BBL\n\t\tstate acceptance.State\n\t\tlbURL string\n\t\tconfiguration acceptance.Config\n\t\tboshCLI actors.BOSHCLI\n\t\tsshSession *gexec.Session\n\t\tusername string\n\t\tpassword string\n\t\taddress string\n\t\tcaCertPath string\n\t)\n\n\tBeforeEach(func() {\n\t\tvar err error\n\t\tconfiguration, err = acceptance.LoadConfig()\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tbbl = actors.NewBBL(configuration.StateFileDir, pathToBBL, configuration, \"concourse-env\")\n\t\tstate = acceptance.NewState(configuration.StateFileDir)\n\n\t\tsession := bbl.Up(\"--name\", bbl.PredefinedEnvID())\n\t\tEventually(session, 40*time.Minute).Should(gexec.Exit(0))\n\n\t\tcertPath, err := testhelpers.WriteContentsToTempFile(testhelpers.BBL_CERT)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tkeyPath, err := testhelpers.WriteContentsToTempFile(testhelpers.BBL_KEY)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tsession = bbl.CreateLB(\"concourse\", certPath, keyPath, \"\")\n\t\tEventually(session, 10*time.Minute).Should(gexec.Exit(0))\n\n\t\tlbURL, err = actors.LBURL(configuration, bbl, state)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tboshCLI = actors.NewBOSHCLI()\n\n\t\tusername = bbl.DirectorUsername()\n\t\tpassword = bbl.DirectorPassword()\n\t\taddress = bbl.DirectorAddress()\n\t\tcaCertPath = bbl.DirectorCACert()\n\t})\n\n\tAfterEach(func() {\n\t\tif sshSession != nil {\n\t\t\tboshCLI.DeleteDeployment(address, caCertPath, username, password, \"concourse\")\n\t\t\tsshSession.Interrupt()\n\t\t\tEventually(sshSession, \"10s\").Should(gexec.Exit())\n\t\t}\n\t\tsession := bbl.Destroy()\n\t\tEventually(session, 10*time.Minute).Should(gexec.Exit())\n\t})\n\n\tIt(\"is able to deploy concourse and teardown infrastructure\", func() {\n\t\tBy(\"creating an ssh tunnel to the director in print-env\", func() {\n\t\t\tsshSession = bbl.StartSSHTunnel()\n\t\t})\n\n\t\tBy(\"uploading stemcell\", func() {\n\t\t\terr := boshCLI.UploadStemcell(address, caCertPath, username, password, configuration.StemcellPath)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t})\n\n\t\tBy(\"running bosh deploy and checking all the vms are running\", func() {\n\t\t\terr := boshCLI.Deploy(address, caCertPath, username, password, \"concourse\",\n\t\t\t\tfmt.Sprintf(\"%s\/concourse-deployment.yml\", configuration.ConcourseDeploymentPath),\n\t\t\t\t\"concourse-vars.yml\",\n\t\t\t\t[]string{fmt.Sprintf(\"%s\/operations\/%s.yml\", configuration.ConcourseDeploymentPath, configuration.IAAS)},\n\t\t\t\tmap[string]string{\"system_domain\": lbURL},\n\t\t\t)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tEventually(func() int {\n\t\t\t\tvms, err := boshCLI.VMs(address, caCertPath, username, password, \"concourse\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 0\n\t\t\t\t}\n\n\t\t\t\treturn strings.Count(vms, \"running\")\n\t\t\t}, \"1m\", \"10s\").Should(Equal(4))\n\t\t})\n\n\t\tBy(\"testing the deployment\", func() {\n\t\t\ttr := &http.Transport{\n\t\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t\t}\n\t\t\tclient := &http.Client{Transport: tr}\n\n\t\t\tresp, err := client.Get(lbURL)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tExpect(resp.StatusCode).To(Equal(http.StatusOK))\n\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tExpect(string(body)).To(ContainSubstring(\"<title>Concourse<\/title>\"))\n\t\t})\n\n\t\tBy(\"deleting the 
deployment\", func() {\n\t\t\terr := boshCLI.DeleteDeployment(address, caCertPath, username, password, \"concourse\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t})\n\n\t\tBy(\"deleting load balancers\", func() {\n\t\t\tsession := bbl.DeleteLBs()\n\t\t\tEventually(session, 15*time.Minute).Should(gexec.Exit(0))\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage target_test\n\nimport (\n\t\"testing\"\n)\n\nfunc writeSmallBase(th *KustTestHarness) {\n\tth.writeK(\"\/app\/base\", `\nnamePrefix: a-\ncommonLabels:\n app: myApp\nresources:\n- deployment.yaml\n- service.yaml\n`)\n\tth.writeF(\"\/app\/base\/service.yaml\", `\napiVersion: v1\nkind: Service\nmetadata:\n name: myService\nspec:\n selector:\n backend: bungie\n ports:\n - port: 7002\n`)\n\tth.writeF(\"\/app\/base\/deployment.yaml\", `\napiVersion: apps\/v1\nkind: Deployment\nmetadata:\n name: myDeployment\nspec:\n template:\n metadata:\n labels:\n backend: awesome\n spec:\n containers:\n - name: whatever\n image: whatever\n`)\n}\n\nfunc TestSmallBase(t *testing.T) {\n\tth := NewKustTestHarness(t, \"\/app\/base\")\n\twriteSmallBase(th)\n\tm, err := th.makeKustTarget().MakeCustomizedResMap()\n\tif err != nil {\n\t\tt.Fatalf(\"Err: %v\", err)\n\t}\n\tth.assertActualEqualsExpected(m, `\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n app: myApp\n name: a-myService\nspec:\n ports:\n - port: 7002\n selector:\n app: myApp\n backend: bungie\n---\napiVersion: apps\/v1\nkind: Deployment\nmetadata:\n labels:\n app: myApp\n name: a-myDeployment\nspec:\n selector:\n matchLabels:\n app: myApp\n template:\n metadata:\n labels:\n app: myApp\n backend: awesome\n spec:\n containers:\n - image: whatever\n name: whatever\n`)\n}\n\nfunc TestSmallOverlay(t *testing.T) {\n\tth := NewKustTestHarness(t, \"\/app\/overlay\")\n\twriteSmallBase(th)\n\tth.writeK(\"\/app\/overlay\", `\nnamePrefix: b-\ncommonLabels:\n env: prod\nbases:\n- ..\/base\npatchesStrategicMerge:\n- deployment\/deployment.yaml\nimages:\n- name: whatever\n newTag: 1.8.0\n`)\n\n\tth.writeF(\"\/app\/overlay\/configmap\/app.env\", `\nDB_USERNAME=admin\nDB_PASSWORD=somepw\n`)\n\tth.writeF(\"\/app\/overlay\/configmap\/app-init.ini\", `\nFOO=bar\nBAR=baz\n`)\n\tth.writeF(\"\/app\/overlay\/deployment\/deployment.yaml\", `\napiVersion: apps\/v1\nkind: Deployment\nmetadata:\n name: myDeployment\nspec:\n replicas: 1000\n`)\n\tm, err := th.makeKustTarget().MakeCustomizedResMap()\n\tif err != nil {\n\t\tt.Fatalf(\"Err: %v\", err)\n\t}\n\t\/\/ TODO(#669): The name of the patched Deployment is\n\t\/\/ b-a-myDeployment, retaining the base prefix\n\t\/\/ (example of correct behavior).\n\tth.assertActualEqualsExpected(m, `\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n app: myApp\n env: prod\n name: b-a-myService\nspec:\n ports:\n - port: 7002\n selector:\n app: myApp\n backend: bungie\n env: prod\n---\napiVersion: apps\/v1\nkind: Deployment\nmetadata:\n labels:\n app: myApp\n env: prod\n name: 
b-a-myDeployment\nspec:\n replicas: 1000\n selector:\n matchLabels:\n app: myApp\n env: prod\n template:\n metadata:\n labels:\n app: myApp\n backend: awesome\n env: prod\n spec:\n containers:\n - image: whatever:1.8.0\n name: whatever\n`)\n}\n\nfunc TestSmallOverlayJSONPatch(t *testing.T) {\n\tth := NewKustTestHarness(t, \"\/app\/overlay\")\n\twriteSmallBase(th)\n\tth.writeK(\"\/app\/overlay\", `\nbases:\n- ..\/base\npatchesJson6902:\n- target:\n version: v1\n kind: Service\n name: myService # BUG (https:\/\/github.com\/kubernetes-sigs\/kustomize\/issues\/972): this should be a-myService, because that is what the output for the base contains\n path: service-patch.yaml\n`)\n\n\tth.writeF(\"\/app\/overlay\/service-patch.yaml\", `\n- op: add\n path: \/spec\/selector\/backend\n value: beagle\n`)\n\tm, err := th.makeKustTarget().MakeCustomizedResMap()\n\tif err != nil {\n\t\tt.Fatalf(\"Err: %v\", err)\n\t}\n\tth.assertActualEqualsExpected(m, `\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n app: myApp\n name: a-myService\nspec:\n ports:\n - port: 7002\n selector:\n app: myApp\n backend: beagle\n---\napiVersion: apps\/v1\nkind: Deployment\nmetadata:\n labels:\n app: myApp\n name: a-myDeployment\nspec:\n selector:\n matchLabels:\n app: myApp\n template:\n metadata:\n labels:\n app: myApp\n backend: awesome\n spec:\n containers:\n - image: whatever\n name: whatever\n`)\n}\n<commit_msg>Add test showing shared patches disallowed.<commit_after>\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage target_test\n\nimport (\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc writeSmallBase(th *KustTestHarness) {\n\tth.writeK(\"\/app\/base\", `\nnamePrefix: a-\ncommonLabels:\n app: myApp\nresources:\n- deployment.yaml\n- service.yaml\n`)\n\tth.writeF(\"\/app\/base\/service.yaml\", `\napiVersion: v1\nkind: Service\nmetadata:\n name: myService\nspec:\n selector:\n backend: bungie\n ports:\n - port: 7002\n`)\n\tth.writeF(\"\/app\/base\/deployment.yaml\", `\napiVersion: apps\/v1\nkind: Deployment\nmetadata:\n name: myDeployment\nspec:\n template:\n metadata:\n labels:\n backend: awesome\n spec:\n containers:\n - name: whatever\n image: whatever\n`)\n}\n\nfunc TestSmallBase(t *testing.T) {\n\tth := NewKustTestHarness(t, \"\/app\/base\")\n\twriteSmallBase(th)\n\tm, err := th.makeKustTarget().MakeCustomizedResMap()\n\tif err != nil {\n\t\tt.Fatalf(\"Err: %v\", err)\n\t}\n\tth.assertActualEqualsExpected(m, `\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n app: myApp\n name: a-myService\nspec:\n ports:\n - port: 7002\n selector:\n app: myApp\n backend: bungie\n---\napiVersion: apps\/v1\nkind: Deployment\nmetadata:\n labels:\n app: myApp\n name: a-myDeployment\nspec:\n selector:\n matchLabels:\n app: myApp\n template:\n metadata:\n labels:\n app: myApp\n backend: awesome\n spec:\n containers:\n - image: whatever\n name: whatever\n`)\n}\n\nfunc TestSmallOverlay(t *testing.T) {\n\tth := NewKustTestHarness(t, 
\"\/app\/overlay\")\n\twriteSmallBase(th)\n\tth.writeK(\"\/app\/overlay\", `\nnamePrefix: b-\ncommonLabels:\n env: prod\nbases:\n- ..\/base\npatchesStrategicMerge:\n- deployment\/deployment.yaml\nimages:\n- name: whatever\n newTag: 1.8.0\n`)\n\n\tth.writeF(\"\/app\/overlay\/configmap\/app.env\", `\nDB_USERNAME=admin\nDB_PASSWORD=somepw\n`)\n\tth.writeF(\"\/app\/overlay\/configmap\/app-init.ini\", `\nFOO=bar\nBAR=baz\n`)\n\tth.writeF(\"\/app\/overlay\/deployment\/deployment.yaml\", `\napiVersion: apps\/v1\nkind: Deployment\nmetadata:\n name: myDeployment\nspec:\n replicas: 1000\n`)\n\tm, err := th.makeKustTarget().MakeCustomizedResMap()\n\tif err != nil {\n\t\tt.Fatalf(\"Err: %v\", err)\n\t}\n\t\/\/ TODO(#669): The name of the patched Deployment is\n\t\/\/ b-a-myDeployment, retaining the base prefix\n\t\/\/ (example of correct behavior).\n\tth.assertActualEqualsExpected(m, `\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n app: myApp\n env: prod\n name: b-a-myService\nspec:\n ports:\n - port: 7002\n selector:\n app: myApp\n backend: bungie\n env: prod\n---\napiVersion: apps\/v1\nkind: Deployment\nmetadata:\n labels:\n app: myApp\n env: prod\n name: b-a-myDeployment\nspec:\n replicas: 1000\n selector:\n matchLabels:\n app: myApp\n env: prod\n template:\n metadata:\n labels:\n app: myApp\n backend: awesome\n env: prod\n spec:\n containers:\n - image: whatever:1.8.0\n name: whatever\n`)\n}\n\nfunc TestSharedPatchDisAllowed(t *testing.T) {\n\tth := NewKustTestHarness(t, \"\/app\/overlay\")\n\twriteSmallBase(th)\n\tth.writeK(\"\/app\/overlay\", `\ncommonLabels:\n env: prod\nbases:\n- ..\/base\npatchesStrategicMerge:\n- ..\/shared\/deployment-patch.yaml\n`)\n\tth.writeF(\"\/app\/shared\/deployment-patch.yaml\", `\napiVersion: apps\/v1\nkind: Deployment\nmetadata:\n name: myDeployment\nspec:\n replicas: 1000\n`)\n\t_, err := th.makeKustTarget().MakeCustomizedResMap()\n\tif err == nil {\n\t\tt.Fatalf(\"expected error\")\n\t}\n\tif !strings.Contains(\n\t\terr.Error(),\n\t\t\"security; file '..\/shared\/deployment-patch.yaml' is not in or below '\/app\/overlay'\") {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n}\n\nfunc TestSmallOverlayJSONPatch(t *testing.T) {\n\tth := NewKustTestHarness(t, \"\/app\/overlay\")\n\twriteSmallBase(th)\n\tth.writeK(\"\/app\/overlay\", `\nbases:\n- ..\/base\npatchesJson6902:\n- target:\n version: v1\n kind: Service\n name: myService # BUG (https:\/\/github.com\/kubernetes-sigs\/kustomize\/issues\/972): this should be a-myService, because that is what the output for the base contains\n path: service-patch.yaml\n`)\n\n\tth.writeF(\"\/app\/overlay\/service-patch.yaml\", `\n- op: add\n path: \/spec\/selector\/backend\n value: beagle\n`)\n\tm, err := th.makeKustTarget().MakeCustomizedResMap()\n\tif err != nil {\n\t\tt.Fatalf(\"Err: %v\", err)\n\t}\n\tth.assertActualEqualsExpected(m, `\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n app: myApp\n name: a-myService\nspec:\n ports:\n - port: 7002\n selector:\n app: myApp\n backend: beagle\n---\napiVersion: apps\/v1\nkind: Deployment\nmetadata:\n labels:\n app: myApp\n name: a-myDeployment\nspec:\n selector:\n matchLabels:\n app: myApp\n template:\n metadata:\n labels:\n app: myApp\n backend: awesome\n spec:\n containers:\n - image: whatever\n name: whatever\n`)\n}<|endoftext|>"} {"text":"<commit_before>package controllers\n\nimport (\n\t\"github.com\/anonx\/ok\/action\"\n)\n\n\/\/ App is a sample controller that is used for demonstration purposes.\ntype App struct {\n\tController\n}\n\n\/\/ Before is a magic 
method that is executed before every request.\nfunc (c *App) Before() action.Result {\n\treturn nil\n}\n\n\/\/ Index is an action that is used for generation of a greeting form.\nfunc (c *App) Index() action.Result {\n\treturn c.RenderTemplate(\"test.html\")\n}\n\n\/\/ PostGreet prints received user fullname. If it is not valid,\n\/\/ user is redirected back to index page.\nfunc (c *App) PostGreet(name string) action.Result {\n\treturn nil\n}\n\n\/\/ After is a magic method that is executed after every request.\nfunc (c *App) After() action.Result {\n\treturn nil\n}\n\n\/\/ Finally is a magic method that is executed after every request\n\/\/ no matter what.\nfunc (c *App) Finally() action.Result {\n\treturn nil\n}\n\n\/\/ Init is a system method that will be called once during application's startup.\nfunc (c *App) Init() {\n}\n<commit_msg>Get rid of Init magic method in controllers<commit_after>package controllers\n\nimport (\n\t\"github.com\/anonx\/ok\/action\"\n)\n\n\/\/ App is a sample controller that is used for demonstration purposes.\ntype App struct {\n\tController\n}\n\n\/\/ Before is a magic method that is executed before every request.\nfunc (c *App) Before() action.Result {\n\treturn nil\n}\n\n\/\/ Index is an action that is used for generation of a greeting form.\nfunc (c *App) Index() action.Result {\n\treturn c.RenderTemplate(\"test.html\")\n}\n\n\/\/ PostGreet prints received user fullname. If it is not valid,\n\/\/ user is redirected back to index page.\nfunc (c *App) PostGreet(name string) action.Result {\n\treturn nil\n}\n\n\/\/ After is a magic method that is executed after every request.\nfunc (c *App) After() action.Result {\n\treturn nil\n}\n\n\/\/ Finally is a magic method that is executed after every request\n\/\/ no matter what.\nfunc (c *App) Finally() action.Result {\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package providers\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/BurntSushi\/toml\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\n\t\"github.com\/camptocamp\/bivac\/orchestrators\"\n\t\"github.com\/camptocamp\/bivac\/volume\"\n)\n\n\/\/ Providers stores the list of available providers\ntype Providers struct {\n\tProviders map[string]Provider\n}\n\n\/\/ Provider stores data for one provider\ntype Provider struct {\n\tName string `toml:\"-\"`\n\tPreCmd string `toml:\"pre_cmd\"`\n\tPostCmd string `toml:\"post_cmd\"`\n\tDetectionCmd string `toml:\"detect_cmd\"`\n\tBackupDir string `toml:\"backup_dir\"`\n}\n\ntype configToml struct {\n\tProviders map[string]Provider `toml:\"providers\"`\n}\n\n\/\/ LoadProviders returns the list of providers from the provider config file\nfunc LoadProviders(path string) (providers Providers, err error) {\n\tc := &configToml{}\n\tproviders.Providers = make(map[string]Provider)\n\t_, err = toml.DecodeFile(path, &c)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to load providers from config file: %s\", err)\n\t\treturn\n\t}\n\n\tfor key, value := range c.Providers {\n\t\tprovider := Provider{\n\t\t\tName: key,\n\t\t\tPreCmd: value.PreCmd,\n\t\t\tPostCmd: value.PostCmd,\n\t\t\tDetectionCmd: value.DetectionCmd,\n\t\t\tBackupDir: value.BackupDir,\n\t\t}\n\t\tproviders.Providers[key] = provider\n\t}\n\treturn\n}\n\n\/\/ GetProvider returns a provider based on detection commands\nfunc (providers *Providers) GetProvider(o orchestrators.Orchestrator, v *volume.Volume) (prov Provider, err error) {\n\tdetectionCmds := []string{}\n\tfor _, p := range providers.Providers {\n\t\tdetectionCmds = append(detectionCmds, 
fmt.Sprintf(\"(%s && echo '%s')\", p.DetectionCmd, p.Name))\n\t}\n\tdetectionCmds = append(detectionCmds, \"true\")\n\tfullDetectionCmd := strings.Join(detectionCmds, \" || \")\n\n\tcontainers, err := o.GetContainersMountingVolume(v)\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(containers) < 1 {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"volume\": v.Name,\n\t\t}).Info(\"No running container found using the volume.\")\n\t\treturn\n\t}\n\n\tvar stdout string\n\tfor _, container := range containers {\n\t\tfullDetectionCmd = strings.Replace(fullDetectionCmd, \"$volume\", container.Path, -1)\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"volume\": v.Name,\n\t\t\t\"cmd\": fullDetectionCmd,\n\t\t}).Debugf(\"Running detection command in container %s...\", container.ContainerID)\n\n\t\tstdout, err = o.ContainerExec(container, []string{\"bash\", \"-c\", fullDetectionCmd})\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to run provider detection: %s\", err)\n\t\t}\n\n\t\tstdout = strings.TrimSpace(stdout)\n\n\t\tfor _, p := range providers.Providers {\n\t\t\tif p.Name == stdout {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"volume\": v.Name,\n\t\t\t\t}).Infof(\"This volume should be a %s datadir\", p.Name)\n\t\t\t\tprov = p\n\t\t\t\tv.BackupDir = p.BackupDir\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ RunCmd runs a command into a container\nfunc RunCmd(p Provider, o orchestrators.Orchestrator, v *volume.Volume, cmd string) (err error) {\n\tcontainers, err := o.GetContainersMountingVolume(v)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmdSuccess := false\n\tvar stdout string\n\tfor _, container := range containers {\n\t\tcmd = strings.Replace(cmd, \"$volume\", container.Path, -1)\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"volume\": v.Name,\n\t\t\t\"cmd\": cmd,\n\t\t}).Debugf(\"Running command in container %s...\", container.ContainerID)\n\n\t\tstdout, err = o.ContainerExec(container, []string{\"bash\", \"-c\", cmd})\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"volume\": v.Name,\n\t\t\t\t\"cmd\": cmd,\n\t\t\t\t\"container\": container.ContainerID,\n\t\t\t}).Errorf(\"failed to run command in container: %s\", err)\n\t\t} else {\n\t\t\tcmdSuccess = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif cmdSuccess {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"volume\": v.Name,\n\t\t\t\"cmd\": cmd,\n\t\t}).Debugf(\"stdout: %s\", stdout)\n\t} else {\n\t\treturn fmt.Errorf(\"failed to run command \\\"%s\\\" in containers mounting volume %s\", cmd, v.Name)\n\t}\n\treturn\n}\n<commit_msg>Ignore provider detection when container has no shell<commit_after>package providers\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/BurntSushi\/toml\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/camptocamp\/bivac\/orchestrators\"\n\t\"github.com\/camptocamp\/bivac\/volume\"\n)\n\n\/\/ Providers stores the list of available providers\ntype Providers struct {\n\tProviders map[string]Provider\n}\n\n\/\/ Provider stores data for one provider\ntype Provider struct {\n\tName string `toml:\"-\"`\n\tPreCmd string `toml:\"pre_cmd\"`\n\tPostCmd string `toml:\"post_cmd\"`\n\tDetectionCmd string `toml:\"detect_cmd\"`\n\tBackupDir string `toml:\"backup_dir\"`\n}\n\ntype configToml struct {\n\tProviders map[string]Provider `toml:\"providers\"`\n}\n\n\/\/ LoadProviders returns the list of providers from the provider config file\nfunc LoadProviders(path string) (providers Providers, err error) {\n\tc := &configToml{}\n\tproviders.Providers = make(map[string]Provider)\n\t_, err = toml.DecodeFile(path, 
&c)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to load providers from config file: %s\", err)\n\t\treturn\n\t}\n\n\tfor key, value := range c.Providers {\n\t\tprovider := Provider{\n\t\t\tName: key,\n\t\t\tPreCmd: value.PreCmd,\n\t\t\tPostCmd: value.PostCmd,\n\t\t\tDetectionCmd: value.DetectionCmd,\n\t\t\tBackupDir: value.BackupDir,\n\t\t}\n\t\tproviders.Providers[key] = provider\n\t}\n\treturn\n}\n\n\/\/ GetProvider returns a provider based on detection commands\nfunc (providers *Providers) GetProvider(o orchestrators.Orchestrator, v *volume.Volume) (prov Provider, err error) {\n\tdetectionCmds := []string{}\n\tfor _, p := range providers.Providers {\n\t\tdetectionCmds = append(detectionCmds, fmt.Sprintf(\"(%s && echo '%s')\", p.DetectionCmd, p.Name))\n\t}\n\tdetectionCmds = append(detectionCmds, \"true\")\n\tfullDetectionCmd := strings.Join(detectionCmds, \" || \")\n\n\tcontainers, err := o.GetContainersMountingVolume(v)\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(containers) < 1 {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"volume\": v.Name,\n\t\t}).Info(\"No running container found using the volume.\")\n\t\treturn\n\t}\n\n\tvar stdout string\n\tfor _, container := range containers {\n\t\tfullDetectionCmd = strings.Replace(fullDetectionCmd, \"$volume\", container.Path, -1)\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"volume\": v.Name,\n\t\t\t\"cmd\": fullDetectionCmd,\n\t\t}).Debugf(\"Running detection command in container %s...\", container.ContainerID)\n\n\t\tstdout, err = o.ContainerExec(container, []string{\"bash\", \"-c\", fullDetectionCmd})\n\t\tif err != nil {\n\t\t\tlog.Debugf(\"failed to run provider detection: %s\", err)\n\t\t\terr = nil\n\t\t\tcontinue\n\t\t}\n\n\t\tstdout = strings.TrimSpace(stdout)\n\n\t\tfor _, p := range providers.Providers {\n\t\t\tif p.Name == stdout {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"volume\": v.Name,\n\t\t\t\t}).Infof(\"This volume should be a %s datadir\", p.Name)\n\t\t\t\tprov = p\n\t\t\t\tv.BackupDir = p.BackupDir\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ RunCmd runs a command into a container\nfunc RunCmd(p Provider, o orchestrators.Orchestrator, v *volume.Volume, cmd string) (err error) {\n\tcontainers, err := o.GetContainersMountingVolume(v)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmdSuccess := false\n\tvar stdout string\n\tfor _, container := range containers {\n\t\tcmd = strings.Replace(cmd, \"$volume\", container.Path, -1)\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"volume\": v.Name,\n\t\t\t\"cmd\": cmd,\n\t\t}).Debugf(\"Running command in container %s...\", container.ContainerID)\n\n\t\tstdout, err = o.ContainerExec(container, []string{\"bash\", \"-c\", cmd})\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"volume\": v.Name,\n\t\t\t\t\"cmd\": cmd,\n\t\t\t\t\"container\": container.ContainerID,\n\t\t\t}).Errorf(\"failed to run command in container: %s\", err)\n\t\t} else {\n\t\t\tcmdSuccess = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif cmdSuccess {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"volume\": v.Name,\n\t\t\t\"cmd\": cmd,\n\t\t}).Debugf(\"stdout: %s\", stdout)\n\t} else {\n\t\treturn fmt.Errorf(\"failed to run command \\\"%s\\\" in containers mounting volume %s\", cmd, v.Name)\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t. 
\"koding\/db\/models\"\n\thelper \"koding\/db\/mongodb\/modelhelper\"\n\t\"labix.org\/v2\/mgo\/bson\"\n)\n\n\/\/ hard delete.\nfunc DeleteStatusUpdate(id string) error {\n\terr := RemoveComments(id)\n\tif err != nil {\n\t\tlog.Error(\"Empty Status Update Cannot be deleted\")\n\t\treturn err\n\t}\n\n\terr = RemovePostRelationships(id)\n\tif err != nil {\n\t\tlog.Error(\"Empty Status Update Cannot be deleted\")\n\t\treturn err\n\t}\n\n\terr = helper.DeleteStatusUpdateById(id)\n\tif err != nil {\n\t\tlog.Error(\"Empty Status Update Cannot be deleted\")\n\t\treturn err\n\t}\n\n\tlog.Info(\"Deleted Empty Status Update\")\n\treturn nil\n}\n\n\/\/Creates Relationships both in mongo and neo4j\nfunc CreateRelationship(relationship *Relationship) error {\n\terr := CreateGraphRelationship(relationship)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Debug(\"Add Mongo Relationship\")\n\treturn helper.AddRelationship(relationship)\n}\n\n\/\/Removes Relationships both from mongo and neo4j\nfunc RemoveRelationship(relationship *Relationship) error {\n\terr := RemoveGraphRelationship(relationship)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tselector := helper.Selector{\n\t\t\"sourceId\": relationship.SourceId,\n\t\t\"targetId\": relationship.TargetId,\n\t\t\"as\": relationship.As,\n\t}\n\tlog.Debug(\"Delete Mongo Relationship\")\n\treturn helper.DeleteRelationship(selector)\n}\n\n\/\/Finds synonym of a given tag by tagId\nfunc FindSynonym(tagId string) (*Tag, error) {\n\tselector := helper.Selector{\"sourceId\": helper.GetObjectId(tagId), \"as\": \"synonymOf\"}\n\tsynonymRel, err := helper.GetRelationship(selector)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn helper.GetTagById(synonymRel.TargetId.Hex())\n}\n\nfunc RemoveComments(id string) error {\n\tobjectId := helper.GetObjectId(id)\n\tselector := helper.Selector{\"targetId\": objectId, \"sourceName\": \"JComment\"}\n\n\tremovedNodeIds := make([]bson.ObjectId, 0)\n\trels, err := helper.GetRelationships(selector)\n\tif len(rels) == 0 {\n\t\treturn nil\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, rel := range rels {\n\t\terr = RemoveRelationship(&rel)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tremovedNodeIds = append(removedNodeIds, rel.SourceId)\n\t}\n\n\t\/\/remove comment relationships with opposite orientation\n\tselector = helper.Selector{\"targetId\": helper.Selector{\"$in\": removedNodeIds}}\n\terr = RemoveRelationships(selector)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tselector = helper.Selector{\"_id\": helper.Selector{\"$in\": removedNodeIds}}\n\treturn helper.DeleteComment(selector)\n}\n\nfunc RemovePostRelationships(id string) error {\n\tobjectId := helper.GetObjectId(id)\n\t\/\/ remove post relationships\n\tselector := helper.Selector{\"$or\": []helper.Selector{\n\t\thelper.Selector{\"sourceId\": objectId},\n\t\thelper.Selector{\"targetId\": objectId},\n\t}}\n\treturn RemoveRelationships(selector)\n}\n\nfunc RemoveRelationships(selector helper.Selector) error {\n\trels, err := helper.GetRelationships(selector)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, rel := range rels {\n\t\terr = RemoveRelationship(&rel)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Moderation: error details are logged for DeleteStatusUpdate<commit_after>package topicmodifier\n\nimport (\n\t. 
\"koding\/db\/models\"\n\thelper \"koding\/db\/mongodb\/modelhelper\"\n\t\"labix.org\/v2\/mgo\/bson\"\n)\n\n\/\/ hard delete.\nfunc DeleteStatusUpdate(id string) error {\n\terr := RemoveComments(id)\n\tif err != nil {\n\t\tlog.Error(\"Empty Status Update Cannot be deleted: %s\", err.Error())\n\t\treturn err\n\t}\n\n\terr = RemovePostRelationships(id)\n\tif err != nil {\n\t\tlog.Error(\"Empty Status Update Cannot be deleted: %s\", err.Error())\n\t\treturn err\n\t}\n\n\terr = helper.DeleteStatusUpdateById(id)\n\tif err != nil {\n\t\tlog.Error(\"Empty Status Update Cannot be deleted: %s\", err.Error())\n\t\treturn err\n\t}\n\n\tlog.Info(\"Deleted Empty Status Update\")\n\treturn nil\n}\n\n\/\/Creates Relationships both in mongo and neo4j\nfunc CreateRelationship(relationship *Relationship) error {\n\terr := CreateGraphRelationship(relationship)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Debug(\"Add Mongo Relationship\")\n\treturn helper.AddRelationship(relationship)\n}\n\n\/\/Removes Relationships both from mongo and neo4j\nfunc RemoveRelationship(relationship *Relationship) error {\n\terr := RemoveGraphRelationship(relationship)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tselector := helper.Selector{\n\t\t\"sourceId\": relationship.SourceId,\n\t\t\"targetId\": relationship.TargetId,\n\t\t\"as\": relationship.As,\n\t}\n\tlog.Debug(\"Delete Mongo Relationship\")\n\treturn helper.DeleteRelationship(selector)\n}\n\n\/\/Finds synonym of a given tag by tagId\nfunc FindSynonym(tagId string) (*Tag, error) {\n\tselector := helper.Selector{\"sourceId\": helper.GetObjectId(tagId), \"as\": \"synonymOf\"}\n\tsynonymRel, err := helper.GetRelationship(selector)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn helper.GetTagById(synonymRel.TargetId.Hex())\n}\n\nfunc RemoveComments(id string) error {\n\tobjectId := helper.GetObjectId(id)\n\tselector := helper.Selector{\"targetId\": objectId, \"sourceName\": \"JComment\"}\n\n\tremovedNodeIds := make([]bson.ObjectId, 0)\n\trels, err := helper.GetRelationships(selector)\n\tif len(rels) == 0 {\n\t\treturn nil\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, rel := range rels {\n\t\terr = RemoveRelationship(&rel)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tremovedNodeIds = append(removedNodeIds, rel.SourceId)\n\t}\n\n\t\/\/remove comment relationships with opposite orientation\n\tselector = helper.Selector{\"targetId\": helper.Selector{\"$in\": removedNodeIds}}\n\terr = RemoveRelationships(selector)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tselector = helper.Selector{\"_id\": helper.Selector{\"$in\": removedNodeIds}}\n\treturn helper.DeleteComment(selector)\n}\n\nfunc RemovePostRelationships(id string) error {\n\tobjectId := helper.GetObjectId(id)\n\t\/\/ remove post relationships\n\tselector := helper.Selector{\"$or\": []helper.Selector{\n\t\thelper.Selector{\"sourceId\": objectId},\n\t\thelper.Selector{\"targetId\": objectId},\n\t}}\n\treturn RemoveRelationships(selector)\n}\n\nfunc RemoveRelationships(selector helper.Selector) error {\n\trels, err := helper.GetRelationships(selector)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, rel := range rels {\n\t\terr = RemoveRelationship(&rel)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package webhook\n\nimport (\n\t\"socialapi\/models\"\n\t\"socialapi\/request\"\n\t\"testing\"\n\n\t\"github.com\/koding\/runner\"\n\t. 
\"github.com\/smartystreets\/goconvey\/convey\"\n)\n\nfunc TestIntegrationCreate(t *testing.T) {\n\tr := runner.New(\"test\")\n\tif err := r.Init(); err != nil {\n\t\tt.Fatalf(\"couldnt start bongo %s\", err)\n\t}\n\tdefer r.Close()\n\n\tConvey(\"while creating an integration\", t, func() {\n\t\tConvey(\"it should contain both title and type constant\", func() {\n\t\t\ti := NewIntegration()\n\t\t\ti.TypeConstant = Integration_TYPE_INCOMING\n\t\t\terr := i.Create()\n\t\t\tSo(err, ShouldEqual, ErrNameNotSet)\n\n\t\t\ti.Name = models.RandomName()\n\t\t\terr = i.Create()\n\t\t\tSo(err, ShouldEqual, ErrTitleNotSet)\n\n\t\t\ti.TypeConstant = \"\"\n\t\t\ti.Title = models.RandomGroupName()\n\n\t\t\terr = i.Create()\n\t\t\tSo(err, ShouldBeNil)\n\t\t\tSo(i.TypeConstant, ShouldEqual, Integration_TYPE_INCOMING)\n\n\t\t\ti.Id = 0\n\t\t\terr = i.Create()\n\t\t\tSo(err, ShouldEqual, ErrNameNotUnique)\n\n\t\t\tConvey(\"it should be fetched via name\", func() {\n\t\t\t\tni := NewIntegration()\n\t\t\t\tname := models.RandomName()\n\t\t\t\terr := ni.ByName(name)\n\t\t\t\tSo(err, ShouldEqual, ErrIntegrationNotFound)\n\n\t\t\t\terr = ni.ByName(i.Name)\n\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\tSo(ni.Id, ShouldEqual, i.Id)\n\t\t\t})\n\t\t})\n\t})\n\n}\n\nfunc TestIntegrationList(t *testing.T) {\n\tr := runner.New(\"test\")\n\tif err := r.Init(); err != nil {\n\t\tt.Fatalf(\"couldnt start bongo %s\", err)\n\t}\n\tdefer r.Close()\n\n\tConvey(\"while listing an integration\", t, func() {\n\t\tname1 := \".B\" + models.RandomGroupName()\n\t\tname2 := \".A\" + models.RandomGroupName()\n\t\tfirstInt := CreateIntegration(t, name1)\n\t\tsecondInt := CreateIntegration(t, name2)\n\n\t\tConvey(\"it should sort integrations by name\", func() {\n\t\t\ti := NewIntegration()\n\t\t\tints, err := i.List(&request.Query{})\n\t\t\tSo(err, ShouldBeNil)\n\n\t\t\tSo(len(ints), ShouldBeGreaterThanOrEqualTo, 2)\n\t\t\tSo(ints[0].Name, ShouldEqual, name2)\n\t\t\tSo(ints[1].Name, ShouldEqual, name1)\n\t\t})\n\n\t\tReset(func() {\n\t\t\terr := firstInt.Delete()\n\t\t\tSo(err, ShouldBeNil)\n\n\t\t\terr = secondInt.Delete()\n\t\t\tSo(err, ShouldBeNil)\n\t\t})\n\t})\n}\n<commit_msg>integration: fix flaky test<commit_after>package webhook\n\nimport (\n\t\"socialapi\/models\"\n\t\"socialapi\/request\"\n\t\"testing\"\n\n\t\"github.com\/koding\/runner\"\n\t. 
\"github.com\/smartystreets\/goconvey\/convey\"\n)\n\nfunc TestIntegrationCreate(t *testing.T) {\n\tr := runner.New(\"test\")\n\tif err := r.Init(); err != nil {\n\t\tt.Fatalf(\"couldnt start bongo %s\", err)\n\t}\n\tdefer r.Close()\n\n\tConvey(\"while creating an integration\", t, func() {\n\t\tConvey(\"it should contain both title and type constant\", func() {\n\t\t\ti := NewIntegration()\n\t\t\ti.TypeConstant = Integration_TYPE_INCOMING\n\t\t\terr := i.Create()\n\t\t\tSo(err, ShouldEqual, ErrNameNotSet)\n\n\t\t\ti.Name = models.RandomName()\n\t\t\terr = i.Create()\n\t\t\tSo(err, ShouldEqual, ErrTitleNotSet)\n\n\t\t\ti.TypeConstant = \"\"\n\t\t\ti.Title = models.RandomGroupName()\n\n\t\t\terr = i.Create()\n\t\t\tSo(err, ShouldBeNil)\n\t\t\tSo(i.TypeConstant, ShouldEqual, Integration_TYPE_INCOMING)\n\n\t\t\ti.Id = 0\n\t\t\terr = i.Create()\n\t\t\tSo(err, ShouldEqual, ErrNameNotUnique)\n\n\t\t\tConvey(\"it should be fetched via name\", func() {\n\t\t\t\tni := NewIntegration()\n\t\t\t\tname := models.RandomName()\n\t\t\t\terr := ni.ByName(name)\n\t\t\t\tSo(err, ShouldEqual, ErrIntegrationNotFound)\n\n\t\t\t\terr = ni.ByName(i.Name)\n\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\tSo(ni.Id, ShouldEqual, i.Id)\n\t\t\t})\n\t\t})\n\t})\n\n}\n\nfunc TestIntegrationList(t *testing.T) {\n\tr := runner.New(\"test\")\n\tif err := r.Init(); err != nil {\n\t\tt.Fatalf(\"couldnt start bongo %s\", err)\n\t}\n\tdefer r.Close()\n\n\tConvey(\"while listing an integration\", t, func() {\n\t\tname1 := \"001\" + models.RandomGroupName()\n\t\tname2 := \"000\" + models.RandomGroupName()\n\t\tfirstInt := CreateIntegration(t, name1)\n\t\tsecondInt := CreateIntegration(t, name2)\n\n\t\tConvey(\"it should sort integrations by name\", func() {\n\t\t\ti := NewIntegration()\n\t\t\tints, err := i.List(&request.Query{})\n\t\t\tSo(err, ShouldBeNil)\n\n\t\t\tSo(len(ints), ShouldBeGreaterThanOrEqualTo, 2)\n\t\t\tSo(ints[0].Name, ShouldEqual, name2)\n\t\t\tSo(ints[1].Name, ShouldEqual, name1)\n\t\t})\n\n\t\tReset(func() {\n\t\t\terr := firstInt.Delete()\n\t\t\tSo(err, ShouldBeNil)\n\n\t\t\terr = secondInt.Delete()\n\t\t\tSo(err, ShouldBeNil)\n\t\t})\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage line\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"testing\"\n)\n\nvar testOutput = []byte(\"0123456789abcdefghijklmnopqrstuvwxy\")\nvar testInput = []byte(\"012\\n345\\n678\\n9ab\\ncde\\nfgh\\nijk\\nlmn\\nopq\\nrst\\nuvw\\nxy\")\nvar testInputrn = []byte(\"012\\r\\n345\\r\\n678\\r\\n9ab\\r\\ncde\\r\\nfgh\\r\\nijk\\r\\nlmn\\r\\nopq\\r\\nrst\\r\\nuvw\\r\\nxy\\r\\n\\n\\r\\n\")\n\n\/\/ TestReader wraps a []byte and returns reads of a specific length.\ntype testReader struct {\n\tdata []byte\n\tstride int\n}\n\nfunc (t *testReader) Read(buf []byte) (n int, err os.Error) {\n\tn = t.stride\n\tif n > len(t.data) {\n\t\tn = len(t.data)\n\t}\n\tif n > len(buf) {\n\t\tn = len(buf)\n\t}\n\tcopy(buf, t.data)\n\tt.data = t.data[n:]\n\tif len(t.data) == 0 {\n\t\terr = os.EOF\n\t}\n\treturn\n}\n\nfunc testLineReader(t *testing.T, input []byte) {\n\tfor stride := 1; stride < len(input); stride++ {\n\t\tdone := 0\n\t\treader := testReader{input, stride}\n\t\tl := NewReader(&reader, len(input)+1)\n\t\tfor {\n\t\t\tline, isPrefix, err := l.ReadLine()\n\t\t\tif len(line) > 0 && err != nil {\n\t\t\t\tt.Errorf(\"ReadLine returned both data and error: %s\\n\")\n\t\t\t}\n\t\t\tif isPrefix {\n\t\t\t\tt.Errorf(\"ReadLine returned prefix\\n\")\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tif err != os.EOF {\n\t\t\t\t\tt.Fatalf(\"Got unknown error: %s\", err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif want := testOutput[done : done+len(line)]; !bytes.Equal(want, line) {\n\t\t\t\tt.Errorf(\"Bad line at stride %d: want: %x got: %x\", stride, want, line)\n\t\t\t}\n\t\t\tdone += len(line)\n\t\t}\n\t\tif done != len(testOutput) {\n\t\t\tt.Error(\"ReadLine didn't return everything\")\n\t\t}\n\t}\n}\n\nfunc TestReader(t *testing.T) {\n\ttestLineReader(t, testInput)\n\ttestLineReader(t, testInputrn)\n}\n\nfunc TestLineTooLong(t *testing.T) {\n\tbuf := bytes.NewBuffer([]byte(\"aaabbbcc\\n\"))\n\tl := NewReader(buf, 3)\n\tline, isPrefix, err := l.ReadLine()\n\tif !isPrefix || !bytes.Equal(line, []byte(\"aaa\")) || err != nil {\n\t\tt.Errorf(\"bad result for first line: %x %s\", line, err)\n\t}\n\tline, isPrefix, err = l.ReadLine()\n\tif !isPrefix || !bytes.Equal(line, []byte(\"bbb\")) || err != nil {\n\t\tt.Errorf(\"bad result for second line: %x\", line)\n\t}\n\tline, isPrefix, err = l.ReadLine()\n\tif isPrefix || !bytes.Equal(line, []byte(\"cc\")) || err != nil {\n\t\tt.Errorf(\"bad result for third line: %x\", line)\n\t}\n}\n<commit_msg>encoding\/line: fix error call (missing argument)<commit_after>\/\/ Copyright 2010 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage line\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"testing\"\n)\n\nvar testOutput = []byte(\"0123456789abcdefghijklmnopqrstuvwxy\")\nvar testInput = []byte(\"012\\n345\\n678\\n9ab\\ncde\\nfgh\\nijk\\nlmn\\nopq\\nrst\\nuvw\\nxy\")\nvar testInputrn = []byte(\"012\\r\\n345\\r\\n678\\r\\n9ab\\r\\ncde\\r\\nfgh\\r\\nijk\\r\\nlmn\\r\\nopq\\r\\nrst\\r\\nuvw\\r\\nxy\\r\\n\\n\\r\\n\")\n\n\/\/ TestReader wraps a []byte and returns reads of a specific length.\ntype testReader struct {\n\tdata []byte\n\tstride int\n}\n\nfunc (t *testReader) Read(buf []byte) (n int, err os.Error) {\n\tn = t.stride\n\tif n > len(t.data) {\n\t\tn = len(t.data)\n\t}\n\tif n > len(buf) {\n\t\tn = len(buf)\n\t}\n\tcopy(buf, t.data)\n\tt.data = t.data[n:]\n\tif len(t.data) == 0 {\n\t\terr = os.EOF\n\t}\n\treturn\n}\n\nfunc testLineReader(t *testing.T, input []byte) {\n\tfor stride := 1; stride < len(input); stride++ {\n\t\tdone := 0\n\t\treader := testReader{input, stride}\n\t\tl := NewReader(&reader, len(input)+1)\n\t\tfor {\n\t\t\tline, isPrefix, err := l.ReadLine()\n\t\t\tif len(line) > 0 && err != nil {\n\t\t\t\tt.Errorf(\"ReadLine returned both data and error: %s\", err)\n\t\t\t}\n\t\t\tif isPrefix {\n\t\t\t\tt.Errorf(\"ReadLine returned prefix\")\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tif err != os.EOF {\n\t\t\t\t\tt.Fatalf(\"Got unknown error: %s\", err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif want := testOutput[done : done+len(line)]; !bytes.Equal(want, line) {\n\t\t\t\tt.Errorf(\"Bad line at stride %d: want: %x got: %x\", stride, want, line)\n\t\t\t}\n\t\t\tdone += len(line)\n\t\t}\n\t\tif done != len(testOutput) {\n\t\t\tt.Error(\"ReadLine didn't return everything\")\n\t\t}\n\t}\n}\n\nfunc TestReader(t *testing.T) {\n\ttestLineReader(t, testInput)\n\ttestLineReader(t, testInputrn)\n}\n\nfunc TestLineTooLong(t *testing.T) {\n\tbuf := bytes.NewBuffer([]byte(\"aaabbbcc\\n\"))\n\tl := NewReader(buf, 3)\n\tline, isPrefix, err := l.ReadLine()\n\tif !isPrefix || !bytes.Equal(line, []byte(\"aaa\")) || err != nil {\n\t\tt.Errorf(\"bad result for first line: %x %s\", line, err)\n\t}\n\tline, isPrefix, err = l.ReadLine()\n\tif !isPrefix || !bytes.Equal(line, []byte(\"bbb\")) || err != nil {\n\t\tt.Errorf(\"bad result for second line: %x\", line)\n\t}\n\tline, isPrefix, err = l.ReadLine()\n\tif isPrefix || !bytes.Equal(line, []byte(\"cc\")) || err != nil {\n\t\tt.Errorf(\"bad result for third line: %x\", line)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The Kedge Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage kubernetes\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/kedgeproject\/kedge\/pkg\/spec\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"github.com\/pkg\/errors\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/resource\"\n\tmetav1 
\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/intstr\"\n\t\"k8s.io\/client-go\/pkg\/api\"\n\tapi_v1 \"k8s.io\/client-go\/pkg\/api\/v1\"\n\text_v1beta1 \"k8s.io\/client-go\/pkg\/apis\/extensions\/v1beta1\"\n\n\t\/\/ install api (register and add types to api.Schema)\n\t_ \"k8s.io\/client-go\/pkg\/api\/install\"\n\t_ \"k8s.io\/client-go\/pkg\/apis\/extensions\/install\"\n)\n\nfunc getLabels(app *spec.App) map[string]string {\n\tlabels := map[string]string{\"app\": app.Name}\n\treturn labels\n}\n\nfunc createIngresses(app *spec.App) ([]runtime.Object, error) {\n\tvar ings []runtime.Object\n\n\tfor _, i := range app.Ingresses {\n\t\ting := &ext_v1beta1.Ingress{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: i.Name,\n\t\t\t\tLabels: app.Labels,\n\t\t\t},\n\t\t\tSpec: i.IngressSpec,\n\t\t}\n\t\tings = append(ings, ing)\n\t}\n\treturn ings, nil\n}\n\nfunc createServices(app *spec.App) ([]runtime.Object, error) {\n\tvar svcs []runtime.Object\n\tfor _, s := range app.Services {\n\t\tsvc := &api_v1.Service{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: s.Name,\n\t\t\t\tLabels: app.Labels,\n\t\t\t},\n\t\t\tSpec: s.ServiceSpec,\n\t\t}\n\t\tfor _, servicePortMod := range s.Ports {\n\t\t\tsvc.Spec.Ports = append(svc.Spec.Ports, servicePortMod.ServicePort)\n\t\t}\n\t\tif len(svc.Spec.Selector) == 0 {\n\t\t\tsvc.Spec.Selector = app.Labels\n\t\t}\n\t\tsvcs = append(svcs, svc)\n\n\t\t\/\/ Generate ingress if \"endpoint\" is mentioned in app.Services.Ports[].Endpoint\n\t\tfor _, port := range s.Ports {\n\t\t\tif port.Endpoint != \"\" {\n\t\t\t\tvar host string\n\t\t\t\tvar path string\n\t\t\t\tendpoint := strings.SplitN(port.Endpoint, \"\/\", 2)\n\t\t\t\tswitch len(endpoint) {\n\t\t\t\tcase 1:\n\t\t\t\t\thost = endpoint[0]\n\t\t\t\t\tpath = \"\/\"\n\t\t\t\tcase 2:\n\t\t\t\t\thost = endpoint[0]\n\t\t\t\t\tpath = \"\/\" + endpoint[1]\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, fmt.Errorf(\"Invalid syntax for endpoint: %v\", port.Endpoint)\n\t\t\t\t}\n\n\t\t\t\tingressName := s.Name + \"-\" + strconv.FormatInt(int64(port.Port), 10)\n\t\t\t\tendpointIngress := &ext_v1beta1.Ingress{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: ingressName,\n\t\t\t\t\t\tLabels: app.Labels,\n\t\t\t\t\t},\n\t\t\t\t\tSpec: ext_v1beta1.IngressSpec{\n\t\t\t\t\t\tRules: []ext_v1beta1.IngressRule{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tHost: host,\n\t\t\t\t\t\t\t\tIngressRuleValue: ext_v1beta1.IngressRuleValue{\n\t\t\t\t\t\t\t\t\tHTTP: &ext_v1beta1.HTTPIngressRuleValue{\n\t\t\t\t\t\t\t\t\t\tPaths: []ext_v1beta1.HTTPIngressPath{\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\tPath: path,\n\t\t\t\t\t\t\t\t\t\t\t\tBackend: ext_v1beta1.IngressBackend{\n\t\t\t\t\t\t\t\t\t\t\t\t\tServiceName: s.Name,\n\t\t\t\t\t\t\t\t\t\t\t\t\tServicePort: intstr.IntOrString{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tIntVal: port.Port,\n\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tsvcs = append(svcs, endpointIngress)\n\t\t\t}\n\t\t}\n\t}\n\treturn svcs, nil\n}\n\n\/\/ Creates a Deployment Kubernetes resource. 
The returned Deployment resource\n\/\/ will be nil if it could not be generated due to insufficient input data.\nfunc createDeployment(app *spec.App) (*ext_v1beta1.Deployment, error) {\n\n\t\/\/ We need to error out if both, app.PodSpec and app.DeploymentSpec are empty\n\tif reflect.DeepEqual(app.PodSpec, api_v1.PodSpec{}) && reflect.DeepEqual(app.DeploymentSpec, ext_v1beta1.DeploymentSpec{}) {\n\t\tlog.Debug(\"Both, app.PodSpec and app.DeploymentSpec are empty, not enough data to create a deployment.\")\n\t\treturn nil, nil\n\t}\n\n\t\/\/ We are merging whole DeploymentSpec with PodSpec.\n\t\/\/ This means that someone could specify containers in template.spec and also in top level PodSpec.\n\t\/\/ This stupid check is supposed to make sure that only one of them is set.\n\t\/\/ TODO: merge DeploymentSpec.Template.Spec and top level PodSpec\n\tif !(reflect.DeepEqual(app.DeploymentSpec.Template.Spec, api_v1.PodSpec{}) || reflect.DeepEqual(app.PodSpec, api_v1.PodSpec{})) {\n\t\treturn nil, fmt.Errorf(\"Pod can't be specified in two places. Use top level PodSpec or template.spec (DeploymentSpec.Template.Spec) not both\")\n\t}\n\n\tdeploymentSpec := app.DeploymentSpec\n\n\t\/\/ top level PodSpec is not empty, use it for deployment template\n\t\/\/ we already know that if app.PodSpec is not empty app.DeploymentSpec.Template.Spec is empty\n\tif !reflect.DeepEqual(app.PodSpec, api_v1.PodSpec{}) {\n\t\tdeploymentSpec.Template.Spec = app.PodSpec\n\t}\n\n\t\/\/ TODO: check if this wasn't set by user, in that case we shouldn't overwrite it\n\tdeploymentSpec.Template.ObjectMeta.Name = app.Name\n\n\t\/\/ TODO: merge with already existing labels and avoid duplication\n\tdeploymentSpec.Template.ObjectMeta.Labels = app.Labels\n\n\tdeployment := ext_v1beta1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: app.Name,\n\t\t\tLabels: app.Labels,\n\t\t},\n\t\tSpec: deploymentSpec,\n\t}\n\n\treturn &deployment, nil\n}\n\n\/\/ create PVC reading the root level persistent volume field\nfunc createPVC(v spec.VolumeClaim, labels map[string]string) (*api_v1.PersistentVolumeClaim, error) {\n\t\/\/ check for conditions where user has given both conflicting fields\n\t\/\/ or not given either field\n\tif v.Size != \"\" && v.Resources.Requests != nil {\n\t\treturn nil, fmt.Errorf(\"persistent volume %q, cannot provide size and resources at the same time\", v.Name)\n\t}\n\tif v.Size == \"\" && v.Resources.Requests == nil {\n\t\treturn nil, fmt.Errorf(\"persistent volume %q, please provide size or resources, none given\", v.Name)\n\t}\n\n\t\/\/ if user has given size then create an \"api_v1.ResourceRequirements\"\n\t\/\/ because this can be fed to pvc directly\n\tif v.Size != \"\" {\n\t\tsize, err := resource.ParseQuantity(v.Size)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"could not read volume size\")\n\t\t}\n\t\t\/\/ update the volume's resource so that it can be fed\n\t\tv.Resources = api_v1.ResourceRequirements{\n\t\t\tRequests: api_v1.ResourceList{\n\t\t\t\tapi_v1.ResourceStorage: size,\n\t\t\t},\n\t\t}\n\t}\n\t\/\/ setting the default access mode if none given by user\n\tif len(v.AccessModes) == 0 {\n\t\tv.AccessModes = []api_v1.PersistentVolumeAccessMode{api_v1.ReadWriteOnce}\n\t}\n\tpvc := &api_v1.PersistentVolumeClaim{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: v.Name,\n\t\t\tLabels: labels,\n\t\t},\n\t\t\/\/ since we updated the pvc spec above, it can be directly fed\n\t\t\/\/ without having to do anything extra\n\t\tSpec: 
api_v1.PersistentVolumeClaimSpec(v.PersistentVolumeClaimSpec),\n\t}\n\treturn pvc, nil\n}\n\n\/\/ Since we are automatically creating pvc from\n\/\/ root level persistent volume and entry in the container\n\/\/ volume mount, we also need to update the pod's volume field\nfunc populateVolumes(app *spec.App) error {\n\tfor cn, c := range app.PodSpec.Containers {\n\t\tfor vn, vm := range c.VolumeMounts {\n\t\t\tif isPVCDefined(app.VolumeClaims, vm.Name) && !isVolumeDefined(app.Volumes, vm.Name) {\n\t\t\t\tapp.Volumes = append(app.Volumes, api_v1.Volume{\n\t\t\t\t\tName: vm.Name,\n\t\t\t\t\tVolumeSource: api_v1.VolumeSource{\n\t\t\t\t\t\tPersistentVolumeClaim: &api_v1.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\t\tClaimName: vm.Name,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t} else if !isVolumeDefined(app.Volumes, vm.Name) {\n\t\t\t\t\/\/ pvc is not defined so we need to check if the entry is made in the pod volumes,\n\t\t\t\t\/\/ since a volumeMount entry without an entry in pod level volumes might cause a failure\n\t\t\t\t\/\/ during deployment, as that would not be a complete configuration\n\t\t\t\treturn fmt.Errorf(\"neither root level Persistent Volume\"+\n\t\t\t\t\t\" nor Volume in pod spec defined for %q, \"+\n\t\t\t\t\t\"in app.containers[%d].volumeMounts[%d]\", vm.Name, cn, vn)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc createSecrets(app *spec.App) ([]runtime.Object, error) {\n\tvar secrets []runtime.Object\n\n\tfor _, s := range app.Secrets {\n\t\tsecret := &api_v1.Secret{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: s.Name,\n\t\t\t\tLabels: app.Labels,\n\t\t\t},\n\t\t\tData: s.Data,\n\t\t\tStringData: s.StringData,\n\t\t\tType: s.Type,\n\t\t}\n\t\tsecrets = append(secrets, secret)\n\t}\n\treturn secrets, nil\n}\n\n\/\/ CreateK8sObjects, given a spec.App object, reads\n\/\/ it and returns Kubernetes objects as a list of runtime.Object.\n\/\/ If the app is using the field 'extraResources' then it will\n\/\/ also return the file names mentioned there as a list of string\nfunc CreateK8sObjects(app *spec.App) ([]runtime.Object, []string, error) {\n\tvar objects []runtime.Object\n\n\tif app.Labels == nil {\n\t\tapp.Labels = getLabels(app)\n\t}\n\n\tsvcs, err := createServices(app)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"Unable to create Kubernetes Service\")\n\t}\n\n\tings, err := createIngresses(app)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"Unable to create Kubernetes Ingresses\")\n\t}\n\n\tsecs, err := createSecrets(app)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"Unable to create Kubernetes Secrets\")\n\t}\n\n\tapp.PodSpec.Containers, err = populateContainers(app.Containers, app.ConfigMaps, app.Secrets)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrapf(err, \"app %q\", app.Name)\n\t}\n\tlog.Debugf(\"object after population: %#v\\n\", app)\n\n\tapp.PodSpec.InitContainers, err = populateContainers(app.InitContainers, app.ConfigMaps, app.Secrets)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrapf(err, \"app %q\", app.Name)\n\t}\n\tlog.Debugf(\"object after population: %#v\\n\", app)\n\n\t\/\/ create pvc for each root level persistent volume\n\tvar pvcs []runtime.Object\n\tfor _, v := range app.VolumeClaims {\n\t\tpvc, err := createPVC(v, app.Labels)\n\t\tif err != nil {\n\t\t\treturn nil, nil, errors.Wrapf(err, \"app %q\", app.Name)\n\t\t}\n\t\tpvcs = append(pvcs, pvc)\n\t}\n\tif err := populateVolumes(app); err != nil {\n\t\treturn nil, nil, errors.Wrapf(err, \"app %q\", app.Name)\n\t}\n\n\tvar configMap 
[]runtime.Object\n\tfor _, cd := range app.ConfigMaps {\n\t\tcm := &api_v1.ConfigMap{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: cd.Name,\n\t\t\t\tLabels: app.Labels,\n\t\t\t},\n\t\t\tData: cd.Data,\n\t\t}\n\n\t\tconfigMap = append(configMap, cm)\n\t}\n\n\tdeployment, err := createDeployment(app)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrapf(err, \"app %q\", app.Name)\n\t}\n\n\t\/\/ deployment will be nil if no deployment is generated and no error occurs,\n\t\/\/ so we only need to append this when a legit deployment resource is returned\n\tif deployment != nil {\n\t\tobjects = append(objects, deployment)\n\t\tlog.Debugf(\"app: %s, deployment: %s\\n\", app.Name, spew.Sprint(deployment))\n\t}\n\tobjects = append(objects, configMap...)\n\tlog.Debugf(\"app: %s, configMap: %s\\n\", app.Name, spew.Sprint(configMap))\n\n\tobjects = append(objects, svcs...)\n\tlog.Debugf(\"app: %s, service: %s\\n\", app.Name, spew.Sprint(svcs))\n\n\tobjects = append(objects, ings...)\n\tlog.Debugf(\"app: %s, ingress: %s\\n\", app.Name, spew.Sprint(ings))\n\n\tobjects = append(objects, pvcs...)\n\tlog.Debugf(\"app: %s, pvc: %s\\n\", app.Name, spew.Sprint(pvcs))\n\n\tobjects = append(objects, secs...)\n\tlog.Debugf(\"app: %s, secret: %s\\n\", app.Name, spew.Sprint(secs))\n\n\treturn objects, app.ExtraResources, nil\n}\n\n\/\/ Transform function if given spec.App data creates the versioned\n\/\/ kubernetes objects and returns them in list of runtime.Object\n\/\/ And if the field in spec.App called 'extraResources' is used\n\/\/ then it returns the filenames mentioned there as list of string\nfunc Transform(app *spec.App) ([]runtime.Object, []string, error) {\n\n\truntimeObjects, extraResources, err := CreateK8sObjects(app)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"failed to create Kubernetes objects\")\n\t}\n\n\tif len(runtimeObjects) == 0 {\n\t\treturn nil, nil, errors.New(\"No runtime objects created, possibly because not enough input data was passed\")\n\t}\n\n\tfor _, runtimeObject := range runtimeObjects {\n\n\t\tgvk, isUnversioned, err := api.Scheme.ObjectKind(runtimeObject)\n\t\tif err != nil {\n\t\t\treturn nil, nil, errors.Wrap(err, \"ConvertToVersion failed\")\n\t\t}\n\t\tif isUnversioned {\n\t\t\treturn nil, nil, fmt.Errorf(\"ConvertToVersion failed: can't output unversioned type: %T\", runtimeObject)\n\t\t}\n\n\t\truntimeObject.GetObjectKind().SetGroupVersionKind(gvk)\n\t}\n\n\treturn runtimeObjects, extraResources, nil\n}\n<commit_msg>order artifacts generation<commit_after>\/*\nCopyright 2017 The Kedge Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage kubernetes\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/kedgeproject\/kedge\/pkg\/spec\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"github.com\/pkg\/errors\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/resource\"\n\tmetav1 
\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/intstr\"\n\t\"k8s.io\/client-go\/pkg\/api\"\n\tapi_v1 \"k8s.io\/client-go\/pkg\/api\/v1\"\n\text_v1beta1 \"k8s.io\/client-go\/pkg\/apis\/extensions\/v1beta1\"\n\n\t\/\/ install api (register and add types to api.Schema)\n\t_ \"k8s.io\/client-go\/pkg\/api\/install\"\n\t_ \"k8s.io\/client-go\/pkg\/apis\/extensions\/install\"\n)\n\nfunc getLabels(app *spec.App) map[string]string {\n\tlabels := map[string]string{\"app\": app.Name}\n\treturn labels\n}\n\nfunc createIngresses(app *spec.App) ([]runtime.Object, error) {\n\tvar ings []runtime.Object\n\n\tfor _, i := range app.Ingresses {\n\t\ting := &ext_v1beta1.Ingress{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: i.Name,\n\t\t\t\tLabels: app.Labels,\n\t\t\t},\n\t\t\tSpec: i.IngressSpec,\n\t\t}\n\t\tings = append(ings, ing)\n\t}\n\treturn ings, nil\n}\n\nfunc createServices(app *spec.App) ([]runtime.Object, error) {\n\tvar svcs []runtime.Object\n\tfor _, s := range app.Services {\n\t\tsvc := &api_v1.Service{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: s.Name,\n\t\t\t\tLabels: app.Labels,\n\t\t\t},\n\t\t\tSpec: s.ServiceSpec,\n\t\t}\n\t\tfor _, servicePortMod := range s.Ports {\n\t\t\tsvc.Spec.Ports = append(svc.Spec.Ports, servicePortMod.ServicePort)\n\t\t}\n\t\tif len(svc.Spec.Selector) == 0 {\n\t\t\tsvc.Spec.Selector = app.Labels\n\t\t}\n\t\tsvcs = append(svcs, svc)\n\n\t\t\/\/ Generate ingress if \"endpoint\" is mentioned in app.Services.Ports[].Endpoint\n\t\tfor _, port := range s.Ports {\n\t\t\tif port.Endpoint != \"\" {\n\t\t\t\tvar host string\n\t\t\t\tvar path string\n\t\t\t\tendpoint := strings.SplitN(port.Endpoint, \"\/\", 2)\n\t\t\t\tswitch len(endpoint) {\n\t\t\t\tcase 1:\n\t\t\t\t\thost = endpoint[0]\n\t\t\t\t\tpath = \"\/\"\n\t\t\t\tcase 2:\n\t\t\t\t\thost = endpoint[0]\n\t\t\t\t\tpath = \"\/\" + endpoint[1]\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, fmt.Errorf(\"Invalid syntax for endpoint: %v\", port.Endpoint)\n\t\t\t\t}\n\n\t\t\t\tingressName := s.Name + \"-\" + strconv.FormatInt(int64(port.Port), 10)\n\t\t\t\tendpointIngress := &ext_v1beta1.Ingress{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: ingressName,\n\t\t\t\t\t\tLabels: app.Labels,\n\t\t\t\t\t},\n\t\t\t\t\tSpec: ext_v1beta1.IngressSpec{\n\t\t\t\t\t\tRules: []ext_v1beta1.IngressRule{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tHost: host,\n\t\t\t\t\t\t\t\tIngressRuleValue: ext_v1beta1.IngressRuleValue{\n\t\t\t\t\t\t\t\t\tHTTP: &ext_v1beta1.HTTPIngressRuleValue{\n\t\t\t\t\t\t\t\t\t\tPaths: []ext_v1beta1.HTTPIngressPath{\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\tPath: path,\n\t\t\t\t\t\t\t\t\t\t\t\tBackend: ext_v1beta1.IngressBackend{\n\t\t\t\t\t\t\t\t\t\t\t\t\tServiceName: s.Name,\n\t\t\t\t\t\t\t\t\t\t\t\t\tServicePort: intstr.IntOrString{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tIntVal: port.Port,\n\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tsvcs = append(svcs, endpointIngress)\n\t\t\t}\n\t\t}\n\t}\n\treturn svcs, nil\n}\n\n\/\/ Creates a Deployment Kubernetes resource. 
The returned Deployment resource\n\/\/ will be nil if it could not be generated due to insufficient input data.\nfunc createDeployment(app *spec.App) (*ext_v1beta1.Deployment, error) {\n\n\t\/\/ We need to error out if both, app.PodSpec and app.DeploymentSpec are empty\n\tif reflect.DeepEqual(app.PodSpec, api_v1.PodSpec{}) && reflect.DeepEqual(app.DeploymentSpec, ext_v1beta1.DeploymentSpec{}) {\n\t\tlog.Debug(\"Both, app.PodSpec and app.DeploymentSpec are empty, not enough data to create a deployment.\")\n\t\treturn nil, nil\n\t}\n\n\t\/\/ We are merging whole DeploymentSpec with PodSpec.\n\t\/\/ This means that someone could specify containers in template.spec and also in top level PodSpec.\n\t\/\/ This stupid check is supposed to make sure that only one of them is set.\n\t\/\/ TODO: merge DeploymentSpec.Template.Spec and top level PodSpec\n\tif !(reflect.DeepEqual(app.DeploymentSpec.Template.Spec, api_v1.PodSpec{}) || reflect.DeepEqual(app.PodSpec, api_v1.PodSpec{})) {\n\t\treturn nil, fmt.Errorf(\"Pod can't be specified in two places. Use top level PodSpec or template.spec (DeploymentSpec.Template.Spec) not both\")\n\t}\n\n\tdeploymentSpec := app.DeploymentSpec\n\n\t\/\/ top level PodSpec is not empty, use it for deployment template\n\t\/\/ we already know that if app.PodSpec is not empty app.DeploymentSpec.Template.Spec is empty\n\tif !reflect.DeepEqual(app.PodSpec, api_v1.PodSpec{}) {\n\t\tdeploymentSpec.Template.Spec = app.PodSpec\n\t}\n\n\t\/\/ TODO: check if this wasn't set by user, in that case we shouldn't overwrite it\n\tdeploymentSpec.Template.ObjectMeta.Name = app.Name\n\n\t\/\/ TODO: merge with already existing labels and avoid duplication\n\tdeploymentSpec.Template.ObjectMeta.Labels = app.Labels\n\n\tdeployment := ext_v1beta1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: app.Name,\n\t\t\tLabels: app.Labels,\n\t\t},\n\t\tSpec: deploymentSpec,\n\t}\n\n\treturn &deployment, nil\n}\n\n\/\/ create PVC reading the root level persistent volume field\nfunc createPVC(v spec.VolumeClaim, labels map[string]string) (*api_v1.PersistentVolumeClaim, error) {\n\t\/\/ check for conditions where user has given both conflicting fields\n\t\/\/ or not given either field\n\tif v.Size != \"\" && v.Resources.Requests != nil {\n\t\treturn nil, fmt.Errorf(\"persistent volume %q, cannot provide size and resources at the same time\", v.Name)\n\t}\n\tif v.Size == \"\" && v.Resources.Requests == nil {\n\t\treturn nil, fmt.Errorf(\"persistent volume %q, please provide size or resources, none given\", v.Name)\n\t}\n\n\t\/\/ if user has given size then create an \"api_v1.ResourceRequirements\"\n\t\/\/ because this can be fed to pvc directly\n\tif v.Size != \"\" {\n\t\tsize, err := resource.ParseQuantity(v.Size)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"could not read volume size\")\n\t\t}\n\t\t\/\/ update the volume's resource so that it can be fed\n\t\tv.Resources = api_v1.ResourceRequirements{\n\t\t\tRequests: api_v1.ResourceList{\n\t\t\t\tapi_v1.ResourceStorage: size,\n\t\t\t},\n\t\t}\n\t}\n\t\/\/ setting the default access mode if none given by user\n\tif len(v.AccessModes) == 0 {\n\t\tv.AccessModes = []api_v1.PersistentVolumeAccessMode{api_v1.ReadWriteOnce}\n\t}\n\tpvc := &api_v1.PersistentVolumeClaim{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: v.Name,\n\t\t\tLabels: labels,\n\t\t},\n\t\t\/\/ since we updated the pvc spec above, it can be directly fed\n\t\t\/\/ without having to do anything extra\n\t\tSpec: 
api_v1.PersistentVolumeClaimSpec(v.PersistentVolumeClaimSpec),\n\t}\n\treturn pvc, nil\n}\n\n\/\/ Since we are automatically creating pvc from\n\/\/ root level persistent volume and entry in the container\n\/\/ volume mount, we also need to update the pod's volume field\nfunc populateVolumes(app *spec.App) error {\n\tfor cn, c := range app.PodSpec.Containers {\n\t\tfor vn, vm := range c.VolumeMounts {\n\t\t\tif isPVCDefined(app.VolumeClaims, vm.Name) && !isVolumeDefined(app.Volumes, vm.Name) {\n\t\t\t\tapp.Volumes = append(app.Volumes, api_v1.Volume{\n\t\t\t\t\tName: vm.Name,\n\t\t\t\t\tVolumeSource: api_v1.VolumeSource{\n\t\t\t\t\t\tPersistentVolumeClaim: &api_v1.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\t\tClaimName: vm.Name,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t} else if !isVolumeDefined(app.Volumes, vm.Name) {\n\t\t\t\t\/\/ pvc is not defined so we need to check if the entry is made in the pod volumes,\n\t\t\t\t\/\/ since a volumeMount entry without an entry in pod level volumes might cause a failure\n\t\t\t\t\/\/ during deployment, as that would not be a complete configuration\n\t\t\t\treturn fmt.Errorf(\"neither root level Persistent Volume\"+\n\t\t\t\t\t\" nor Volume in pod spec defined for %q, \"+\n\t\t\t\t\t\"in app.containers[%d].volumeMounts[%d]\", vm.Name, cn, vn)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc createSecrets(app *spec.App) ([]runtime.Object, error) {\n\tvar secrets []runtime.Object\n\n\tfor _, s := range app.Secrets {\n\t\tsecret := &api_v1.Secret{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: s.Name,\n\t\t\t\tLabels: app.Labels,\n\t\t\t},\n\t\t\tData: s.Data,\n\t\t\tStringData: s.StringData,\n\t\t\tType: s.Type,\n\t\t}\n\t\tsecrets = append(secrets, secret)\n\t}\n\treturn secrets, nil\n}\n\n\/\/ CreateK8sObjects, given a spec.App object, reads\n\/\/ it and returns Kubernetes objects as a list of runtime.Object.\n\/\/ If the app is using the field 'extraResources' then it will\n\/\/ also return the file names mentioned there as a list of string\nfunc CreateK8sObjects(app *spec.App) ([]runtime.Object, []string, error) {\n\tvar objects []runtime.Object\n\n\tif app.Labels == nil {\n\t\tapp.Labels = getLabels(app)\n\t}\n\n\tsvcs, err := createServices(app)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"Unable to create Kubernetes Service\")\n\t}\n\n\tings, err := createIngresses(app)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"Unable to create Kubernetes Ingresses\")\n\t}\n\n\tsecs, err := createSecrets(app)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"Unable to create Kubernetes Secrets\")\n\t}\n\n\tapp.PodSpec.Containers, err = populateContainers(app.Containers, app.ConfigMaps, app.Secrets)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrapf(err, \"app %q\", app.Name)\n\t}\n\tlog.Debugf(\"object after population: %#v\\n\", app)\n\n\tapp.PodSpec.InitContainers, err = populateContainers(app.InitContainers, app.ConfigMaps, app.Secrets)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrapf(err, \"app %q\", app.Name)\n\t}\n\tlog.Debugf(\"object after population: %#v\\n\", app)\n\n\t\/\/ create pvc for each root level persistent volume\n\tvar pvcs []runtime.Object\n\tfor _, v := range app.VolumeClaims {\n\t\tpvc, err := createPVC(v, app.Labels)\n\t\tif err != nil {\n\t\t\treturn nil, nil, errors.Wrapf(err, \"app %q\", app.Name)\n\t\t}\n\t\tpvcs = append(pvcs, pvc)\n\t}\n\tif err := populateVolumes(app); err != nil {\n\t\treturn nil, nil, errors.Wrapf(err, \"app %q\", app.Name)\n\t}\n\n\tvar configMap 
[]runtime.Object\n\tfor _, cd := range app.ConfigMaps {\n\t\tcm := &api_v1.ConfigMap{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: cd.Name,\n\t\t\t\tLabels: app.Labels,\n\t\t\t},\n\t\t\tData: cd.Data,\n\t\t}\n\n\t\tconfigMap = append(configMap, cm)\n\t}\n\n\tdeployment, err := createDeployment(app)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrapf(err, \"app %q\", app.Name)\n\t}\n\n\t\/\/ please keep the order of the artifacts addition as it is\n\n\t\/\/ adding non-controller objects\n\tobjects = append(objects, pvcs...)\n\tlog.Debugf(\"app: %s, pvc: %s\\n\", app.Name, spew.Sprint(pvcs))\n\n\tobjects = append(objects, svcs...)\n\tlog.Debugf(\"app: %s, service: %s\\n\", app.Name, spew.Sprint(svcs))\n\n\tobjects = append(objects, ings...)\n\tlog.Debugf(\"app: %s, ingress: %s\\n\", app.Name, spew.Sprint(ings))\n\n\tobjects = append(objects, secs...)\n\tlog.Debugf(\"app: %s, secret: %s\\n\", app.Name, spew.Sprint(secs))\n\n\tobjects = append(objects, configMap...)\n\tlog.Debugf(\"app: %s, configMap: %s\\n\", app.Name, spew.Sprint(configMap))\n\n\t\/\/ add new non-controller objects after this\n\n\t\/\/ adding controller objects\n\t\/\/ deployment will be nil if no deployment is generated and no error occurs,\n\t\/\/ so we only need to append this when a legit deployment resource is returned\n\tif deployment != nil {\n\t\tobjects = append(objects, deployment)\n\t\tlog.Debugf(\"app: %s, deployment: %s\\n\", app.Name, spew.Sprint(deployment))\n\t}\n\t\/\/ add new controllers after this\n\n\treturn objects, app.ExtraResources, nil\n}\n\n\/\/ Transform function if given spec.App data creates the versioned\n\/\/ kubernetes objects and returns them in list of runtime.Object\n\/\/ And if the field in spec.App called 'extraResources' is used\n\/\/ then it returns the filenames mentioned there as list of string\nfunc Transform(app *spec.App) ([]runtime.Object, []string, error) {\n\n\truntimeObjects, extraResources, err := CreateK8sObjects(app)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"failed to create Kubernetes objects\")\n\t}\n\n\tif len(runtimeObjects) == 0 {\n\t\treturn nil, nil, errors.New(\"No runtime objects created, possibly because not enough input data was passed\")\n\t}\n\n\tfor _, runtimeObject := range runtimeObjects {\n\n\t\tgvk, isUnversioned, err := api.Scheme.ObjectKind(runtimeObject)\n\t\tif err != nil {\n\t\t\treturn nil, nil, errors.Wrap(err, \"ConvertToVersion failed\")\n\t\t}\n\t\tif isUnversioned {\n\t\t\treturn nil, nil, fmt.Errorf(\"ConvertToVersion failed: can't output unversioned type: %T\", runtimeObject)\n\t\t}\n\n\t\truntimeObject.GetObjectKind().SetGroupVersionKind(gvk)\n\t}\n\n\treturn runtimeObjects, extraResources, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"os\"\n\n\t\"github.com\/dmnlk\/gomadare\"\n\t\"github.com\/k0kubun\/pp\"\n)\n\nfunc main() {\n\tck := os.Getenv(\"ck\")\n\tcs := os.Getenv(\"cs\")\n\tat := os.Getenv(\"at\")\n\tas := os.Getenv(\"as\")\n\tclient := gomadare.NewClient(ck, cs, at, as)\n\tclient.GetUserStream(nil, func (s Status, e Event) {\n\t\tif s != nil {\n\t\t\tpp.Println(s)\n\t\t}\n\t\tif e != nil {\n\t\t\tpp.Print(e)\n\t\t}\n\t})\n}\n\n<commit_msg>fix sample<commit_after>package main\n\nimport (\n\t\"os\"\n\n\t\"github.com\/dmnlk\/gomadare\"\n\t\"github.com\/k0kubun\/pp\"\n\t\"fmt\"\n)\n\nfunc main() {\n\tck := os.Getenv(\"ck\")\n\tcs := os.Getenv(\"cs\")\n\tat := os.Getenv(\"at\")\n\tas := os.Getenv(\"as\")\n\tclient := gomadare.NewClient(ck, cs, at, as)\n\tclient.GetUserStream(nil, 
func(s gomadare.Status, e gomadare.Event) {\n\t\tif &s != nil {\n\t\t\tfmt.Println(\"return status\")\n\t\t\tpp.Print(s)\n\t\t}\n\t\tif &e != nil {\n\t\t\tfmt.Println(\"return event\")\n\t\t\tpp.Print(e)\n\t\t}\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package models\n\nimport (\n\t\"errors\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/koding\/bongo\"\n)\n\ntype ChannelMessageList struct {\n\t\/\/ unique identifier of the channel message list\n\tId int64 `json:\"id\"`\n\n\t\/\/ Id of the channel\n\tChannelId int64 `json:\"channelId\" sql:\"NOT NULL\"`\n\n\t\/\/ Id of the message\n\tMessageId int64 `json:\"messageId\" sql:\"NOT NULL\"`\n\n\t\/\/ Addition date of the message to the channel\n\tAddedAt time.Time `json:\"addedAt\" sql:\"NOT NULL\"`\n}\n\nfunc (c *ChannelMessageList) BeforeCreate() {\n\tc.AddedAt = time.Now()\n}\n\nfunc (c *ChannelMessageList) BeforeUpdate() {\n\tc.AddedAt = time.Now()\n}\n\nfunc (c *ChannelMessageList) AfterCreate() {\n\tbongo.B.AfterCreate(c)\n}\n\nfunc (c *ChannelMessageList) AfterUpdate() {\n\tbongo.B.AfterUpdate(c)\n}\n\nfunc (c *ChannelMessageList) AfterDelete() {\n\tbongo.B.AfterDelete(c)\n}\n\nfunc (c *ChannelMessageList) GetId() int64 {\n\treturn c.Id\n}\n\nfunc (c ChannelMessageList) TableName() string {\n\treturn \"api.channel_message_list\"\n}\n\nfunc NewChannelMessageList() *ChannelMessageList {\n\treturn &ChannelMessageList{}\n}\n\nfunc (c *ChannelMessageList) Fetch() error {\n\treturn bongo.B.Fetch(c)\n}\n\nfunc (c *ChannelMessageList) One(q *bongo.Query) error {\n\treturn bongo.B.One(c, c, q)\n}\n\nfunc (c *ChannelMessageList) UnreadCount(cp *ChannelParticipant) (int, error) {\n\tif cp.ChannelId == 0 {\n\t\treturn 0, errors.New(\"ChannelId is not set\")\n\t}\n\n\tif cp.AccountId == 0 {\n\t\treturn 0, errors.New(\"AccountId is not set\")\n\t}\n\n\tif cp.LastSeenAt.IsZero() {\n\t\treturn 0, errors.New(\"Last seen at date is not valid - it is zero\")\n\t}\n\n\treturn bongo.B.Count(c,\n\t\t\"channel_id = ? 
and added_at > ?\",\n\t\tcp.ChannelId,\n\t\t\/\/ todo change this format to get from a specific place\n\t\tcp.LastSeenAt.UTC().Format(time.RFC822Z),\n\t)\n}\n\nfunc (c *ChannelMessageList) Create() error {\n\treturn bongo.B.Create(c)\n}\n\nfunc (c *ChannelMessageList) Delete() error {\n\treturn bongo.B.Delete(c)\n}\n\nfunc (c *ChannelMessageList) List(q *Query) (*HistoryResponse, error) {\n\tmessageList, err := c.getMessages(q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thr := NewHistoryResponse()\n\thr.MessageList = messageList\n\n\tunreadCount := 0\n\tcp := NewChannelParticipant()\n\tcp.ChannelId = c.ChannelId\n\tcp.AccountId = q.AccountId\n\terr = cp.FetchParticipant()\n\t\/\/ we are forcing unread count to 0 if user is not a participant\n\t\/\/ of the channel\n\tif err != nil && err != gorm.RecordNotFound {\n\t\treturn nil, err\n\t}\n\n\tif err == nil {\n\t\tunreadCount, err = c.UnreadCount(cp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\thr.UnreadCount = unreadCount\n\treturn hr, nil\n}\n\nfunc (c *ChannelMessageList) getMessages(q *Query) ([]*ChannelMessageContainer, error) {\n\tvar messages []int64\n\n\tif c.ChannelId == 0 {\n\t\treturn nil, errors.New(\"ChannelId is not set\")\n\t}\n\n\tif err := bongo.B.DB.Table(c.TableName()).\n\t\tOrder(\"added_at desc\").\n\t\tWhere(\"channel_id = ?\", c.ChannelId).\n\t\tOffset(q.Skip).\n\t\tLimit(q.Limit).\n\t\tPluck(\"message_id\", &messages).\n\t\tError; err != nil {\n\t\treturn nil, err\n\t}\n\n\tparent := NewChannelMessage()\n\tchannelMessages, err := parent.FetchByIds(messages)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpopulatedChannelMessages, err := c.populateChannelMessages(channelMessages)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn populatedChannelMessages, nil\n}\n\nfunc (c *ChannelMessageList) populateChannelMessages(channelMessages []ChannelMessage) ([]*ChannelMessageContainer, error) {\n\tchannelMessageCount := len(channelMessages)\n\n\tpopulatedChannelMessages := make([]*ChannelMessageContainer, channelMessageCount)\n\n\tif channelMessageCount == 0 {\n\t\treturn populatedChannelMessages, nil\n\t}\n\n\tfor i := 0; i < channelMessageCount; i++ {\n\t\tcm := channelMessages[i]\n\t\tcmc, err := cm.FetchRelatives()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpopulatedChannelMessages[i] = cmc\n\n\t\tmr := NewMessageReply()\n\t\tmr.MessageId = cm.Id\n\t\treplies, err := mr.List()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpopulatedChannelMessagesReplies := make([]*ChannelMessageContainer, len(replies))\n\n\t\tfor rl := 0; rl < len(replies); rl++ {\n\t\t\tcmrc, err := replies[rl].FetchRelatives()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tpopulatedChannelMessagesReplies[rl] = cmrc\n\t\t}\n\t\tpopulatedChannelMessages[i].Replies = populatedChannelMessagesReplies\n\n\t}\n\treturn populatedChannelMessages, nil\n\n}\n\nfunc (c *ChannelMessageList) FetchMessageChannels(messageId int64) ([]Channel, error) {\n\tvar channelIds []int64\n\n\tq := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"message_id\": messageId,\n\t\t},\n\t\tPluck: \"channel_id\",\n\t}\n\n\terr := bongo.B.Some(c, &channelIds, q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewChannel().FetchByIds(channelIds)\n}\n\n\/\/ separate this function into modelhelper\n\/\/ by making it a variadic function\nfunc (c *ChannelMessageList) DeleteMessagesBySelector(selector map[string]interface{}) error {\n\tvar cmls []ChannelMessageList\n\n\terr := bongo.B.Some(c, 
&cmls, &bongo.Query{Selector: selector})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, cml := range cmls {\n\t\tif err := cml.Delete(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Social: use buildMessage function instead of handling all required operations<commit_after>package models\n\nimport (\n\t\"errors\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/koding\/bongo\"\n)\n\ntype ChannelMessageList struct {\n\t\/\/ unique identifier of the channel message list\n\tId int64 `json:\"id\"`\n\n\t\/\/ Id of the channel\n\tChannelId int64 `json:\"channelId\" sql:\"NOT NULL\"`\n\n\t\/\/ Id of the message\n\tMessageId int64 `json:\"messageId\" sql:\"NOT NULL\"`\n\n\t\/\/ Addition date of the message to the channel\n\tAddedAt time.Time `json:\"addedAt\" sql:\"NOT NULL\"`\n}\n\nfunc (c *ChannelMessageList) BeforeCreate() {\n\tc.AddedAt = time.Now()\n}\n\nfunc (c *ChannelMessageList) BeforeUpdate() {\n\tc.AddedAt = time.Now()\n}\n\nfunc (c *ChannelMessageList) AfterCreate() {\n\tbongo.B.AfterCreate(c)\n}\n\nfunc (c *ChannelMessageList) AfterUpdate() {\n\tbongo.B.AfterUpdate(c)\n}\n\nfunc (c *ChannelMessageList) AfterDelete() {\n\tbongo.B.AfterDelete(c)\n}\n\nfunc (c *ChannelMessageList) GetId() int64 {\n\treturn c.Id\n}\n\nfunc (c ChannelMessageList) TableName() string {\n\treturn \"api.channel_message_list\"\n}\n\nfunc NewChannelMessageList() *ChannelMessageList {\n\treturn &ChannelMessageList{}\n}\n\nfunc (c *ChannelMessageList) Fetch() error {\n\treturn bongo.B.Fetch(c)\n}\n\nfunc (c *ChannelMessageList) One(q *bongo.Query) error {\n\treturn bongo.B.One(c, c, q)\n}\n\nfunc (c *ChannelMessageList) UnreadCount(cp *ChannelParticipant) (int, error) {\n\tif cp.ChannelId == 0 {\n\t\treturn 0, errors.New(\"ChannelId is not set\")\n\t}\n\n\tif cp.AccountId == 0 {\n\t\treturn 0, errors.New(\"AccountId is not set\")\n\t}\n\n\tif cp.LastSeenAt.IsZero() {\n\t\treturn 0, errors.New(\"Last seen at date is not valid - it is zero\")\n\t}\n\n\treturn bongo.B.Count(c,\n\t\t\"channel_id = ? 
and added_at > ?\",\n\t\tcp.ChannelId,\n\t\t\/\/ todo change this format to get from a specific place\n\t\tcp.LastSeenAt.UTC().Format(time.RFC822Z),\n\t)\n}\n\nfunc (c *ChannelMessageList) Create() error {\n\treturn bongo.B.Create(c)\n}\n\nfunc (c *ChannelMessageList) Delete() error {\n\treturn bongo.B.Delete(c)\n}\n\nfunc (c *ChannelMessageList) List(q *Query) (*HistoryResponse, error) {\n\tmessageList, err := c.getMessages(q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thr := NewHistoryResponse()\n\thr.MessageList = messageList\n\n\tunreadCount := 0\n\tcp := NewChannelParticipant()\n\tcp.ChannelId = c.ChannelId\n\tcp.AccountId = q.AccountId\n\terr = cp.FetchParticipant()\n\t\/\/ we are forcing unread count to 0 if user is not a participant\n\t\/\/ of the channel\n\tif err != nil && err != gorm.RecordNotFound {\n\t\treturn nil, err\n\t}\n\n\tif err == nil {\n\t\tunreadCount, err = c.UnreadCount(cp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\thr.UnreadCount = unreadCount\n\treturn hr, nil\n}\n\nfunc (c *ChannelMessageList) getMessages(q *Query) ([]*ChannelMessageContainer, error) {\n\tvar messages []int64\n\n\tif c.ChannelId == 0 {\n\t\treturn nil, errors.New(\"ChannelId is not set\")\n\t}\n\n\tif err := bongo.B.DB.Table(c.TableName()).\n\t\tOrder(\"added_at desc\").\n\t\tWhere(\"channel_id = ?\", c.ChannelId).\n\t\tOffset(q.Skip).\n\t\tLimit(q.Limit).\n\t\tPluck(\"message_id\", &messages).\n\t\tError; err != nil {\n\t\treturn nil, err\n\t}\n\n\tparent := NewChannelMessage()\n\tchannelMessages, err := parent.FetchByIds(messages)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpopulatedChannelMessages, err := c.populateChannelMessages(channelMessages)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn populatedChannelMessages, nil\n}\n\nfunc (c *ChannelMessageList) populateChannelMessages(channelMessages []ChannelMessage) ([]*ChannelMessageContainer, error) {\n\tchannelMessageCount := len(channelMessages)\n\n\tpopulatedChannelMessages := make([]*ChannelMessageContainer, channelMessageCount)\n\n\tif channelMessageCount == 0 {\n\t\treturn populatedChannelMessages, nil\n\t}\n\n\tfor i := 0; i < channelMessageCount; i++ {\n\t\tcm := channelMessages[i]\n\t\tcmc, err := cm.BuildMessage()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpopulatedChannelMessages[i] = cmc\n\t}\n\treturn populatedChannelMessages, nil\n\n}\n\nfunc (c *ChannelMessageList) FetchMessageChannels(messageId int64) ([]Channel, error) {\n\tvar channelIds []int64\n\n\tq := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"message_id\": messageId,\n\t\t},\n\t\tPluck: \"channel_id\",\n\t}\n\n\terr := bongo.B.Some(c, &channelIds, q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewChannel().FetchByIds(channelIds)\n}\n\n\/\/ separate this function into modelhelper\n\/\/ by making it a variadic function\nfunc (c *ChannelMessageList) DeleteMessagesBySelector(selector map[string]interface{}) error {\n\tvar cmls []ChannelMessageList\n\n\terr := bongo.B.Some(c, &cmls, &bongo.Query{Selector: selector})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, cml := range cmls {\n\t\tif err := cml.Delete(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package fs\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\t\"subutai\/config\"\n\t\"subutai\/log\"\n)\n\nfunc IsSubvolumeReadonly(path string) bool {\n\tout, _ := exec.Command(\"btrfs\", \"property\", \"get\", \"-ts\", 
path).Output()\n\tif strings.Contains(string(out), \"true\") {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc SubvolumeClone(src, dst string) {\n\terr := exec.Command(\"btrfs\", \"subvolume\", \"snapshot\", config.Agent.LxcPrefix+src, config.Agent.LxcPrefix+dst).Run()\n\tlog.Check(log.FatalLevel, \"Creating snapshot\", err)\n}\n\nfunc SubvolumeDestroy(path string) {\n\tnestedvol, err := exec.Command(\"btrfs\", \"subvolume\", \"list\", \"-o\", config.Agent.LxcPrefix+path).Output()\n\tlog.Check(log.WarnLevel, \"Getting nested subvolumes in \"+config.Agent.LxcPrefix+path, err)\n\tscanner := bufio.NewScanner(bytes.NewReader(nestedvol))\n\tfor scanner.Scan() {\n\t\tline := strings.Fields(scanner.Text())\n\t\tif len(line) > 8 {\n\t\t\tsubvol := strings.Split(line[8], path)\n\t\t\tif len(subvol) > 1 {\n\t\t\t\tSubvolumeDestroy(path + subvol[1])\n\t\t\t}\n\t\t}\n\t}\n\tqgroupDestroy(path)\n\terr = exec.Command(\"btrfs\", \"subvolume\", \"delete\", config.Agent.LxcPrefix+path).Run()\n\tlog.Check(log.FatalLevel, \"Destroying subvolume \"+path, err)\n}\n\nfunc qgroupDestroy(path string) {\n\tindex := id(path)\n\terr := exec.Command(\"btrfs\", \"qgroup\", \"destroy\", index, config.Agent.LxcPrefix).Run()\n\tlog.Check(log.FatalLevel, \"Destroying qgroup \"+path+\" \"+index, err)\n}\n\nfunc id(path string) string {\n\tout, _ := exec.Command(\"btrfs\", \"subvolume\", \"list\", config.Agent.LxcPrefix).Output()\n\tscanner := bufio.NewScanner(bytes.NewReader(out))\n\tfor scanner.Scan() {\n\t\tline := strings.Fields(scanner.Text())\n\t\tif len(line) > 8 {\n\t\t\tif strings.HasSuffix(line[8], path) {\n\t\t\t\treturn line[1]\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc Receive(src, dst, delta string, parent bool) {\n\targs := []string{\"receive\", \"-p\", config.Agent.LxcPrefix + src, config.Agent.LxcPrefix + dst}\n\tif !parent {\n\t\targs = []string{\"receive\", config.Agent.LxcPrefix + dst}\n\t}\n\treceive := exec.Command(\"btrfs\", args...)\n\tinput, err := os.Open(config.Agent.LxcPrefix + \"lxc-data\/tmpdir\/\" + delta)\n\treceive.Stdin = input\n\tlog.Check(log.FatalLevel, \"Opening delta \"+delta, err)\n\tlog.Check(log.FatalLevel, \"Receiving delta \"+delta, receive.Run())\n}\n\nfunc Send(src, dst, delta string) {\n\tnewdelta, err := os.Create(delta)\n\tlog.Check(log.FatalLevel, \"Creating delta \"+delta, err)\n\targs := []string{\"send\", \"-p\", src, dst}\n\tif src == dst {\n\t\targs = []string{\"send\", dst}\n\t}\n\tsend := exec.Command(\"btrfs\", args...)\n\tsend.Stdout = newdelta\n\tlog.Check(log.FatalLevel, \"Sending delta \"+delta, send.Run())\n}\n\nfunc ReadOnly(container string, flag bool) {\n\tfor _, path := range []string{container + \"\/rootfs\/\", \"lxc\/\" + container + \"-opt\", \"lxc-data\/\" + container + \"-var\", \"lxc-data\/\" + container + \"-home\"} {\n\t\targ := []string{\"property\", \"set\", \"-ts\", config.Agent.LxcPrefix + path, \"ro\", strconv.FormatBool(flag)}\n\t\tlog.Check(log.FatalLevel, \"Setting readonly: \"+strconv.FormatBool(flag), exec.Command(\"btrfs\", arg...).Run())\n\t}\n}\n\nfunc Stat(path, index string, raw bool) string {\n\tvar row = map[string]int{\n\t\t\"quota\": 3,\n\t\t\"usage\": 2,\n\t}\n\n\targs := []string{\"qgroup\", \"show\", \"-r\", config.Agent.LxcPrefix}\n\tif raw {\n\t\targs = []string{\"qgroup\", \"show\", \"-r\", \"--raw\", config.Agent.LxcPrefix}\n\t}\n\tout, err := exec.Command(\"btrfs\", args...).Output()\n\tlog.Check(log.FatalLevel, \"Getting btrfs stats\", err)\n\tind := id(path)\n\tscanner := bufio.NewScanner(bytes.NewReader(out))\n\tfor 
scanner.Scan() {\n\t\tline := strings.Fields(scanner.Text())\n\t\tif len(line) > 3 {\n\t\t\tif line[0] == \"0\/\"+ind {\n\t\t\t\treturn line[row[index]]\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc Quota(path string, size ...string) string {\n\tif size != nil {\n\t\texec.Command(\"btrfs\", \"qgroup\", \"limit\", size[0]+\"G\", config.Agent.LxcPrefix+path).Run()\n\t}\n\treturn Stat(path, \"quota\", false)\n}\n\nfunc GetContainerUUID(containerName string) string {\n\tvar uuid string\n\tresult, err := exec.Command(\"btrfs\", \"subvolume\", \"list\", \"-u\", config.Agent.LxcPrefix).CombinedOutput()\n\tif err != nil {\n\t\tlog.Error(\"btrfs command execute\", err.Error())\n\t}\n\tresArr := strings.Split(string(result), \"\\n\")\n\tfor _, r := range resArr {\n\t\tif strings.Contains(r, containerName+\"\/rootfs\") {\n\t\t\trArr := strings.Fields(r)\n\t\t\tuuid = rArr[8]\n\t\t}\n\n\t}\n\treturn uuid\n}\n\nfunc GetChildren(uuid string) []string {\n\tvar child []string\n\tresult, err := exec.Command(\"btrfs\", \"subvolume\", \"list\", \"-q\", config.Agent.LxcPrefix).CombinedOutput()\n\tif err != nil {\n\t\tlog.Error(\"btrfs -q command execute\", err.Error())\n\t}\n\tresultArr := strings.Split(string(result), \"\\n\")\n\tfor _, v := range resultArr {\n\t\tif strings.Contains(v, uuid) {\n\t\t\tvArr := strings.Fields(v)\n\t\t\tchild = append(child, vArr[10])\n\t\t}\n\t}\n\treturn child\n}\n<commit_msg>lxc destroy small fix. SS-4083<commit_after>package fs\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\t\"subutai\/config\"\n\t\"subutai\/log\"\n)\n\nfunc IsSubvolumeReadonly(path string) bool {\n\tout, _ := exec.Command(\"btrfs\", \"property\", \"get\", \"-ts\", path).Output()\n\tif strings.Contains(string(out), \"true\") {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc SubvolumeClone(src, dst string) {\n\terr := exec.Command(\"btrfs\", \"subvolume\", \"snapshot\", config.Agent.LxcPrefix+src, config.Agent.LxcPrefix+dst).Run()\n\tlog.Check(log.FatalLevel, \"Creating snapshot\", err)\n}\n\nfunc SubvolumeDestroy(path string) {\n\tnestedvol, err := exec.Command(\"btrfs\", \"subvolume\", \"list\", \"-o\", config.Agent.LxcPrefix+path).Output()\n\tlog.Check(log.WarnLevel, \"Getting nested subvolumes in \"+config.Agent.LxcPrefix+path, err)\n\tscanner := bufio.NewScanner(bytes.NewReader(nestedvol))\n\tfor scanner.Scan() {\n\t\tline := strings.Fields(scanner.Text())\n\t\tif len(line) > 8 {\n\t\t\tsubvol := strings.Split(line[8], path)\n\t\t\tif len(subvol) > 1 {\n\t\t\t\tSubvolumeDestroy(path + subvol[1])\n\t\t\t}\n\t\t}\n\t}\n\tqgroupDestroy(path)\n\terr = exec.Command(\"btrfs\", \"subvolume\", \"delete\", config.Agent.LxcPrefix+path).Run()\n\tlog.Check(log.WarnLevel, \"Destroying subvolume \"+path, err)\n}\n\nfunc qgroupDestroy(path string) {\n\tindex := id(path)\n\terr := exec.Command(\"btrfs\", \"qgroup\", \"destroy\", index, config.Agent.LxcPrefix).Run()\n\tlog.Check(log.WarnLevel, \"Destroying qgroup \"+path+\" \"+index, err)\n}\n\nfunc id(path string) string {\n\tout, _ := exec.Command(\"btrfs\", \"subvolume\", \"list\", config.Agent.LxcPrefix).Output()\n\tscanner := bufio.NewScanner(bytes.NewReader(out))\n\tfor scanner.Scan() {\n\t\tline := strings.Fields(scanner.Text())\n\t\tif len(line) > 8 {\n\t\t\tif strings.HasSuffix(line[8], path) {\n\t\t\t\treturn line[1]\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc Receive(src, dst, delta string, parent bool) {\n\targs := []string{\"receive\", \"-p\", config.Agent.LxcPrefix + src, config.Agent.LxcPrefix + 
dst}\n\tif !parent {\n\t\targs = []string{\"receive\", config.Agent.LxcPrefix + dst}\n\t}\n\treceive := exec.Command(\"btrfs\", args...)\n\tinput, err := os.Open(config.Agent.LxcPrefix + \"lxc-data\/tmpdir\/\" + delta)\n\treceive.Stdin = input\n\tlog.Check(log.FatalLevel, \"Opening delta \"+delta, err)\n\tlog.Check(log.FatalLevel, \"Receiving delta \"+delta, receive.Run())\n}\n\nfunc Send(src, dst, delta string) {\n\tnewdelta, err := os.Create(delta)\n\tlog.Check(log.FatalLevel, \"Creating delta \"+delta, err)\n\targs := []string{\"send\", \"-p\", src, dst}\n\tif src == dst {\n\t\targs = []string{\"send\", dst}\n\t}\n\tsend := exec.Command(\"btrfs\", args...)\n\tsend.Stdout = newdelta\n\tlog.Check(log.FatalLevel, \"Sending delta \"+delta, send.Run())\n}\n\nfunc ReadOnly(container string, flag bool) {\n\tfor _, path := range []string{container + \"\/rootfs\/\", \"lxc\/\" + container + \"-opt\", \"lxc-data\/\" + container + \"-var\", \"lxc-data\/\" + container + \"-home\"} {\n\t\targ := []string{\"property\", \"set\", \"-ts\", config.Agent.LxcPrefix + path, \"ro\", strconv.FormatBool(flag)}\n\t\tlog.Check(log.FatalLevel, \"Setting readonly: \"+strconv.FormatBool(flag), exec.Command(\"btrfs\", arg...).Run())\n\t}\n}\n\nfunc Stat(path, index string, raw bool) string {\n\tvar row = map[string]int{\n\t\t\"quota\": 3,\n\t\t\"usage\": 2,\n\t}\n\n\targs := []string{\"qgroup\", \"show\", \"-r\", config.Agent.LxcPrefix}\n\tif raw {\n\t\targs = []string{\"qgroup\", \"show\", \"-r\", \"--raw\", config.Agent.LxcPrefix}\n\t}\n\tout, err := exec.Command(\"btrfs\", args...).Output()\n\tlog.Check(log.FatalLevel, \"Getting btrfs stats\", err)\n\tind := id(path)\n\tscanner := bufio.NewScanner(bytes.NewReader(out))\n\tfor scanner.Scan() {\n\t\tline := strings.Fields(scanner.Text())\n\t\tif len(line) > 3 {\n\t\t\tif line[0] == \"0\/\"+ind {\n\t\t\t\treturn line[row[index]]\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc Quota(path string, size ...string) string {\n\tif size != nil {\n\t\texec.Command(\"btrfs\", \"qgroup\", \"limit\", size[0]+\"G\", config.Agent.LxcPrefix+path).Run()\n\t}\n\treturn Stat(path, \"quota\", false)\n}\n\nfunc GetContainerUUID(containerName string) string {\n\tvar uuid string\n\tresult, err := exec.Command(\"btrfs\", \"subvolume\", \"list\", \"-u\", config.Agent.LxcPrefix).CombinedOutput()\n\tif err != nil {\n\t\tlog.Error(\"btrfs command execute\", err.Error())\n\t}\n\tresArr := strings.Split(string(result), \"\\n\")\n\tfor _, r := range resArr {\n\t\tif strings.Contains(r, containerName+\"\/rootfs\") {\n\t\t\trArr := strings.Fields(r)\n\t\t\tuuid = rArr[8]\n\t\t}\n\n\t}\n\treturn uuid\n}\n\nfunc GetChildren(uuid string) []string {\n\tvar child []string\n\tresult, err := exec.Command(\"btrfs\", \"subvolume\", \"list\", \"-q\", config.Agent.LxcPrefix).CombinedOutput()\n\tif err != nil {\n\t\tlog.Error(\"btrfs -q command execute\", err.Error())\n\t}\n\tresultArr := strings.Split(string(result), \"\\n\")\n\tfor _, v := range resultArr {\n\t\tif strings.Contains(v, uuid) {\n\t\t\tvArr := strings.Fields(v)\n\t\t\tchild = append(child, vArr[10])\n\t\t}\n\t}\n\treturn child\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage sort_test\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n)\n\n\/\/ A Change is a record of source code changes, recording user, language, and delta size.\ntype Change struct {\n\tuser string\n\tlanguage string\n\tlines int\n}\n\ntype lessFunc func(p1, p2 *Change) bool\n\n\/\/ multiSorter implements the Sort interface, sorting the changes within.\ntype multiSorter struct {\n\tchanges []Change\n\tless []lessFunc\n}\n\n\/\/ Sort sorts the argument slice according to the less functions passed to OrderedBy.\nfunc (ms *multiSorter) Sort(changes []Change) {\n\tsort.Sort(ms)\n}\n\n\/\/ OrderedBy returns a Sorter that sorts using the less functions, in order.\n\/\/ Call its Sort method to sort the data.\nfunc OrderedBy(less ...lessFunc) *multiSorter {\n\treturn &multiSorter{\n\t\tchanges: changes,\n\t\tless: less,\n\t}\n}\n\n\/\/ Len is part of sort.Interface.\nfunc (ms *multiSorter) Len() int {\n\treturn len(ms.changes)\n}\n\n\/\/ Swap is part of sort.Interface.\nfunc (ms *multiSorter) Swap(i, j int) {\n\tms.changes[i], ms.changes[j] = ms.changes[j], ms.changes[i]\n}\n\n\/\/ Less is part of sort.Interface. It is implemented by looping along the\n\/\/ less functions until it finds a comparison that is either Less or\n\/\/ !Less. Note that it can call the less functions twice per call. We\n\/\/ could change the functions to return -1, 0, 1 and reduce the\n\/\/ number of calls for greater efficiency: an exercise for the reader.\nfunc (ms *multiSorter) Less(i, j int) bool {\n\tp, q := &ms.changes[i], &ms.changes[j]\n\t\/\/ Try all but the last comparison.\n\tvar k int\n\tfor k = 0; k < len(ms.less)-1; k++ {\n\t\tless := ms.less[k]\n\t\tswitch {\n\t\tcase less(p, q):\n\t\t\t\/\/ p < q, so we have a decision.\n\t\t\treturn true\n\t\tcase less(q, p):\n\t\t\t\/\/ p > q, so we have a decision.\n\t\t\treturn false\n\t\t}\n\t\t\/\/ p == q; try the next comparison.\n\t}\n\t\/\/ All comparisons to here said \"equal\", so just return whatever\n\t\/\/ the final comparison reports.\n\treturn ms.less[k](p, q)\n}\n\nvar changes = []Change{\n\t{\"gri\", \"Go\", 100},\n\t{\"ken\", \"C\", 150},\n\t{\"glenda\", \"Go\", 200},\n\t{\"rsc\", \"Go\", 200},\n\t{\"r\", \"Go\", 100},\n\t{\"ken\", \"Go\", 200},\n\t{\"dmr\", \"C\", 100},\n\t{\"r\", \"C\", 150},\n\t{\"gri\", \"Smalltalk\", 80},\n}\n\n\/\/ ExampleMultiKeys demonstrates a technique for sorting a struct type using different\n\/\/ sets of multiple fields in the comparison. 
We chain together \"Less\" functions, each of\n\/\/ which compares a single field.\nfunc Example_sortMultiKeys() {\n\t\/\/ Closures that order the Change structure.\n\tuser := func(c1, c2 *Change) bool {\n\t\treturn c1.user < c2.user\n\t}\n\tlanguage := func(c1, c2 *Change) bool {\n\t\treturn c1.language < c2.language\n\t}\n\tincreasingLines := func(c1, c2 *Change) bool {\n\t\treturn c1.lines < c2.lines\n\t}\n\tdecreasingLines := func(c1, c2 *Change) bool {\n\t\treturn c1.lines > c2.lines \/\/ Note: > orders downwards.\n\t}\n\n\t\/\/ Simple use: Sort by user.\n\tOrderedBy(user).Sort(changes)\n\tfmt.Println(\"By user:\", changes)\n\n\t\/\/ multiSorter implements the Sort interface, so we can also do this.\n\tsort.Sort(OrderedBy(user, increasingLines))\n\tfmt.Println(\"By user,<lines:\", changes)\n\n\t\/\/ More examples.\n\tOrderedBy(user, decreasingLines).Sort(changes)\n\tfmt.Println(\"By user,>lines:\", changes)\n\n\tOrderedBy(language, increasingLines).Sort(changes)\n\tfmt.Println(\"By language,<lines:\", changes)\n\n\tOrderedBy(language, increasingLines, user).Sort(changes)\n\tfmt.Println(\"By language,<lines,user:\", changes)\n\n\t\/\/ Output:\n\t\/\/By user: [{dmr C 100} {glenda Go 200} {gri Smalltalk 80} {gri Go 100} {ken Go 200} {ken C 150} {r Go 100} {r C 150} {rsc Go 200}]\n\t\/\/By user,<lines: [{dmr C 100} {glenda Go 200} {gri Smalltalk 80} {gri Go 100} {ken C 150} {ken Go 200} {r Go 100} {r C 150} {rsc Go 200}]\n\t\/\/By user,>lines: [{dmr C 100} {glenda Go 200} {gri Go 100} {gri Smalltalk 80} {ken Go 200} {ken C 150} {r C 150} {r Go 100} {rsc Go 200}]\n\t\/\/By language,<lines: [{dmr C 100} {ken C 150} {r C 150} {gri Go 100} {r Go 100} {ken Go 200} {glenda Go 200} {rsc Go 200} {gri Smalltalk 80}]\n\t\/\/By language,<lines,user: [{dmr C 100} {ken C 150} {r C 150} {gri Go 100} {r Go 100} {glenda Go 200} {ken Go 200} {rsc Go 200} {gri Smalltalk 80}]\n\n}\n<commit_msg>sort: fix Example_sortMultiKeys<commit_after>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage sort_test\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n)\n\n\/\/ A Change is a record of source code changes, recording user, language, and delta size.\ntype Change struct {\n\tuser string\n\tlanguage string\n\tlines int\n}\n\ntype lessFunc func(p1, p2 *Change) bool\n\n\/\/ multiSorter implements the Sort interface, sorting the changes within.\ntype multiSorter struct {\n\tchanges []Change\n\tless []lessFunc\n}\n\n\/\/ Sort sorts the argument slice according to the less functions passed to OrderedBy.\nfunc (ms *multiSorter) Sort(changes []Change) {\n\tms.changes = changes\n\tsort.Sort(ms)\n}\n\n\/\/ OrderedBy returns a Sorter that sorts using the less functions, in order.\n\/\/ Call its Sort method to sort the data.\nfunc OrderedBy(less ...lessFunc) *multiSorter {\n\treturn &multiSorter{\n\t\tless: less,\n\t}\n}\n\n\/\/ Len is part of sort.Interface.\nfunc (ms *multiSorter) Len() int {\n\treturn len(ms.changes)\n}\n\n\/\/ Swap is part of sort.Interface.\nfunc (ms *multiSorter) Swap(i, j int) {\n\tms.changes[i], ms.changes[j] = ms.changes[j], ms.changes[i]\n}\n\n\/\/ Less is part of sort.Interface. It is implemented by looping along the\n\/\/ less functions until it finds a comparison that is either Less or\n\/\/ !Less. Note that it can call the less functions twice per call. 
We\n\/\/ could change the functions to return -1, 0, 1 and reduce the\n\/\/ number of calls for greater efficiency: an exercise for the reader.\nfunc (ms *multiSorter) Less(i, j int) bool {\n\tp, q := &ms.changes[i], &ms.changes[j]\n\t\/\/ Try all but the last comparison.\n\tvar k int\n\tfor k = 0; k < len(ms.less)-1; k++ {\n\t\tless := ms.less[k]\n\t\tswitch {\n\t\tcase less(p, q):\n\t\t\t\/\/ p < q, so we have a decision.\n\t\t\treturn true\n\t\tcase less(q, p):\n\t\t\t\/\/ p > q, so we have a decision.\n\t\t\treturn false\n\t\t}\n\t\t\/\/ p == q; try the next comparison.\n\t}\n\t\/\/ All comparisons to here said \"equal\", so just return whatever\n\t\/\/ the final comparison reports.\n\treturn ms.less[k](p, q)\n}\n\nvar changes = []Change{\n\t{\"gri\", \"Go\", 100},\n\t{\"ken\", \"C\", 150},\n\t{\"glenda\", \"Go\", 200},\n\t{\"rsc\", \"Go\", 200},\n\t{\"r\", \"Go\", 100},\n\t{\"ken\", \"Go\", 200},\n\t{\"dmr\", \"C\", 100},\n\t{\"r\", \"C\", 150},\n\t{\"gri\", \"Smalltalk\", 80},\n}\n\n\/\/ ExampleMultiKeys demonstrates a technique for sorting a struct type using different\n\/\/ sets of multiple fields in the comparison. We chain together \"Less\" functions, each of\n\/\/ which compares a single field.\nfunc Example_sortMultiKeys() {\n\t\/\/ Closures that order the Change structure.\n\tuser := func(c1, c2 *Change) bool {\n\t\treturn c1.user < c2.user\n\t}\n\tlanguage := func(c1, c2 *Change) bool {\n\t\treturn c1.language < c2.language\n\t}\n\tincreasingLines := func(c1, c2 *Change) bool {\n\t\treturn c1.lines < c2.lines\n\t}\n\tdecreasingLines := func(c1, c2 *Change) bool {\n\t\treturn c1.lines > c2.lines \/\/ Note: > orders downwards.\n\t}\n\n\t\/\/ Simple use: Sort by user.\n\tOrderedBy(user).Sort(changes)\n\tfmt.Println(\"By user:\", changes)\n\n\t\/\/ More examples.\n\tOrderedBy(user, increasingLines).Sort(changes)\n\tfmt.Println(\"By user,<lines:\", changes)\n\n\tOrderedBy(user, decreasingLines).Sort(changes)\n\tfmt.Println(\"By user,>lines:\", changes)\n\n\tOrderedBy(language, increasingLines).Sort(changes)\n\tfmt.Println(\"By language,<lines:\", changes)\n\n\tOrderedBy(language, increasingLines, user).Sort(changes)\n\tfmt.Println(\"By language,<lines,user:\", changes)\n\n\t\/\/ Output:\n\t\/\/By user: [{dmr C 100} {glenda Go 200} {gri Smalltalk 80} {gri Go 100} {ken Go 200} {ken C 150} {r Go 100} {r C 150} {rsc Go 200}]\n\t\/\/By user,<lines: [{dmr C 100} {glenda Go 200} {gri Smalltalk 80} {gri Go 100} {ken C 150} {ken Go 200} {r Go 100} {r C 150} {rsc Go 200}]\n\t\/\/By user,>lines: [{dmr C 100} {glenda Go 200} {gri Go 100} {gri Smalltalk 80} {ken Go 200} {ken C 150} {r C 150} {r Go 100} {rsc Go 200}]\n\t\/\/By language,<lines: [{dmr C 100} {ken C 150} {r C 150} {gri Go 100} {r Go 100} {ken Go 200} {glenda Go 200} {rsc Go 200} {gri Smalltalk 80}]\n\t\/\/By language,<lines,user: [{dmr C 100} {ken C 150} {r C 150} {gri Go 100} {r Go 100} {glenda Go 200} {ken Go 200} {rsc Go 200} {gri Smalltalk 80}]\n\n}\n<|endoftext|>"} {"text":"<commit_before>package alerts\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/libopenstorage\/openstorage\/api\"\n\t\"github.com\/portworx\/kvdb\"\n\t\"go.pedge.io\/proto\/time\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype watcherStatus int\ntype watcher struct {\n\tkvcb kvdb.WatchCB\n\tstatus watcherStatus\n\tcb AlertsWatcherFunc\n\tclusterId string\n\tkvdb kvdb.Kvdb\n}\ntype KvAlerts struct {\n\tkvdbName string\n\tkvdbDomain string\n\tkvdbMachines []string\n\tclusterId string\n}\n\nconst (\n\talertsKey = 
\"alerts\/\"\n\tnextAlertsIdKey = \"nextAlertsId\"\n\tclusterKey = \"cluster\/\"\n\tvolumeKey = \"volume\/\"\n\tnodeKey = \"node\/\"\n\tbootstrap = \"bootstrap\"\n\t\/\/ Name of this alerts client implementation\n\tName = \"alerts_kvdb\"\n\t\/\/ NameTest : This alert instance used only for unit tests\n\tNameTest = \"alerts_kvdb_test\"\n)\n\nconst (\n\twatchBootstrap = watcherStatus(iota)\n\twatchReady\n\twatchError\n)\n\nvar (\n\tkvdbMap map[string]kvdb.Kvdb\n\twatcherMap map[string]*watcher\n\talertsWatchIndex uint64\n\twatchErrors int\n)\n\n\/\/ GetKvdbInstance - Returns a kvdb instance associated with this alert client and clusterId combination\nfunc (kva *KvAlerts) GetKvdbInstance() kvdb.Kvdb {\n\treturn kvdbMap[kva.clusterId]\n}\n\n\/\/ Init initializes a AlertsClient interface implementation\nfunc Init(name string, domain string, machines []string, clusterId string) (AlertsClient, error) {\n\t_, ok := kvdbMap[clusterId]\n\tif !ok {\n\t\tkv, err := kvdb.New(name, domain + \"\/\" + clusterId, machines, nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tkvdbMap[clusterId] = kv\n\t}\n\t\n\treturn &KvAlerts{kvdbName: name, kvdbDomain: domain, kvdbMachines: machines, clusterId: clusterId}, nil\n}\n\n\/\/ Raise raises an Alert\nfunc (kva *KvAlerts) Raise(a api.Alerts) (api.Alerts, error) {\n\tkv := kva.GetKvdbInstance()\n\tif a.Resource == api.ResourceType_UNKNOWN_RESOURCE {\n\t\treturn api.Alerts{}, ErrResourceNotFound\n\t}\n\talertId, err := kva.getNextIdFromKVDB()\n\tif err != nil {\n\t\treturn a, err\n\t}\n\ta.Id = alertId\n\ta.Timestamp = prototime.Now()\n\ta.Cleared = false\n\t_, err = kv.Create(getResourceKey(a.Resource)+strconv.FormatInt(a.Id, 10), &a, 0)\n\treturn a, err\n}\n\n\n\/\/ Erase erases an alert\nfunc (kva *KvAlerts) Erase(resourceType api.ResourceType, alertId int64) error {\n\tkv := kva.GetKvdbInstance()\n\n\tif resourceType == api.ResourceType_UNKNOWN_RESOURCE {\n\t\treturn ErrResourceNotFound\n\t}\n\tvar err error\n\n\t_, err = kv.Delete(getResourceKey(resourceType) + strconv.FormatInt(alertId, 10))\n\treturn err\n}\n\n\/\/ Clear clears an alert\nfunc (kva *KvAlerts) Clear(resourceType api.ResourceType, alertId int64) error {\n\tkv := kva.GetKvdbInstance()\n\tvar (\n\t\terr error\n\t\talert api.Alerts\n\t)\n\tif resourceType == api.ResourceType_UNKNOWN_RESOURCE {\n\t\treturn ErrResourceNotFound\n\t}\n\n\t_, err = kv.GetVal(getResourceKey(resourceType)+strconv.FormatInt(alertId, 10), &alert)\n\tif err != nil {\n\t\treturn err\n\t}\n\talert.Cleared = true\n\n\t_, err = kv.Update(getResourceKey(resourceType)+strconv.FormatInt(alertId, 10), &alert, 0)\n\treturn err\n}\n\n\/\/ Retrieve retrieves a specific alert\nfunc (kva *KvAlerts) Retrieve(resourceType api.ResourceType, alertId int64) (api.Alerts, error) {\n\tvar (\n\t\talert api.Alerts\n\t\terr error\n\t)\n\tif resourceType == api.ResourceType_UNKNOWN_RESOURCE {\n\t\treturn api.Alerts{}, ErrResourceNotFound\n\t}\n\tkv := kva.GetKvdbInstance()\n\n\t_, err = kv.GetVal(getResourceKey(resourceType)+strconv.FormatInt(alertId, 10), &alert)\n\n\treturn alert, err\n}\n\n\/\/ Enumerate enumerates alerts\nfunc (kva *KvAlerts) Enumerate(filter api.Alerts) ([]*api.Alerts, error) {\n\tallAlerts := []*api.Alerts{}\n\tresourceAlerts := []*api.Alerts{}\n\tvar err error\n\n\tif filter.Resource != api.ResourceType_UNKNOWN_RESOURCE {\n\t\tresourceAlerts, err = kva.getResourceSpecificAlerts(filter.Resource)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tresourceAlerts, err = kva.getAllAlerts()\n\t}\n\n\tif 
filter.Severity != 0 {\n\t\tfor _, v := range resourceAlerts {\n\t\t\tif v.Severity <= filter.Severity {\n\t\t\t\tallAlerts = append(allAlerts, v)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tallAlerts = append(allAlerts, resourceAlerts...)\n\t}\n\n\treturn allAlerts, err\n}\n\n\/\/ EnumerateWithinTimeRange enumerates alerts between timeStart and timeEnd\nfunc (kva *KvAlerts) EnumerateWithinTimeRange(\n\ttimeStart time.Time,\n\ttimeEnd time.Time,\n\tresourceType api.ResourceType,\n) ([]*api.Alerts, error) {\n\tallAlerts := []*api.Alerts{}\n\tresourceAlerts := []*api.Alerts{}\n\tvar err error\n\n\tif resourceType != 0 {\n\t\tresourceAlerts, err = kva.getResourceSpecificAlerts(resourceType)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tresourceAlerts, err = kva.getAllAlerts()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tfor _, v := range resourceAlerts {\n\t\talertTime := prototime.TimestampToTime(v.Timestamp)\n\t\tif alertTime.Before(timeEnd) && alertTime.After(timeStart) {\n\t\t\tallAlerts = append(allAlerts, v)\n\t\t}\n\t}\n\treturn allAlerts, nil\n}\n\n\/\/ Watch on all alerts\nfunc (kva *KvAlerts) Watch(clusterId string, alertsWatcherFunc AlertsWatcherFunc) error {\n\t_, ok := kvdbMap[clusterId]\n\tif !ok {\n\t\tkv, err := kvdb.New(kva.kvdbName, kva.kvdbDomain + \"\/\" + clusterId, kva.kvdbMachines, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tkvdbMap[clusterId] = kv\n\t}\n\t\n\tkv := kvdbMap[clusterId]\n\talertsWatcher := &watcher{status: watchBootstrap, cb: alertsWatcherFunc, kvcb: kvdbWatch, kvdb: kv}\n\twatcherKey := clusterId\n\twatcherMap[watcherKey] = alertsWatcher\n\n\tif err := subscribeWatch(watcherKey); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Subscribing to a watch can be done in a goroutine. Bootstrap by writing to the key and waiting for an update\n\tretries := 0\n\t\n\tfor alertsWatcher.status == watchBootstrap {\n\t\t_, err := kv.Put(alertsKey+bootstrap, time.Now(), 1)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif alertsWatcher.status == watchBootstrap {\n\t\t\tretries++\n\t\t\ttime.Sleep(time.Millisecond * 100)\n\t\t}\n\t\tif retries == 5 {\n\t\t\treturn fmt.Errorf(\"Failed to bootstrap watch on %s\", clusterId)\n\t\t}\n\t}\n\tif alertsWatcher.status != watchReady {\n\t\treturn fmt.Errorf(\"Failed to watch on %s\", clusterId)\n\t}\n\n\treturn nil\n}\n\n\/\/ Shutdown\nfunc (kva *KvAlerts) Shutdown() {\n}\n\n\/\/ String\nfunc (kva *KvAlerts) String() string {\n\treturn Name\n}\n\nfunc getResourceKey(resourceType api.ResourceType) string {\n\tif resourceType == api.ResourceType_VOLUMES {\n\t\treturn alertsKey + volumeKey\n\t}\n\tif resourceType == api.ResourceType_NODE {\n\t\treturn alertsKey + nodeKey\n\t}\n\treturn alertsKey + clusterKey\n}\n\nfunc getNextAlertsIdKey() string {\n\treturn alertsKey + nextAlertsIdKey\n}\n\nfunc (kva *KvAlerts) getNextIdFromKVDB() (int64, error) {\n\tkv := kva.GetKvdbInstance()\n\tnextAlertsId := 0\n\tkvp, err := kv.Create(getNextAlertsIdKey(), strconv.FormatInt(int64(nextAlertsId+1), 10), 0)\n\n\tfor err != nil {\n\t\tkvp, err = kv.GetVal(getNextAlertsIdKey(), &nextAlertsId)\n\t\tif err != nil {\n\t\t\terr = ErrNotInitialized\n\t\t\treturn -1, err\n\t\t} \n\t\tprevValue := kvp.Value\n\t\tnewKvp := *kvp\n\t\tnewKvp.Value = []byte(strconv.FormatInt(int64(nextAlertsId+1), 10))\n\t\tkvp, err = kv.CompareAndSet(&newKvp, kvdb.KVFlags(0), prevValue)\n\t}\n\treturn int64(nextAlertsId), err\n}\n\nfunc (kva *KvAlerts) getResourceSpecificAlerts(resourceType api.ResourceType) ([]*api.Alerts, error) {\n\tkv := 
kva.GetKvdbInstance()\n\tallAlerts := []*api.Alerts{}\n\tkvp, err := kv.Enumerate(getResourceKey(resourceType))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, v := range kvp {\n\t\tvar elem *api.Alerts\n\t\terr = json.Unmarshal(v.Value, &elem)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tallAlerts = append(allAlerts, elem)\n\t}\n\treturn allAlerts, nil\n}\n\nfunc (kva *KvAlerts) getAllAlerts() ([]*api.Alerts, error) {\n\tallAlerts := []*api.Alerts{}\n\tclusterAlerts := []*api.Alerts{}\n\tnodeAlerts := []*api.Alerts{}\n\tvolumeAlerts := []*api.Alerts{}\n\tvar err error\n\n\tnodeAlerts, err = kva.getResourceSpecificAlerts(api.ResourceType_NODE)\n\tif err == nil {\n\t\tallAlerts = append(allAlerts, nodeAlerts...)\n\t}\n\tvolumeAlerts, err = kva.getResourceSpecificAlerts(api.ResourceType_VOLUMES)\n\tif err == nil {\n\t\tallAlerts = append(allAlerts, volumeAlerts...)\n\t}\n\tclusterAlerts, err = kva.getResourceSpecificAlerts(api.ResourceType_CLUSTER)\n\tif err == nil {\n\t\tallAlerts = append(allAlerts, clusterAlerts...)\n\t}\n\n\tif len(allAlerts) > 0 {\n\t\treturn allAlerts, nil\n\t} else if len(allAlerts) == 0 {\n\t\treturn nil, fmt.Errorf(\"No alerts raised yet\")\n\t}\n\treturn allAlerts, err\n}\n\nfunc kvdbWatch(prefix string, opaque interface{}, kvp *kvdb.KVPair, err error) error {\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\n\twatcherKey := strings.Split(prefix, \"\/\")[1]\n\t\n\tif err == nil && strings.HasSuffix(kvp.Key, bootstrap) {\n\t\tw := watcherMap[watcherKey]\n\t\tw.status = watchReady\n\t\treturn nil\n\t}\n\n\tif err != nil {\n\t\tif w:= watcherMap[watcherKey]; w.status == watchBootstrap {\n\t\t\tw.status = watchError\n\t\t\treturn err\n\t\t}\n\t\tif watchErrors == 5 {\n\t\t\treturn fmt.Errorf(\"Too many watch errors (%v)\", watchErrors)\n\t\t}\n\t\twatchErrors++\n\t\tif err := subscribeWatch(watcherKey); err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to resubscribe\")\n\t\t}\n\t}\n\n\n\tif strings.HasSuffix(kvp.Key, nextAlertsIdKey) {\n\t\t\/\/ Ignore write on this key\n\t\t\/\/ Todo : Add a map of ignore keys\n\t\treturn nil\n\t}\n\twatchErrors = 0\n\n\tif kvp.ModifiedIndex > alertsWatchIndex {\n\t\talertsWatchIndex = kvp.ModifiedIndex\n\t}\n\n\tw := watcherMap[watcherKey]\n\n\tif kvp.Action == kvdb.KVDelete {\n\t\terr = w.cb(nil, AlertDeleteAction, prefix, kvp.Key)\n\t\treturn err\n\t}\n\n\tvar alert api.Alerts\n\terr = json.Unmarshal(kvp.Value, &alert)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to unmarshal Alert\")\n\t}\n\n\tswitch kvp.Action {\n\tcase kvdb.KVCreate:\n\t\terr = w.cb(&alert, AlertCreateAction, prefix, kvp.Key)\n\tcase kvdb.KVSet:\n\t\terr = w.cb(&alert, AlertUpdateAction, prefix, kvp.Key)\n\tdefault:\n\t\terr = fmt.Errorf(\"Unhandled KV Action\")\n\t}\n\treturn err\n}\n\nfunc subscribeWatch(key string) error {\n\twatchIndex := alertsWatchIndex\n\tif watchIndex != 0 {\n\t\twatchIndex = alertsWatchIndex + 1\n\t}\n\n\tw, ok := watcherMap[key]\n\tif !ok {\n\t\treturn fmt.Errorf(\"Failed to find a watch on cluster : %v\", key)\n\t}\n\t\n\tkv := w.kvdb\n\tif err := kv.WatchTree(alertsKey, watchIndex, nil, w.kvcb); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc init() {\n\tkvdbMap = make(map[string]kvdb.Kvdb)\n\twatcherMap = make(map[string]*watcher)\n\tRegister(Name, Init)\n\tRegister(NameTest, Init)\n}\n<commit_msg>Return an error when we get a dubious kvdb callback<commit_after>package alerts\n\nimport 
(\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/libopenstorage\/openstorage\/api\"\n\t\"github.com\/portworx\/kvdb\"\n\t\"go.pedge.io\/proto\/time\"\n\t\"go.pedge.io\/dlog\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype watcherStatus int\ntype watcher struct {\n\tkvcb kvdb.WatchCB\n\tstatus watcherStatus\n\tcb AlertsWatcherFunc\n\tclusterId string\n\tkvdb kvdb.Kvdb\n}\ntype KvAlerts struct {\n\tkvdbName string\n\tkvdbDomain string\n\tkvdbMachines []string\n\tclusterId string\n}\n\nconst (\n\talertsKey = \"alerts\/\"\n\tnextAlertsIdKey = \"nextAlertsId\"\n\tclusterKey = \"cluster\/\"\n\tvolumeKey = \"volume\/\"\n\tnodeKey = \"node\/\"\n\tbootstrap = \"bootstrap\"\n\t\/\/ Name of this alerts client implementation\n\tName = \"alerts_kvdb\"\n\t\/\/ NameTest : This alert instance used only for unit tests\n\tNameTest = \"alerts_kvdb_test\"\n)\n\nconst (\n\twatchBootstrap = watcherStatus(iota)\n\twatchReady\n\twatchError\n)\n\nvar (\n\tkvdbMap map[string]kvdb.Kvdb\n\twatcherMap map[string]*watcher\n\talertsWatchIndex uint64\n\twatchErrors int\n)\n\n\/\/ GetKvdbInstance - Returns a kvdb instance associated with this alert client and clusterId combination\nfunc (kva *KvAlerts) GetKvdbInstance() kvdb.Kvdb {\n\treturn kvdbMap[kva.clusterId]\n}\n\n\/\/ Init initializes a AlertsClient interface implementation\nfunc Init(name string, domain string, machines []string, clusterId string) (AlertsClient, error) {\n\t_, ok := kvdbMap[clusterId]\n\tif !ok {\n\t\tkv, err := kvdb.New(name, domain + \"\/\" + clusterId, machines, nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tkvdbMap[clusterId] = kv\n\t}\n\t\n\treturn &KvAlerts{kvdbName: name, kvdbDomain: domain, kvdbMachines: machines, clusterId: clusterId}, nil\n}\n\n\/\/ Raise raises an Alert\nfunc (kva *KvAlerts) Raise(a api.Alerts) (api.Alerts, error) {\n\tkv := kva.GetKvdbInstance()\n\tif a.Resource == api.ResourceType_UNKNOWN_RESOURCE {\n\t\treturn api.Alerts{}, ErrResourceNotFound\n\t}\n\talertId, err := kva.getNextIdFromKVDB()\n\tif err != nil {\n\t\treturn a, err\n\t}\n\ta.Id = alertId\n\ta.Timestamp = prototime.Now()\n\ta.Cleared = false\n\t_, err = kv.Create(getResourceKey(a.Resource)+strconv.FormatInt(a.Id, 10), &a, 0)\n\treturn a, err\n}\n\n\n\/\/ Erase erases an alert\nfunc (kva *KvAlerts) Erase(resourceType api.ResourceType, alertId int64) error {\n\tkv := kva.GetKvdbInstance()\n\n\tif resourceType == api.ResourceType_UNKNOWN_RESOURCE {\n\t\treturn ErrResourceNotFound\n\t}\n\tvar err error\n\n\t_, err = kv.Delete(getResourceKey(resourceType) + strconv.FormatInt(alertId, 10))\n\treturn err\n}\n\n\/\/ Clear clears an alert\nfunc (kva *KvAlerts) Clear(resourceType api.ResourceType, alertId int64) error {\n\tkv := kva.GetKvdbInstance()\n\tvar (\n\t\terr error\n\t\talert api.Alerts\n\t)\n\tif resourceType == api.ResourceType_UNKNOWN_RESOURCE {\n\t\treturn ErrResourceNotFound\n\t}\n\n\t_, err = kv.GetVal(getResourceKey(resourceType)+strconv.FormatInt(alertId, 10), &alert)\n\tif err != nil {\n\t\treturn err\n\t}\n\talert.Cleared = true\n\n\t_, err = kv.Update(getResourceKey(resourceType)+strconv.FormatInt(alertId, 10), &alert, 0)\n\treturn err\n}\n\n\/\/ Retrieve retrieves a specific alert\nfunc (kva *KvAlerts) Retrieve(resourceType api.ResourceType, alertId int64) (api.Alerts, error) {\n\tvar (\n\t\talert api.Alerts\n\t\terr error\n\t)\n\tif resourceType == api.ResourceType_UNKNOWN_RESOURCE {\n\t\treturn api.Alerts{}, ErrResourceNotFound\n\t}\n\tkv := kva.GetKvdbInstance()\n\n\t_, err = 
kv.GetVal(getResourceKey(resourceType)+strconv.FormatInt(alertId, 10), &alert)\n\n\treturn alert, err\n}\n\n\/\/ Enumerate enumerates alerts\nfunc (kva *KvAlerts) Enumerate(filter api.Alerts) ([]*api.Alerts, error) {\n\tallAlerts := []*api.Alerts{}\n\tresourceAlerts := []*api.Alerts{}\n\tvar err error\n\n\tif filter.Resource != api.ResourceType_UNKNOWN_RESOURCE {\n\t\tresourceAlerts, err = kva.getResourceSpecificAlerts(filter.Resource)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tresourceAlerts, err = kva.getAllAlerts()\n\t}\n\n\tif filter.Severity != 0 {\n\t\tfor _, v := range resourceAlerts {\n\t\t\tif v.Severity <= filter.Severity {\n\t\t\t\tallAlerts = append(allAlerts, v)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tallAlerts = append(allAlerts, resourceAlerts...)\n\t}\n\n\treturn allAlerts, err\n}\n\n\/\/ EnumerateWithinTimeRange enumerates alerts between timeStart and timeEnd\nfunc (kva *KvAlerts) EnumerateWithinTimeRange(\n\ttimeStart time.Time,\n\ttimeEnd time.Time,\n\tresourceType api.ResourceType,\n) ([]*api.Alerts, error) {\n\tallAlerts := []*api.Alerts{}\n\tresourceAlerts := []*api.Alerts{}\n\tvar err error\n\n\tif resourceType != 0 {\n\t\tresourceAlerts, err = kva.getResourceSpecificAlerts(resourceType)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tresourceAlerts, err = kva.getAllAlerts()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tfor _, v := range resourceAlerts {\n\t\talertTime := prototime.TimestampToTime(v.Timestamp)\n\t\tif alertTime.Before(timeEnd) && alertTime.After(timeStart) {\n\t\t\tallAlerts = append(allAlerts, v)\n\t\t}\n\t}\n\treturn allAlerts, nil\n}\n\n\/\/ Watch on all alerts\nfunc (kva *KvAlerts) Watch(clusterId string, alertsWatcherFunc AlertsWatcherFunc) error {\n\t_, ok := kvdbMap[clusterId]\n\tif !ok {\n\t\tkv, err := kvdb.New(kva.kvdbName, kva.kvdbDomain + \"\/\" + clusterId, kva.kvdbMachines, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tkvdbMap[clusterId] = kv\n\t}\n\t\n\tkv := kvdbMap[clusterId]\n\talertsWatcher := &watcher{status: watchBootstrap, cb: alertsWatcherFunc, kvcb: kvdbWatch, kvdb: kv}\n\twatcherKey := clusterId\n\twatcherMap[watcherKey] = alertsWatcher\n\n\tif err := subscribeWatch(watcherKey); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Subscribing to a watch can be done in a goroutine. 
Bootstrap by writing to the key and waiting for an update\n\tretries := 0\n\t\n\tfor alertsWatcher.status == watchBootstrap {\n\t\t_, err := kv.Put(alertsKey+bootstrap, time.Now(), 1)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif alertsWatcher.status == watchBootstrap {\n\t\t\tretries++\n\t\t\ttime.Sleep(time.Millisecond * 100)\n\t\t}\n\t\tif retries == 5 {\n\t\t\treturn fmt.Errorf(\"Failed to bootstrap watch on %s\", clusterId)\n\t\t}\n\t}\n\tif alertsWatcher.status != watchReady {\n\t\treturn fmt.Errorf(\"Failed to watch on %s\", clusterId)\n\t}\n\n\treturn nil\n}\n\n\/\/ Shutdown\nfunc (kva *KvAlerts) Shutdown() {\n}\n\n\/\/ String\nfunc (kva *KvAlerts) String() string {\n\treturn Name\n}\n\nfunc getResourceKey(resourceType api.ResourceType) string {\n\tif resourceType == api.ResourceType_VOLUMES {\n\t\treturn alertsKey + volumeKey\n\t}\n\tif resourceType == api.ResourceType_NODE {\n\t\treturn alertsKey + nodeKey\n\t}\n\treturn alertsKey + clusterKey\n}\n\nfunc getNextAlertsIdKey() string {\n\treturn alertsKey + nextAlertsIdKey\n}\n\nfunc (kva *KvAlerts) getNextIdFromKVDB() (int64, error) {\n\tkv := kva.GetKvdbInstance()\n\tnextAlertsId := 0\n\tkvp, err := kv.Create(getNextAlertsIdKey(), strconv.FormatInt(int64(nextAlertsId+1), 10), 0)\n\n\tfor err != nil {\n\t\tkvp, err = kv.GetVal(getNextAlertsIdKey(), &nextAlertsId)\n\t\tif err != nil {\n\t\t\terr = ErrNotInitialized\n\t\t\treturn -1, err\n\t\t} \n\t\tprevValue := kvp.Value\n\t\tnewKvp := *kvp\n\t\tnewKvp.Value = []byte(strconv.FormatInt(int64(nextAlertsId+1), 10))\n\t\tkvp, err = kv.CompareAndSet(&newKvp, kvdb.KVFlags(0), prevValue)\n\t}\n\treturn int64(nextAlertsId), err\n}\n\nfunc (kva *KvAlerts) getResourceSpecificAlerts(resourceType api.ResourceType) ([]*api.Alerts, error) {\n\tkv := kva.GetKvdbInstance()\n\tallAlerts := []*api.Alerts{}\n\tkvp, err := kv.Enumerate(getResourceKey(resourceType))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, v := range kvp {\n\t\tvar elem *api.Alerts\n\t\terr = json.Unmarshal(v.Value, &elem)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tallAlerts = append(allAlerts, elem)\n\t}\n\treturn allAlerts, nil\n}\n\nfunc (kva *KvAlerts) getAllAlerts() ([]*api.Alerts, error) {\n\tallAlerts := []*api.Alerts{}\n\tclusterAlerts := []*api.Alerts{}\n\tnodeAlerts := []*api.Alerts{}\n\tvolumeAlerts := []*api.Alerts{}\n\tvar err error\n\n\tnodeAlerts, err = kva.getResourceSpecificAlerts(api.ResourceType_NODE)\n\tif err == nil {\n\t\tallAlerts = append(allAlerts, nodeAlerts...)\n\t}\n\tvolumeAlerts, err = kva.getResourceSpecificAlerts(api.ResourceType_VOLUMES)\n\tif err == nil {\n\t\tallAlerts = append(allAlerts, volumeAlerts...)\n\t}\n\tclusterAlerts, err = kva.getResourceSpecificAlerts(api.ResourceType_CLUSTER)\n\tif err == nil {\n\t\tallAlerts = append(allAlerts, clusterAlerts...)\n\t}\n\n\tif len(allAlerts) > 0 {\n\t\treturn allAlerts, nil\n\t} else if len(allAlerts) == 0 {\n\t\treturn nil, fmt.Errorf(\"No alerts raised yet\")\n\t}\n\treturn allAlerts, err\n}\n\nfunc kvdbWatch(prefix string, opaque interface{}, kvp *kvdb.KVPair, err error) error {\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\n\twatcherKey := strings.Split(prefix, \"\/\")[1]\n\t\n\tif err == nil && strings.HasSuffix(kvp.Key, bootstrap) {\n\t\tw := watcherMap[watcherKey]\n\t\tw.status = watchReady\n\t\treturn nil\n\t}\n\n\tif err != nil {\n\t\tif w:= watcherMap[watcherKey]; w.status == watchBootstrap {\n\t\t\tw.status = watchError\n\t\t\treturn err\n\t\t}\n\t\tif watchErrors == 5 {\n\t\t\tdlog.Warnf(\"Too many watch errors 
: %v. Error is %s\", watchErrors, err.Error())\n\t\t}\n\t\twatchErrors++\n\t\tif err := subscribeWatch(watcherKey); err != nil {\n\t\t\tdlog.Warnf(\"Failed to resubscribe : %s\", err.Error())\n\t\t}\n\t\treturn err\n\t}\n\n\n\tif strings.HasSuffix(kvp.Key, nextAlertsIdKey) {\n\t\t\/\/ Ignore write on this key\n\t\t\/\/ Todo : Add a map of ignore keys\n\t\treturn nil\n\t}\n\twatchErrors = 0\n\n\tif kvp.ModifiedIndex > alertsWatchIndex {\n\t\talertsWatchIndex = kvp.ModifiedIndex\n\t}\n\n\tw := watcherMap[watcherKey]\n\n\tif kvp.Action == kvdb.KVDelete {\n\t\terr = w.cb(nil, AlertDeleteAction, prefix, kvp.Key)\n\t\treturn err\n\t}\n\n\tvar alert api.Alerts\n\terr = json.Unmarshal(kvp.Value, &alert)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to unmarshal Alert\")\n\t}\n\n\tswitch kvp.Action {\n\tcase kvdb.KVCreate:\n\t\terr = w.cb(&alert, AlertCreateAction, prefix, kvp.Key)\n\tcase kvdb.KVSet:\n\t\terr = w.cb(&alert, AlertUpdateAction, prefix, kvp.Key)\n\tdefault:\n\t\terr = fmt.Errorf(\"Unhandled KV Action\")\n\t}\n\treturn err\n}\n\nfunc subscribeWatch(key string) error {\n\twatchIndex := alertsWatchIndex\n\tif watchIndex != 0 {\n\t\twatchIndex = alertsWatchIndex + 1\n\t}\n\n\tw, ok := watcherMap[key]\n\tif !ok {\n\t\treturn fmt.Errorf(\"Failed to find a watch on cluster : %v\", key)\n\t}\n\t\n\tkv := w.kvdb\n\tif err := kv.WatchTree(alertsKey, watchIndex, nil, w.kvcb); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc init() {\n\tkvdbMap = make(map[string]kvdb.Kvdb)\n\twatcherMap = make(map[string]*watcher)\n\tRegister(Name, Init)\n\tRegister(NameTest, Init)\n}\n<|endoftext|>"} {"text":"<commit_before>package azurerm\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/Azure\/azure-sdk-for-go\/arm\/storage\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n)\n\nfunc resourceArmStorageAccount() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceArmStorageAccountCreate,\n\t\tRead: resourceArmStorageAccountRead,\n\t\tUpdate: resourceArmStorageAccountUpdate,\n\t\tDelete: resourceArmStorageAccountDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validateArmStorageAccountName,\n\t\t\t},\n\n\t\t\t\"resource_group_name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"location\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tStateFunc: azureRMNormalizeLocation,\n\t\t\t},\n\n\t\t\t\"account_type\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tValidateFunc: validateArmStorageAccountType,\n\t\t\t},\n\n\t\t\t\"primary_location\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"secondary_location\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"primary_blob_endpoint\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"secondary_blob_endpoint\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"primary_queue_endpoint\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"secondary_queue_endpoint\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: 
true,\n\t\t\t},\n\n\t\t\t\"primary_table_endpoint\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"secondary_table_endpoint\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\/\/ NOTE: The API does not appear to expose a secondary file endpoint\n\t\t\t\"primary_file_endpoint\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"tags\": tagsSchema(),\n\t\t},\n\t}\n}\n\nfunc resourceArmStorageAccountCreate(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*ArmClient).storageServiceClient\n\n\tresourceGroupName := d.Get(\"resource_group_name\").(string)\n\tstorageAccountName := d.Get(\"name\").(string)\n\taccountType := d.Get(\"account_type\").(string)\n\tlocation := d.Get(\"location\").(string)\n\ttags := d.Get(\"tags\").(map[string]interface{})\n\n\topts := storage.AccountCreateParameters{\n\t\tLocation: &location,\n\t\tProperties: &storage.AccountPropertiesCreateParameters{\n\t\t\tAccountType: storage.AccountType(accountType),\n\t\t},\n\t\tTags: expandTags(tags),\n\t}\n\n\taccResp, err := client.Create(resourceGroupName, storageAccountName, opts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating Azure Storage Account '%s': %s\", storageAccountName, err)\n\t}\n\t_, err = pollIndefinitelyAsNeeded(client.Client, accResp.Response.Response, http.StatusOK)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating Azure Storage Account %q: %s\", storageAccountName, err)\n\t}\n\n\t\/\/ The only way to get the ID back apparently is to read the resource again\n\taccount, err := client.GetProperties(resourceGroupName, storageAccountName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error retrieving Azure Storage Account %q: %s\", storageAccountName, err)\n\t}\n\n\td.SetId(*account.ID)\n\n\treturn resourceArmStorageAccountRead(d, meta)\n}\n\n\/\/ resourceArmStorageAccountUpdate is unusual in the ARM API where most resources have a combined\n\/\/ and idempotent operation for CreateOrUpdate. 
In particular updating all of the parameters\n\/\/ available requires a call to Update per parameter...\nfunc resourceArmStorageAccountUpdate(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*ArmClient).storageServiceClient\n\tid, err := parseAzureResourceID(d.Id())\n\tif err != nil {\n\t\treturn err\n\t}\n\tstorageAccountName := id.Path[\"storageAccounts\"]\n\tresourceGroupName := id.ResourceGroup\n\n\td.Partial(true)\n\n\tif d.HasChange(\"account_type\") {\n\t\taccountType := d.Get(\"account_type\").(string)\n\n\t\topts := storage.AccountUpdateParameters{\n\t\t\tProperties: &storage.AccountPropertiesUpdateParameters{\n\t\t\t\tAccountType: storage.AccountType(accountType),\n\t\t\t},\n\t\t}\n\t\taccResp, err := client.Update(resourceGroupName, storageAccountName, opts)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error updating Azure Storage Account type %q: %s\", storageAccountName, err)\n\t\t}\n\t\t_, err = pollIndefinitelyAsNeeded(client.Client, accResp.Response.Response, http.StatusOK)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error updating Azure Storage Account type %q: %s\", storageAccountName, err)\n\t\t}\n\n\t\td.SetPartial(\"account_type\")\n\t}\n\n\tif d.HasChange(\"tags\") {\n\t\ttags := d.Get(\"tags\").(map[string]interface{})\n\n\t\topts := storage.AccountUpdateParameters{\n\t\t\tTags: expandTags(tags),\n\t\t}\n\t\taccResp, err := client.Update(resourceGroupName, storageAccountName, opts)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error updating Azure Storage Account tags %q: %s\", storageAccountName, err)\n\t\t}\n\t\t_, err = pollIndefinitelyAsNeeded(client.Client, accResp.Response.Response, http.StatusOK)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error updating Azure Storage Account tags %q: %s\", storageAccountName, err)\n\t\t}\n\n\t\td.SetPartial(\"tags\")\n\t}\n\n\td.Partial(false)\n\treturn nil\n}\n\nfunc resourceArmStorageAccountRead(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*ArmClient).storageServiceClient\n\n\tid, err := parseAzureResourceID(d.Id())\n\tif err != nil {\n\t\treturn err\n\t}\n\tname := id.Path[\"storageAccounts\"]\n\tresGroup := id.ResourceGroup\n\n\tresp, err := client.GetProperties(resGroup, name)\n\tif err != nil {\n\t\tif resp.StatusCode == http.StatusNotFound {\n\t\t\td.SetId(\"\")\n\t\t\treturn nil\n\t\t}\n\n\t\treturn fmt.Errorf(\"Error reading the state of AzureRM Storage Account %q: %s\", name, err)\n\t}\n\n\td.Set(\"location\", resp.Location)\n\td.Set(\"account_type\", resp.Properties.AccountType)\n\td.Set(\"primary_location\", resp.Properties.PrimaryLocation)\n\td.Set(\"secondary_location\", resp.Properties.SecondaryLocation)\n\n\tif resp.Properties.PrimaryEndpoints != nil {\n\t\td.Set(\"primary_blob_endpoint\", resp.Properties.PrimaryEndpoints.Blob)\n\t\td.Set(\"primary_queue_endpoint\", resp.Properties.PrimaryEndpoints.Queue)\n\t\td.Set(\"primary_table_endpoint\", resp.Properties.PrimaryEndpoints.Table)\n\t\td.Set(\"primary_file_endpoint\", resp.Properties.PrimaryEndpoints.File)\n\t}\n\n\tif resp.Properties.SecondaryEndpoints != nil {\n\t\tif resp.Properties.SecondaryEndpoints.Blob != nil {\n\t\t\td.Set(\"secondary_blob_endpoint\", resp.Properties.SecondaryEndpoints.Blob)\n\t\t} else {\n\t\t\td.Set(\"secondary_blob_endpoint\", \"\")\n\t\t}\n\t\tif resp.Properties.SecondaryEndpoints.Queue != nil {\n\t\t\td.Set(\"secondary_queue_endpoint\", resp.Properties.SecondaryEndpoints.Queue)\n\t\t} else {\n\t\t\td.Set(\"secondary_queue_endpoint\", \"\")\n\t\t}\n\t\tif 
resp.Properties.SecondaryEndpoints.Table != nil {\n\t\t\td.Set(\"secondary_table_endpoint\", resp.Properties.SecondaryEndpoints.Table)\n\t\t} else {\n\t\t\td.Set(\"secondary_table_endpoint\", \"\")\n\t\t}\n\t}\n\n\tflattenAndSetTags(d, resp.Tags)\n\n\treturn nil\n}\n\nfunc resourceArmStorageAccountDelete(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*ArmClient).storageServiceClient\n\n\tid, err := parseAzureResourceID(d.Id())\n\tif err != nil {\n\t\treturn err\n\t}\n\tname := id.Path[\"storageAccounts\"]\n\tresGroup := id.ResourceGroup\n\n\taccResp, err := client.Delete(resGroup, name)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error issuing AzureRM delete request for storage account %q: %s\", name, err)\n\t}\n\t_, err = pollIndefinitelyAsNeeded(client.Client, accResp.Response, http.StatusNotFound)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error polling for AzureRM delete request for storage account %q: %s\", name, err)\n\t}\n\n\treturn nil\n}\n\nfunc validateArmStorageAccountName(v interface{}, k string) (ws []string, es []error) {\n\tinput := v.(string)\n\n\tif !regexp.MustCompile(`\\A([a-z0-9]{3,24})\\z`).MatchString(input) {\n\t\tes = append(es, fmt.Errorf(\"name can only consist of lowercase letters and numbers, and must be between 3 and 24 characters long\"))\n\t}\n\n\treturn\n}\n\nfunc validateArmStorageAccountType(v interface{}, k string) (ws []string, es []error) {\n\tvalidAccountTypes := []string{\"standard_lrs\", \"standard_zrs\",\n\t\t\"standard_grs\", \"standard_ragrs\", \"premium_lrs\"}\n\n\tinput := strings.ToLower(v.(string))\n\n\tfor _, valid := range validAccountTypes {\n\t\tif valid == input {\n\t\t\treturn\n\t\t}\n\t}\n\n\tes = append(es, fmt.Errorf(\"Invalid storage account type %q\", input))\n\treturn\n}\n<commit_msg>provider\/azurerm: Add support for exporting the (#6742)<commit_after>package azurerm\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/Azure\/azure-sdk-for-go\/arm\/storage\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n)\n\nfunc resourceArmStorageAccount() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceArmStorageAccountCreate,\n\t\tRead: resourceArmStorageAccountRead,\n\t\tUpdate: resourceArmStorageAccountUpdate,\n\t\tDelete: resourceArmStorageAccountDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validateArmStorageAccountName,\n\t\t\t},\n\n\t\t\t\"resource_group_name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"location\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tStateFunc: azureRMNormalizeLocation,\n\t\t\t},\n\n\t\t\t\"account_type\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tValidateFunc: validateArmStorageAccountType,\n\t\t\t},\n\n\t\t\t\"primary_location\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"secondary_location\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"primary_blob_endpoint\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"secondary_blob_endpoint\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: 
true,\n\t\t\t},\n\n\t\t\t\"primary_queue_endpoint\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"secondary_queue_endpoint\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"primary_table_endpoint\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"secondary_table_endpoint\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\/\/ NOTE: The API does not appear to expose a secondary file endpoint\n\t\t\t\"primary_file_endpoint\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"primary_access_key\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"secondary_access_key\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"tags\": tagsSchema(),\n\t\t},\n\t}\n}\n\nfunc resourceArmStorageAccountCreate(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*ArmClient).storageServiceClient\n\n\tresourceGroupName := d.Get(\"resource_group_name\").(string)\n\tstorageAccountName := d.Get(\"name\").(string)\n\taccountType := d.Get(\"account_type\").(string)\n\tlocation := d.Get(\"location\").(string)\n\ttags := d.Get(\"tags\").(map[string]interface{})\n\n\topts := storage.AccountCreateParameters{\n\t\tLocation: &location,\n\t\tProperties: &storage.AccountPropertiesCreateParameters{\n\t\t\tAccountType: storage.AccountType(accountType),\n\t\t},\n\t\tTags: expandTags(tags),\n\t}\n\n\taccResp, err := client.Create(resourceGroupName, storageAccountName, opts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating Azure Storage Account '%s': %s\", storageAccountName, err)\n\t}\n\t_, err = pollIndefinitelyAsNeeded(client.Client, accResp.Response.Response, http.StatusOK)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating Azure Storage Account %q: %s\", storageAccountName, err)\n\t}\n\n\t\/\/ The only way to get the ID back apparently is to read the resource again\n\taccount, err := client.GetProperties(resourceGroupName, storageAccountName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error retrieving Azure Storage Account %q: %s\", storageAccountName, err)\n\t}\n\n\td.SetId(*account.ID)\n\n\treturn resourceArmStorageAccountRead(d, meta)\n}\n\n\/\/ resourceArmStorageAccountUpdate is unusual in the ARM API where most resources have a combined\n\/\/ and idempotent operation for CreateOrUpdate. 
In particular updating all of the parameters\n\/\/ available requires a call to Update per parameter...\nfunc resourceArmStorageAccountUpdate(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*ArmClient).storageServiceClient\n\tid, err := parseAzureResourceID(d.Id())\n\tif err != nil {\n\t\treturn err\n\t}\n\tstorageAccountName := id.Path[\"storageAccounts\"]\n\tresourceGroupName := id.ResourceGroup\n\n\td.Partial(true)\n\n\tif d.HasChange(\"account_type\") {\n\t\taccountType := d.Get(\"account_type\").(string)\n\n\t\topts := storage.AccountUpdateParameters{\n\t\t\tProperties: &storage.AccountPropertiesUpdateParameters{\n\t\t\t\tAccountType: storage.AccountType(accountType),\n\t\t\t},\n\t\t}\n\t\taccResp, err := client.Update(resourceGroupName, storageAccountName, opts)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error updating Azure Storage Account type %q: %s\", storageAccountName, err)\n\t\t}\n\t\t_, err = pollIndefinitelyAsNeeded(client.Client, accResp.Response.Response, http.StatusOK)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error updating Azure Storage Account type %q: %s\", storageAccountName, err)\n\t\t}\n\n\t\td.SetPartial(\"account_type\")\n\t}\n\n\tif d.HasChange(\"tags\") {\n\t\ttags := d.Get(\"tags\").(map[string]interface{})\n\n\t\topts := storage.AccountUpdateParameters{\n\t\t\tTags: expandTags(tags),\n\t\t}\n\t\taccResp, err := client.Update(resourceGroupName, storageAccountName, opts)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error updating Azure Storage Account tags %q: %s\", storageAccountName, err)\n\t\t}\n\t\t_, err = pollIndefinitelyAsNeeded(client.Client, accResp.Response.Response, http.StatusOK)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error updating Azure Storage Account tags %q: %s\", storageAccountName, err)\n\t\t}\n\n\t\td.SetPartial(\"tags\")\n\t}\n\n\td.Partial(false)\n\treturn nil\n}\n\nfunc resourceArmStorageAccountRead(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*ArmClient).storageServiceClient\n\n\tid, err := parseAzureResourceID(d.Id())\n\tif err != nil {\n\t\treturn err\n\t}\n\tname := id.Path[\"storageAccounts\"]\n\tresGroup := id.ResourceGroup\n\n\tresp, err := client.GetProperties(resGroup, name)\n\tif err != nil {\n\t\tif resp.StatusCode == http.StatusNotFound {\n\t\t\td.SetId(\"\")\n\t\t\treturn nil\n\t\t}\n\n\t\treturn fmt.Errorf(\"Error reading the state of AzureRM Storage Account %q: %s\", name, err)\n\t}\n\n\tkeys, err := client.ListKeys(resGroup, name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.Set(\"primary_access_key\", keys.Key1)\n\td.Set(\"secondary_access_key\", keys.Key2)\n\td.Set(\"location\", resp.Location)\n\td.Set(\"account_type\", resp.Properties.AccountType)\n\td.Set(\"primary_location\", resp.Properties.PrimaryLocation)\n\td.Set(\"secondary_location\", resp.Properties.SecondaryLocation)\n\n\tif resp.Properties.PrimaryEndpoints != nil {\n\t\td.Set(\"primary_blob_endpoint\", resp.Properties.PrimaryEndpoints.Blob)\n\t\td.Set(\"primary_queue_endpoint\", resp.Properties.PrimaryEndpoints.Queue)\n\t\td.Set(\"primary_table_endpoint\", resp.Properties.PrimaryEndpoints.Table)\n\t\td.Set(\"primary_file_endpoint\", resp.Properties.PrimaryEndpoints.File)\n\t}\n\n\tif resp.Properties.SecondaryEndpoints != nil {\n\t\tif resp.Properties.SecondaryEndpoints.Blob != nil {\n\t\t\td.Set(\"secondary_blob_endpoint\", resp.Properties.SecondaryEndpoints.Blob)\n\t\t} else {\n\t\t\td.Set(\"secondary_blob_endpoint\", \"\")\n\t\t}\n\t\tif resp.Properties.SecondaryEndpoints.Queue != nil 
{\n\t\t\td.Set(\"secondary_queue_endpoint\", resp.Properties.SecondaryEndpoints.Queue)\n\t\t} else {\n\t\t\td.Set(\"secondary_queue_endpoint\", \"\")\n\t\t}\n\t\tif resp.Properties.SecondaryEndpoints.Table != nil {\n\t\t\td.Set(\"secondary_table_endpoint\", resp.Properties.SecondaryEndpoints.Table)\n\t\t} else {\n\t\t\td.Set(\"secondary_table_endpoint\", \"\")\n\t\t}\n\t}\n\n\tflattenAndSetTags(d, resp.Tags)\n\n\treturn nil\n}\n\nfunc resourceArmStorageAccountDelete(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*ArmClient).storageServiceClient\n\n\tid, err := parseAzureResourceID(d.Id())\n\tif err != nil {\n\t\treturn err\n\t}\n\tname := id.Path[\"storageAccounts\"]\n\tresGroup := id.ResourceGroup\n\n\taccResp, err := client.Delete(resGroup, name)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error issuing AzureRM delete request for storage account %q: %s\", name, err)\n\t}\n\t_, err = pollIndefinitelyAsNeeded(client.Client, accResp.Response, http.StatusNotFound)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error polling for AzureRM delete request for storage account %q: %s\", name, err)\n\t}\n\n\treturn nil\n}\n\nfunc validateArmStorageAccountName(v interface{}, k string) (ws []string, es []error) {\n\tinput := v.(string)\n\n\tif !regexp.MustCompile(`\\A([a-z0-9]{3,24})\\z`).MatchString(input) {\n\t\tes = append(es, fmt.Errorf(\"name can only consist of lowercase letters and numbers, and must be between 3 and 24 characters long\"))\n\t}\n\n\treturn\n}\n\nfunc validateArmStorageAccountType(v interface{}, k string) (ws []string, es []error) {\n\tvalidAccountTypes := []string{\"standard_lrs\", \"standard_zrs\",\n\t\t\"standard_grs\", \"standard_ragrs\", \"premium_lrs\"}\n\n\tinput := strings.ToLower(v.(string))\n\n\tfor _, valid := range validAccountTypes {\n\t\tif valid == input {\n\t\t\treturn\n\t\t}\n\t}\n\n\tes = append(es, fmt.Errorf(\"Invalid storage account type %q\", input))\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n)\n\nfunc TestHandlerForDisallowedMethods(t *testing.T) {\n\tdisallowedMethods := []string{\"GET\", \"DELETE\", \"PUT\", \"TRACE\", \"PATCH\"}\n\trandomUrls := []string{\"\/\", \"\/blah\"}\n\n\tfor _, method := range disallowedMethods {\n\t\tfor _, url := range randomUrls {\n\t\t\tt.Run(method+url, func(t *testing.T) {\n\t\t\t\trequest, err := http.NewRequest(method, \"localhost:8080\"+url, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"Failed to create request: %v\", err)\n\t\t\t\t}\n\t\t\t\trecorder := httptest.NewRecorder()\n\t\t\t\thandleViolationReport(recorder, request)\n\n\t\t\t\tresponse := recorder.Result()\n\t\t\t\tdefer response.Body.Close()\n\n\t\t\t\tif response.StatusCode != http.StatusMethodNotAllowed {\n\t\t\t\t\tt.Errorf(\"Expected HTTP status %v; got %v\", http.StatusMethodNotAllowed, response.Status)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n<commit_msg>Don't include hostname in `urlStr` param<commit_after>package main\n\nimport (\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n)\n\nfunc TestHandlerForDisallowedMethods(t *testing.T) {\n\tdisallowedMethods := []string{\"GET\", \"DELETE\", \"PUT\", \"TRACE\", \"PATCH\"}\n\trandomUrls := []string{\"\/\", \"\/blah\"}\n\n\tfor _, method := range disallowedMethods {\n\t\tfor _, url := range randomUrls {\n\t\t\tt.Run(method+url, func(t *testing.T) {\n\t\t\t\trequest, err := http.NewRequest(method, url, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"Failed to create request: 
%v\", err)\n\t\t\t\t}\n\t\t\t\trecorder := httptest.NewRecorder()\n\t\t\t\thandleViolationReport(recorder, request)\n\n\t\t\t\tresponse := recorder.Result()\n\t\t\t\tdefer response.Body.Close()\n\n\t\t\t\tif response.StatusCode != http.StatusMethodNotAllowed {\n\t\t\t\t\tt.Errorf(\"Expected HTTP status %v; got %v\", http.StatusMethodNotAllowed, response.StatusCode)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ package mojo_dev_profile implements the mojo_dev profile.\n\/\/ Users must pass the \"--mojo-dev.dir\" flag when installing the profile,\n\/\/ pointing it to a checkout of the mojo repo. It is the user's responsibility\n\/\/ to sync and build the mojo checkout.\npackage mojo_dev_profile\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\n\t\"v.io\/jiri\"\n\t\"v.io\/jiri\/profiles\"\n\t\"v.io\/jiri\/profiles\/profilesmanager\"\n\t\"v.io\/x\/lib\/envvar\"\n)\n\nvar (\n\tmojoDirFlagName = \"\"\n\tmojoDir = \"\"\n)\n\nfunc Register(installer, profile string) {\n\tm := &Manager{\n\t\tprofileInstaller: installer,\n\t\tprofileName: profile,\n\t\tversionInfo: profiles.NewVersionInfo(profile, map[string]interface{}{\n\t\t\t\"0\": nil,\n\t\t}, \"0\"),\n\t}\n\tprofilesmanager.Register(m)\n}\n\ntype Manager struct {\n\tprofileInstaller, profileName, qualifiedName string\n\tversionInfo *profiles.VersionInfo\n}\n\nfunc (m Manager) Name() string {\n\treturn m.profileName\n}\n\nfunc (m Manager) Installer() string {\n\treturn m.profileInstaller\n}\n\nfunc (m Manager) String() string {\n\treturn fmt.Sprintf(\"%s[%s]\", m.qualifiedName, m.versionInfo.Default())\n}\n\nfunc (m Manager) VersionInfo() *profiles.VersionInfo {\n\treturn m.versionInfo\n}\n\nfunc (m Manager) Info() string {\n\treturn `Sets up a mojo compilation environment based on a mojo checkout specified in the --mojo-dev.dir flag.`\n}\n\nfunc (m *Manager) AddFlags(flags *flag.FlagSet, action profiles.Action) {\n\tmojoDirFlagName = m.profileName + \".dir\"\n\tif action == profiles.Install {\n\t\tflags.StringVar(&mojoDir, mojoDirFlagName, \"\", \"Path of mojo repo checkout.\")\n\t}\n}\n\nfunc (m *Manager) Install(jirix *jiri.X, pdb *profiles.DB, root jiri.RelPath, target profiles.Target) error {\n\tif mojoDir == \"\" {\n\t\treturn fmt.Errorf(\"flag %q must be set\", mojoDirFlagName)\n\t}\n\tif !filepath.IsAbs(mojoDir) {\n\t\treturn fmt.Errorf(\"flag %q must be absolute path: %s\", mojoDirFlagName, mojoDir)\n\t}\n\n\tmojoBuildDir := filepath.Join(mojoDir, \"src\", \"out\", \"Debug\")\n\tif target.OS() == \"android\" {\n\t\tmojoBuildDir = filepath.Join(mojoDir, \"src\", \"out\", \"android_Debug\")\n\t}\n\n\ttarget.Env.Vars = envvar.MergeSlices(target.Env.Vars, []string{\n\t\t\"CGO_CFLAGS=-I\" + filepath.Join(mojoDir, \"src\"),\n\t\t\"CGO_CXXFLAGS=-I\" + filepath.Join(mojoDir, \"src\"),\n\t\t\"CGO_LDFLAGS=-L\" + filepath.Join(mojoBuildDir, \"obj\", \"mojo\") + \" -lsystem_thunks\",\n\t\t\"GOPATH=\" + mojoDir + \":\" + filepath.Join(mojoBuildDir, \"gen\", \"go\"),\n\t\t\"MOJO_DEVTOOLS=\" + filepath.Join(mojoDir, \"src\", \"mojo\", \"devtools\", \"common\"),\n\t\t\"MOJO_SDK=\" + filepath.Join(mojoDir),\n\t\t\"MOJO_SHELL=\" + filepath.Join(mojoBuildDir, \"mojo_shell\"),\n\t\t\"MOJO_SERVICES=\" + mojoBuildDir,\n\t\t\"MOJO_SYSTEM_THUNKS=\" + filepath.Join(mojoBuildDir, \"obj\", \"mojo\", \"libsystem_thunks.a\"),\n\t})\n\n\tif 
target.OS() == \"android\" {\n\t\ttarget.Env.Vars = envvar.MergeSlices(target.Env.Vars, []string{\n\t\t\t\"ANDROID_PLATFORM_TOOLS=\" + filepath.Join(mojoDir, \"src\", \"third_party\", \"android_tools\", \"sdk\", \"platform-tools\"),\n\t\t\t\"MOJO_SHELL=\" + filepath.Join(mojoBuildDir, \"apks\", \"MojoShell.apk\"),\n\t\t})\n\t}\n\n\tpdb.InstallProfile(m.profileInstaller, m.profileName, \"mojo-dev\") \/\/ Needed to confirm installation, but nothing will be inside.\n\treturn pdb.AddProfileTarget(m.profileInstaller, m.profileName, target)\n}\n\nfunc (m *Manager) Uninstall(jirix *jiri.X, pdb *profiles.DB, root jiri.RelPath, target profiles.Target) error {\n\tpdb.RemoveProfileTarget(m.profileInstaller, m.profileName, target)\n\treturn nil\n}\n<commit_msg>x\/devtools: Use mojodev instead of mojo-dev<commit_after>\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ package mojo_dev_profile implements the mojodev profile.\n\/\/ Users must pass the \"--mojodev.dir\" flag when installing the profile,\n\/\/ pointing it to a checkout of the mojo repo. It is the user's responsibility\n\/\/ to sync and build the mojo checkout.\npackage mojo_dev_profile\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\n\t\"v.io\/jiri\"\n\t\"v.io\/jiri\/profiles\"\n\t\"v.io\/jiri\/profiles\/profilesmanager\"\n\t\"v.io\/x\/lib\/envvar\"\n)\n\nvar (\n\tmojoDirFlagName = \"\"\n\tmojoDir = \"\"\n)\n\nfunc Register(installer, profile string) {\n\tm := &Manager{\n\t\tprofileInstaller: installer,\n\t\tprofileName: profile,\n\t\tversionInfo: profiles.NewVersionInfo(profile, map[string]interface{}{\n\t\t\t\"0\": nil,\n\t\t}, \"0\"),\n\t}\n\tprofilesmanager.Register(m)\n}\n\ntype Manager struct {\n\tprofileInstaller, profileName, qualifiedName string\n\tversionInfo *profiles.VersionInfo\n}\n\nfunc (m Manager) Name() string {\n\treturn m.profileName\n}\n\nfunc (m Manager) Installer() string {\n\treturn m.profileInstaller\n}\n\nfunc (m Manager) String() string {\n\treturn fmt.Sprintf(\"%s[%s]\", m.qualifiedName, m.versionInfo.Default())\n}\n\nfunc (m Manager) VersionInfo() *profiles.VersionInfo {\n\treturn m.versionInfo\n}\n\nfunc (m Manager) Info() string {\n\treturn `Sets up a mojo compilation environment based on a mojo checkout specified in the --mojodev.dir flag.`\n}\n\nfunc (m *Manager) AddFlags(flags *flag.FlagSet, action profiles.Action) {\n\tmojoDirFlagName = m.profileName + \".dir\"\n\tif action == profiles.Install {\n\t\tflags.StringVar(&mojoDir, mojoDirFlagName, \"\", \"Path of mojo repo checkout.\")\n\t}\n}\n\nfunc (m *Manager) Install(jirix *jiri.X, pdb *profiles.DB, root jiri.RelPath, target profiles.Target) error {\n\tif mojoDir == \"\" {\n\t\treturn fmt.Errorf(\"flag %q must be set\", mojoDirFlagName)\n\t}\n\tif !filepath.IsAbs(mojoDir) {\n\t\treturn fmt.Errorf(\"flag %q must be absolute path: %s\", mojoDirFlagName, mojoDir)\n\t}\n\n\tmojoBuildDir := filepath.Join(mojoDir, \"src\", \"out\", \"Debug\")\n\tif target.OS() == \"android\" {\n\t\tmojoBuildDir = filepath.Join(mojoDir, \"src\", \"out\", \"android_Debug\")\n\t}\n\n\ttarget.Env.Vars = envvar.MergeSlices(target.Env.Vars, []string{\n\t\t\"CGO_CFLAGS=-I\" + filepath.Join(mojoDir, \"src\"),\n\t\t\"CGO_CXXFLAGS=-I\" + filepath.Join(mojoDir, \"src\"),\n\t\t\"CGO_LDFLAGS=-L\" + filepath.Join(mojoBuildDir, \"obj\", \"mojo\") + \" -lsystem_thunks\",\n\t\t\"GOPATH=\" + mojoDir + \":\" + filepath.Join(mojoBuildDir, \"gen\", 
\"go\"),\n\t\t\"MOJO_DEVTOOLS=\" + filepath.Join(mojoDir, \"src\", \"mojo\", \"devtools\", \"common\"),\n\t\t\"MOJO_SDK=\" + filepath.Join(mojoDir),\n\t\t\"MOJO_SHELL=\" + filepath.Join(mojoBuildDir, \"mojo_shell\"),\n\t\t\"MOJO_SERVICES=\" + mojoBuildDir,\n\t\t\"MOJO_SYSTEM_THUNKS=\" + filepath.Join(mojoBuildDir, \"obj\", \"mojo\", \"libsystem_thunks.a\"),\n\t})\n\n\tif target.OS() == \"android\" {\n\t\ttarget.Env.Vars = envvar.MergeSlices(target.Env.Vars, []string{\n\t\t\t\"ANDROID_PLATFORM_TOOLS=\" + filepath.Join(mojoDir, \"src\", \"third_party\", \"android_tools\", \"sdk\", \"platform-tools\"),\n\t\t\t\"MOJO_SHELL=\" + filepath.Join(mojoBuildDir, \"apks\", \"MojoShell.apk\"),\n\t\t})\n\t}\n\n\tpdb.InstallProfile(m.profileInstaller, m.profileName, \"mojodev\") \/\/ Needed to confirm installation, but nothing will be inside.\n\treturn pdb.AddProfileTarget(m.profileInstaller, m.profileName, target)\n}\n\nfunc (m *Manager) Uninstall(jirix *jiri.X, pdb *profiles.DB, root jiri.RelPath, target profiles.Target) error {\n\tpdb.RemoveProfileTarget(m.profileInstaller, m.profileName, target)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package awsup\n\nimport (\n\t\"fmt\"\n\t\"github.com\/golang\/glog\"\n)\n\n\/\/ I believe one vCPU ~ 3 ECUS, and 60 CPU credits would be needed to use one vCPU for an hour\nconst BurstableCreditsToECUS float32 = 3.0 \/ 60.0\n\ntype AWSMachineTypeInfo struct {\n\tName string\n\tMemoryGB float32\n\tECU float32\n\tCores int\n\tEphemeralDisks []int\n\tBurstable bool\n}\n\ntype EphemeralDevice struct {\n\tDeviceName string\n\tVirtualName string\n\tSizeGB int\n}\n\nfunc (m *AWSMachineTypeInfo) EphemeralDevices() []*EphemeralDevice {\n\tvar disks []*EphemeralDevice\n\tfor i, sizeGB := range m.EphemeralDisks {\n\t\td := &EphemeralDevice{\n\t\t\tSizeGB: sizeGB,\n\t\t}\n\n\t\tif i >= 20 {\n\t\t\t\/\/ TODO: What drive letters do we use?\n\t\t\tglog.Fatalf(\"ephemeral devices for > 20 not yet implemented\")\n\t\t}\n\t\td.DeviceName = \"\/dev\/sd\" + string('c'+i)\n\t\td.VirtualName = fmt.Sprintf(\"ephemeral%d\", i)\n\n\t\tdisks = append(disks, d)\n\t}\n\treturn disks\n}\n\nfunc GetMachineTypeInfo(machineType string) (*AWSMachineTypeInfo, error) {\n\tfor i := range MachineTypes {\n\t\tm := &MachineTypes[i]\n\t\tif m.Name == machineType {\n\t\t\treturn m, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"instance type not handled: %q\", machineType)\n}\n\nvar MachineTypes []AWSMachineTypeInfo = []AWSMachineTypeInfo{\n\t\/\/ This is tedious, but seems simpler than trying to have some logic and then a lot of exceptions\n\n\t\/\/ t2 family\n\t{\n\t\tName: \"t2.nano\",\n\t\tMemoryGB: 0.5,\n\t\tECU: 3 * BurstableCreditsToECUS,\n\t\tCores: 1,\n\t\tEphemeralDisks: nil,\n\t\tBurstable: true,\n\t},\n\t{\n\t\tName: \"t2.micro\",\n\t\tMemoryGB: 1,\n\t\tECU: 6 * BurstableCreditsToECUS,\n\t\tCores: 1,\n\t\tEphemeralDisks: nil,\n\t\tBurstable: true,\n\t},\n\t{\n\t\tName: \"t2.small\",\n\t\tMemoryGB: 2,\n\t\tECU: 12 * BurstableCreditsToECUS,\n\t\tCores: 1,\n\t\tEphemeralDisks: nil,\n\t\tBurstable: true,\n\t},\n\t{\n\t\tName: \"t2.medium\",\n\t\tMemoryGB: 4,\n\t\tECU: 24 * BurstableCreditsToECUS,\n\t\tCores: 2,\n\t\tEphemeralDisks: nil,\n\t\tBurstable: true,\n\t},\n\t{\n\t\tName: \"t2.large\",\n\t\tMemoryGB: 8,\n\t\tECU: 36 * BurstableCreditsToECUS,\n\t\tCores: 2,\n\t\tEphemeralDisks: nil,\n\t\tBurstable: true,\n\t},\n\n\t\/\/ m3 family\n\t{\n\t\tName: \"m3.medium\",\n\t\tMemoryGB: 3.75,\n\t\tECU: 3,\n\t\tCores: 1,\n\t\tEphemeralDisks: []int{4},\n\t},\n\t{\n\t\tName: 
\"m3.large\",\n\t\tMemoryGB: 7.5,\n\t\tECU: 6.5,\n\t\tCores: 2,\n\t\tEphemeralDisks: []int{32},\n\t},\n\t{\n\t\tName: \"m3.xlarge\",\n\t\tMemoryGB: 15,\n\t\tECU: 13,\n\t\tCores: 4,\n\t\tEphemeralDisks: []int{40, 40},\n\t},\n\t{\n\t\tName: \"m3.2xlarge\",\n\t\tMemoryGB: 30,\n\t\tECU: 26,\n\t\tCores: 8,\n\t\tEphemeralDisks: []int{80, 80},\n\t},\n\n\t\/\/ m4 family\n\t{\n\t\tName: \"m4.large\",\n\t\tMemoryGB: 8,\n\t\tECU: 6.5,\n\t\tCores: 2,\n\t\tEphemeralDisks: nil,\n\t},\n\t{\n\t\tName: \"m4.xlarge\",\n\t\tMemoryGB: 16,\n\t\tECU: 13,\n\t\tCores: 4,\n\t\tEphemeralDisks: nil,\n\t},\n\t{\n\t\tName: \"m4.2xlarge\",\n\t\tMemoryGB: 32,\n\t\tECU: 26,\n\t\tCores: 8,\n\t\tEphemeralDisks: nil,\n\t},\n\t{\n\t\tName: \"m4.4xlarge\",\n\t\tMemoryGB: 64,\n\t\tECU: 53.5,\n\t\tCores: 16,\n\t\tEphemeralDisks: nil,\n\t},\n\t{\n\t\tName: \"m4.10xlarge\",\n\t\tMemoryGB: 160,\n\t\tECU: 124.5,\n\t\tCores: 40,\n\t\tEphemeralDisks: nil,\n\t},\n\n\t\/\/ c3 family\n\t{\n\t\tName: \"c3.large\",\n\t\tMemoryGB: 3.75,\n\t\tECU: 7,\n\t\tCores: 2,\n\t\tEphemeralDisks: []int{16, 16},\n\t},\n\t{\n\t\tName: \"c3.xlarge\",\n\t\tMemoryGB: 7.5,\n\t\tECU: 14,\n\t\tCores: 4,\n\t\tEphemeralDisks: []int{40, 40},\n\t},\n\t{\n\t\tName: \"c3.2xlarge\",\n\t\tMemoryGB: 15,\n\t\tECU: 28,\n\t\tCores: 8,\n\t\tEphemeralDisks: []int{80, 80},\n\t},\n\t{\n\t\tName: \"c3.4xlarge\",\n\t\tMemoryGB: 30,\n\t\tECU: 55,\n\t\tCores: 16,\n\t\tEphemeralDisks: []int{160, 160},\n\t},\n\t{\n\t\tName: \"c3.8xlarge\",\n\t\tMemoryGB: 60,\n\t\tECU: 108,\n\t\tCores: 32,\n\t\tEphemeralDisks: []int{320, 320},\n\t},\n\n\t\/\/ c4 family\n\t{\n\t\tName: \"c4.large\",\n\t\tMemoryGB: 3.75,\n\t\tECU: 8,\n\t\tCores: 2,\n\t\tEphemeralDisks: nil,\n\t},\n\t{\n\t\tName: \"c4.xlarge\",\n\t\tMemoryGB: 7.5,\n\t\tECU: 16,\n\t\tCores: 4,\n\t\tEphemeralDisks: nil,\n\t},\n\t{\n\t\tName: \"c4.2xlarge\",\n\t\tMemoryGB: 15,\n\t\tECU: 31,\n\t\tCores: 8,\n\t\tEphemeralDisks: nil,\n\t},\n\t{\n\t\tName: \"c4.4xlarge\",\n\t\tMemoryGB: 30,\n\t\tECU: 62,\n\t\tCores: 16,\n\t\tEphemeralDisks: nil,\n\t},\n\t{\n\t\tName: \"c4.8xlarge\",\n\t\tMemoryGB: 60,\n\t\tECU: 132,\n\t\tCores: 32,\n\t\tEphemeralDisks: nil,\n\t},\n\n\t\/\/ r3 family\n\t{\n\t\tName: \"r3.large\",\n\t\tMemoryGB: 15.25,\n\t\tECU: 6.5,\n\t\tCores: 2,\n\t\tEphemeralDisks: []int{32},\n\t},\n\t{\n\t\tName: \"r3.xlarge\",\n\t\tMemoryGB: 30.5,\n\t\tECU: 13,\n\t\tCores: 4,\n\t\tEphemeralDisks: []int{80},\n\t},\n\t{\n\t\tName: \"r3.2xlarge\",\n\t\tMemoryGB: 61,\n\t\tECU: 26,\n\t\tCores: 8,\n\t\tEphemeralDisks: []int{160},\n\t},\n\t{\n\t\tName: \"r3.4xlarge\",\n\t\tMemoryGB: 122,\n\t\tECU: 52,\n\t\tCores: 16,\n\t\tEphemeralDisks: []int{320},\n\t},\n\t{\n\t\tName: \"r3.8xlarge\",\n\t\tMemoryGB: 244,\n\t\tECU: 104,\n\t\tCores: 32,\n\t\tEphemeralDisks: []int{320, 320},\n\t},\n}\n<commit_msg>More instance type support: g2, i2, x1 families<commit_after>package awsup\n\nimport (\n\t\"fmt\"\n\t\"github.com\/golang\/glog\"\n)\n\n\/\/ I believe one vCPU ~ 3 ECUS, and 60 CPU credits would be needed to use one vCPU for an hour\nconst BurstableCreditsToECUS float32 = 3.0 \/ 60.0\n\ntype AWSMachineTypeInfo struct {\n\tName string\n\tMemoryGB float32\n\tECU float32\n\tCores int\n\tEphemeralDisks []int\n\tBurstable bool\n}\n\ntype EphemeralDevice struct {\n\tDeviceName string\n\tVirtualName string\n\tSizeGB int\n}\n\nfunc (m *AWSMachineTypeInfo) EphemeralDevices() []*EphemeralDevice {\n\tvar disks []*EphemeralDevice\n\tfor i, sizeGB := range m.EphemeralDisks {\n\t\td := &EphemeralDevice{\n\t\t\tSizeGB: sizeGB,\n\t\t}\n\n\t\tif i >= 20 {\n\t\t\t\/\/ 
TODO: What drive letters do we use?\n\t\t\tglog.Fatalf(\"ephemeral devices for > 20 not yet implemented\")\n\t\t}\n\t\td.DeviceName = \"\/dev\/sd\" + string('c'+i)\n\t\td.VirtualName = fmt.Sprintf(\"ephemeral%d\", i)\n\n\t\tdisks = append(disks, d)\n\t}\n\treturn disks\n}\n\nfunc GetMachineTypeInfo(machineType string) (*AWSMachineTypeInfo, error) {\n\tfor i := range MachineTypes {\n\t\tm := &MachineTypes[i]\n\t\tif m.Name == machineType {\n\t\t\treturn m, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"instance type not handled: %q\", machineType)\n}\n\nvar MachineTypes []AWSMachineTypeInfo = []AWSMachineTypeInfo{\n\t\/\/ This is tedious, but seems simpler than trying to have some logic and then a lot of exceptions\n\n\t\/\/ t2 family\n\t{\n\t\tName: \"t2.nano\",\n\t\tMemoryGB: 0.5,\n\t\tECU: 3 * BurstableCreditsToECUS,\n\t\tCores: 1,\n\t\tEphemeralDisks: nil,\n\t\tBurstable: true,\n\t},\n\t{\n\t\tName: \"t2.micro\",\n\t\tMemoryGB: 1,\n\t\tECU: 6 * BurstableCreditsToECUS,\n\t\tCores: 1,\n\t\tEphemeralDisks: nil,\n\t\tBurstable: true,\n\t},\n\t{\n\t\tName: \"t2.small\",\n\t\tMemoryGB: 2,\n\t\tECU: 12 * BurstableCreditsToECUS,\n\t\tCores: 1,\n\t\tEphemeralDisks: nil,\n\t\tBurstable: true,\n\t},\n\t{\n\t\tName: \"t2.medium\",\n\t\tMemoryGB: 4,\n\t\tECU: 24 * BurstableCreditsToECUS,\n\t\tCores: 2,\n\t\tEphemeralDisks: nil,\n\t\tBurstable: true,\n\t},\n\t{\n\t\tName: \"t2.large\",\n\t\tMemoryGB: 8,\n\t\tECU: 36 * BurstableCreditsToECUS,\n\t\tCores: 2,\n\t\tEphemeralDisks: nil,\n\t\tBurstable: true,\n\t},\n\n\t\/\/ m3 family\n\t{\n\t\tName: \"m3.medium\",\n\t\tMemoryGB: 3.75,\n\t\tECU: 3,\n\t\tCores: 1,\n\t\tEphemeralDisks: []int{4},\n\t},\n\t{\n\t\tName: \"m3.large\",\n\t\tMemoryGB: 7.5,\n\t\tECU: 6.5,\n\t\tCores: 2,\n\t\tEphemeralDisks: []int{32},\n\t},\n\t{\n\t\tName: \"m3.xlarge\",\n\t\tMemoryGB: 15,\n\t\tECU: 13,\n\t\tCores: 4,\n\t\tEphemeralDisks: []int{40, 40},\n\t},\n\t{\n\t\tName: \"m3.2xlarge\",\n\t\tMemoryGB: 30,\n\t\tECU: 26,\n\t\tCores: 8,\n\t\tEphemeralDisks: []int{80, 80},\n\t},\n\n\t\/\/ m4 family\n\t{\n\t\tName: \"m4.large\",\n\t\tMemoryGB: 8,\n\t\tECU: 6.5,\n\t\tCores: 2,\n\t\tEphemeralDisks: nil,\n\t},\n\t{\n\t\tName: \"m4.xlarge\",\n\t\tMemoryGB: 16,\n\t\tECU: 13,\n\t\tCores: 4,\n\t\tEphemeralDisks: nil,\n\t},\n\t{\n\t\tName: \"m4.2xlarge\",\n\t\tMemoryGB: 32,\n\t\tECU: 26,\n\t\tCores: 8,\n\t\tEphemeralDisks: nil,\n\t},\n\t{\n\t\tName: \"m4.4xlarge\",\n\t\tMemoryGB: 64,\n\t\tECU: 53.5,\n\t\tCores: 16,\n\t\tEphemeralDisks: nil,\n\t},\n\t{\n\t\tName: \"m4.10xlarge\",\n\t\tMemoryGB: 160,\n\t\tECU: 124.5,\n\t\tCores: 40,\n\t\tEphemeralDisks: nil,\n\t},\n\n\t\/\/ c3 family\n\t{\n\t\tName: \"c3.large\",\n\t\tMemoryGB: 3.75,\n\t\tECU: 7,\n\t\tCores: 2,\n\t\tEphemeralDisks: []int{16, 16},\n\t},\n\t{\n\t\tName: \"c3.xlarge\",\n\t\tMemoryGB: 7.5,\n\t\tECU: 14,\n\t\tCores: 4,\n\t\tEphemeralDisks: []int{40, 40},\n\t},\n\t{\n\t\tName: \"c3.2xlarge\",\n\t\tMemoryGB: 15,\n\t\tECU: 28,\n\t\tCores: 8,\n\t\tEphemeralDisks: []int{80, 80},\n\t},\n\t{\n\t\tName: \"c3.4xlarge\",\n\t\tMemoryGB: 30,\n\t\tECU: 55,\n\t\tCores: 16,\n\t\tEphemeralDisks: []int{160, 160},\n\t},\n\t{\n\t\tName: \"c3.8xlarge\",\n\t\tMemoryGB: 60,\n\t\tECU: 108,\n\t\tCores: 32,\n\t\tEphemeralDisks: []int{320, 320},\n\t},\n\n\t\/\/ c4 family\n\t{\n\t\tName: \"c4.large\",\n\t\tMemoryGB: 3.75,\n\t\tECU: 8,\n\t\tCores: 2,\n\t\tEphemeralDisks: nil,\n\t},\n\t{\n\t\tName: \"c4.xlarge\",\n\t\tMemoryGB: 7.5,\n\t\tECU: 16,\n\t\tCores: 4,\n\t\tEphemeralDisks: nil,\n\t},\n\t{\n\t\tName: \"c4.2xlarge\",\n\t\tMemoryGB: 15,\n\t\tECU: 
31,\n\t\tCores: 8,\n\t\tEphemeralDisks: nil,\n\t},\n\t{\n\t\tName: \"c4.4xlarge\",\n\t\tMemoryGB: 30,\n\t\tECU: 62,\n\t\tCores: 16,\n\t\tEphemeralDisks: nil,\n\t},\n\t{\n\t\tName: \"c4.8xlarge\",\n\t\tMemoryGB: 60,\n\t\tECU: 132,\n\t\tCores: 32,\n\t\tEphemeralDisks: nil,\n\t},\n\n\t\/\/ g2 family\n\t{\n\t\tName: \"g2.2xlarge\",\n\t\tMemoryGB: 15,\n\t\tECU: 26,\n\t\tCores: 8,\n\t\tEphemeralDisks: []int{60},\n\t},\n\t{\n\t\tName: \"g2.8xlarge\",\n\t\tMemoryGB: 60,\n\t\tECU: 104,\n\t\tCores: 32,\n\t\tEphemeralDisks: []int{120, 120},\n\t},\n\n\t\/\/ i2 family\n\t{\n\t\tName: \"i2.xlarge\",\n\t\tMemoryGB: 30.5,\n\t\tECU: 14,\n\t\tCores: 4,\n\t\tEphemeralDisks: []int{800},\n\t},\n\t{\n\t\tName: \"i2.2xlarge\",\n\t\tMemoryGB: 61,\n\t\tECU: 27,\n\t\tCores: 8,\n\t\tEphemeralDisks: []int{800, 800},\n\t},\n\t{\n\t\tName: \"i2.4xlarge\",\n\t\tMemoryGB: 122,\n\t\tECU: 53,\n\t\tCores: 16,\n\t\tEphemeralDisks: []int{800, 800, 800, 800},\n\t},\n\t{\n\t\tName: \"i2.8xlarge\",\n\t\tMemoryGB: 244,\n\t\tECU: 104,\n\t\tCores: 32,\n\t\tEphemeralDisks: []int{800, 800, 800, 800, 800, 800, 800, 800},\n\t},\n\n\t\/\/ r3 family\n\t{\n\t\tName: \"r3.large\",\n\t\tMemoryGB: 15.25,\n\t\tECU: 6.5,\n\t\tCores: 2,\n\t\tEphemeralDisks: []int{32},\n\t},\n\t{\n\t\tName: \"r3.xlarge\",\n\t\tMemoryGB: 30.5,\n\t\tECU: 13,\n\t\tCores: 4,\n\t\tEphemeralDisks: []int{80},\n\t},\n\t{\n\t\tName: \"r3.2xlarge\",\n\t\tMemoryGB: 61,\n\t\tECU: 26,\n\t\tCores: 8,\n\t\tEphemeralDisks: []int{160},\n\t},\n\t{\n\t\tName: \"r3.4xlarge\",\n\t\tMemoryGB: 122,\n\t\tECU: 52,\n\t\tCores: 16,\n\t\tEphemeralDisks: []int{320},\n\t},\n\t{\n\t\tName: \"r3.8xlarge\",\n\t\tMemoryGB: 244,\n\t\tECU: 104,\n\t\tCores: 32,\n\t\tEphemeralDisks: []int{320, 320},\n\t},\n\n\t\/\/ x1 family\n\t{\n\t\tName: \"x1.32xlarge\",\n\t\tMemoryGB: 1952,\n\t\tECU: 349,\n\t\tCores: 128,\n\t\tEphemeralDisks: []int{1920, 1920},\n\t},\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 the u-root Authors. All rights reserved\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package trampoline sets machine to a specific state defined by multiboot v1\n\/\/ spec and jumps to the intended kernel.\n\/\/\n\/\/ https:\/\/www.gnu.org\/software\/grub\/manual\/multiboot\/multiboot.html#Machine-state.\npackage trampoline\n\nimport (\n\t\"io\"\n\t\"reflect\"\n\t\"unsafe\"\n\n\t\"github.com\/u-root\/u-root\/pkg\/ubinary\"\n)\n\nconst (\n\ttrampolineEntry = \"u-root-entry-long\"\n\ttrampolineInfo = \"u-root-info-long\"\n\ttrampolineMagic = \"u-root-mb-magic\"\n)\n\nfunc start()\nfunc end()\nfunc info()\nfunc magic()\nfunc entry()\n\n\/\/ funcPC gives the program counter of the given function.\n\/\/\n\/\/go:linkname funcPC runtime.funcPC\nfunc funcPC(f interface{}) uintptr\n\n\/\/ Setup scans file for trampoline code and sets\n\/\/ values for multiboot info address and kernel entry point.\nfunc Setup(path string, magic, infoAddr, entryPoint uintptr) ([]byte, error) {\n\ttrampStart, d, err := extract(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patch(trampStart, d, magic, infoAddr, entryPoint)\n}\n\n\/\/ extract extracts trampoline segment from file.\n\/\/ trampoline segment begins after \"u-root-trampoline-begin\" byte sequence + padding,\n\/\/ and ends at \"u-root-trampoline-end\" byte sequence.\nfunc extract(path string) (uintptr, []byte, error) {\n\t\/\/ TODO(https:\/\/github.com\/golang\/go\/issues\/35055): deal with\n\t\/\/ potentially non-contiguous trampoline. 
Rather than locating start\n\t\/\/ and end, we should locate start,boot,farjump{32,64},gdt,info,entry\n\t\/\/ individually and return one potentially really big trampoline slice.\n\ttbegin := funcPC(start)\n\ttend := funcPC(end)\n\tif tend <= tbegin {\n\t\treturn 0, nil, io.ErrUnexpectedEOF\n\t}\n\ttramp := ptrToSlice(tbegin, int(tend-tbegin))\n\n\t\/\/ tramp is read-only executable memory. So we gotta copy it to a\n\t\/\/ slice. Gotta modify it later.\n\tcp := append([]byte(nil), tramp...)\n\treturn tbegin, cp, nil\n}\n\nfunc ptrToSlice(ptr uintptr, size int) []byte {\n\tvar data []byte\n\n\tsh := (*reflect.SliceHeader)(unsafe.Pointer(&data))\n\tsh.Data = ptr\n\tsh.Len = size\n\tsh.Cap = size\n\n\treturn data\n}\n\n\/\/ patch patches the trampoline code to store value for multiboot info address,\n\/\/ entry point, and boot magic value.\n\/\/\n\/\/ All 3 are determined by pretending they are functions, and finding their PC\n\/\/ within our own address space.\nfunc patch(trampStart uintptr, trampoline []byte, magicVal, infoAddr, entryPoint uintptr) ([]byte, error) {\n\treplace := func(start uintptr, d []byte, f func(), val uint32) error {\n\t\tbuf := make([]byte, 4)\n\t\tubinary.NativeEndian.PutUint32(buf, val)\n\n\t\toffset := funcPC(f) - start\n\t\tif int(offset+4) > len(d) {\n\t\t\treturn io.ErrUnexpectedEOF\n\t\t}\n\t\tcopy(d[int(offset):], buf)\n\t\treturn nil\n\t}\n\n\tif err := replace(trampStart, trampoline, info, uint32(infoAddr)); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := replace(trampStart, trampoline, entry, uint32(entryPoint)); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := replace(trampStart, trampoline, magic, uint32(magicVal)); err != nil {\n\t\treturn nil, err\n\t}\n\treturn trampoline, nil\n}\n<commit_msg>trampoline: rename potentially insensitive var name<commit_after>\/\/ Copyright 2018 the u-root Authors. All rights reserved\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package trampoline sets machine to a specific state defined by multiboot v1\n\/\/ spec and jumps to the intended kernel.\n\/\/\n\/\/ https:\/\/www.gnu.org\/software\/grub\/manual\/multiboot\/multiboot.html#Machine-state.\npackage trampoline\n\nimport (\n\t\"io\"\n\t\"reflect\"\n\t\"unsafe\"\n\n\t\"github.com\/u-root\/u-root\/pkg\/ubinary\"\n)\n\nconst (\n\ttrampolineEntry = \"u-root-entry-long\"\n\ttrampolineInfo = \"u-root-info-long\"\n\ttrampolineMagic = \"u-root-mb-magic\"\n)\n\nfunc start()\nfunc end()\nfunc info()\nfunc magic()\nfunc entry()\n\n\/\/ funcPC gives the program counter of the given function.\n\/\/\n\/\/go:linkname funcPC runtime.funcPC\nfunc funcPC(f interface{}) uintptr\n\n\/\/ Setup scans file for trampoline code and sets\n\/\/ values for multiboot info address and kernel entry point.\nfunc Setup(path string, magic, infoAddr, entryPoint uintptr) ([]byte, error) {\n\ttrampolineStart, d, err := extract(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patch(trampolineStart, d, magic, infoAddr, entryPoint)\n}\n\n\/\/ extract extracts trampoline segment from file.\n\/\/ trampoline segment begins after \"u-root-trampoline-begin\" byte sequence + padding,\n\/\/ and ends at \"u-root-trampoline-end\" byte sequence.\nfunc extract(path string) (uintptr, []byte, error) {\n\t\/\/ TODO(https:\/\/github.com\/golang\/go\/issues\/35055): deal with\n\t\/\/ potentially non-contiguous trampoline. 
Rather than locating start\n\t\/\/ and end, we should locate start,boot,farjump{32,64},gdt,info,entry\n\t\/\/ individually and return one potentially really big trampoline slice.\n\ttbegin := funcPC(start)\n\ttend := funcPC(end)\n\tif tend <= tbegin {\n\t\treturn 0, nil, io.ErrUnexpectedEOF\n\t}\n\ttramp := ptrToSlice(tbegin, int(tend-tbegin))\n\n\t\/\/ tramp is read-only executable memory. So we gotta copy it to a\n\t\/\/ slice. Gotta modify it later.\n\tcp := append([]byte(nil), tramp...)\n\treturn tbegin, cp, nil\n}\n\nfunc ptrToSlice(ptr uintptr, size int) []byte {\n\tvar data []byte\n\n\tsh := (*reflect.SliceHeader)(unsafe.Pointer(&data))\n\tsh.Data = ptr\n\tsh.Len = size\n\tsh.Cap = size\n\n\treturn data\n}\n\n\/\/ patch patches the trampoline code to store value for multiboot info address,\n\/\/ entry point, and boot magic value.\n\/\/\n\/\/ All 3 are determined by pretending they are functions, and finding their PC\n\/\/ within our own address space.\nfunc patch(trampolineStart uintptr, trampoline []byte, magicVal, infoAddr, entryPoint uintptr) ([]byte, error) {\n\treplace := func(start uintptr, d []byte, f func(), val uint32) error {\n\t\tbuf := make([]byte, 4)\n\t\tubinary.NativeEndian.PutUint32(buf, val)\n\n\t\toffset := funcPC(f) - start\n\t\tif int(offset+4) > len(d) {\n\t\t\treturn io.ErrUnexpectedEOF\n\t\t}\n\t\tcopy(d[int(offset):], buf)\n\t\treturn nil\n\t}\n\n\tif err := replace(trampolineStart, trampoline, info, uint32(infoAddr)); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := replace(trampolineStart, trampoline, entry, uint32(entryPoint)); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := replace(trampolineStart, trampoline, magic, uint32(magicVal)); err != nil {\n\t\treturn nil, err\n\t}\n\treturn trampoline, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package requestbuilder\n\nimport (\n\t\"io\"\n\t\"net\/http\"\n)\n\ntype HttpRequestBuilder interface {\n\tAddParameter(key string, value ...string) HttpRequestBuilder\n\tAddHeader(key string, values ...string) HttpRequestBuilder\n\tSetMethod(key string) HttpRequestBuilder\n\tSetBody(reader io.Reader) HttpRequestBuilder\n\tAddBasicAuth(username, password string) HttpRequestBuilder\n\tAddContentType(contentType string) HttpRequestBuilder\n\tBuild() (*http.Request, error)\n}\n\ntype httpRequestBuilder struct {\n\turl string\n\tparameter map[string][]string\n\theader http.Header\n\tmethod string\n\tbody io.Reader\n\tusername string\n\tpassword string\n}\n\nfunc NewHttpRequestBuilder(url string) *httpRequestBuilder {\n\tr := new(httpRequestBuilder)\n\tr.method = \"GET\"\n\tr.url = url\n\tr.parameter = make(map[string][]string)\n\tr.header = make(http.Header)\n\treturn r\n}\n\nfunc (r *httpRequestBuilder) AddContentType(contentType string) HttpRequestBuilder {\n\tr.AddHeader(\"Content-Type\", contentType)\n\treturn r\n}\n\nfunc (r *httpRequestBuilder) AddBasicAuth(username, password string) HttpRequestBuilder {\n\tr.username = username\n\tr.password = password\n\treturn r\n}\n\nfunc (r *httpRequestBuilder) SetBody(body io.Reader) HttpRequestBuilder {\n\tr.body = body\n\treturn r\n}\n\nfunc (r *httpRequestBuilder) SetMethod(method string) HttpRequestBuilder {\n\tr.method = method\n\treturn r\n}\n\nfunc (r *httpRequestBuilder) AddHeader(key string, values ...string) HttpRequestBuilder {\n\tr.header[key] = values\n\treturn r\n}\n\nfunc (r *httpRequestBuilder) AddParameter(key string, values ...string) HttpRequestBuilder {\n\tr.parameter[key] = values\n\treturn r\n}\n\nfunc (r *httpRequestBuilder) Build() 
(*http.Request, error) {\n\treq, err := http.NewRequest(r.method, r.getUrlWithParameter(), r.body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header = r.header\n\tif len(r.username) > 0 || len(r.password) > 0 {\n\t\treq.SetBasicAuth(r.username, r.password)\n\t}\n\treturn req, nil\n}\n\nfunc (r *httpRequestBuilder) getUrlWithParameter() string {\n\tresult := r.url\n\tfirst := true\n\tfor key, values := range r.parameter {\n\t\tfor _, value := range values {\n\t\t\tif first {\n\t\t\t\tfirst = false\n\t\t\t\tresult += \"?\"\n\t\t\t} else {\n\t\t\t\tresult += \"&\"\n\t\t\t}\n\t\t\tresult += key\n\t\t\tresult += \"=\"\n\t\t\tresult += value\n\t\t}\n\t}\n\treturn result\n}\n<commit_msg>add contentlength<commit_after>package requestbuilder\n\nimport (\n\t\"io\"\n\t\"net\/http\"\n)\n\ntype HttpRequestBuilder interface {\n\tAddParameter(key string, value ...string) HttpRequestBuilder\n\tAddHeader(key string, values ...string) HttpRequestBuilder\n\tSetMethod(key string) HttpRequestBuilder\n\tSetBody(reader io.Reader) HttpRequestBuilder\n\tAddBasicAuth(username, password string) HttpRequestBuilder\n\tAddContentType(contentType string) HttpRequestBuilder\n\tSetContentLength(contentLength int64) HttpRequestBuilder\n\tBuild() (*http.Request, error)\n}\n\ntype httpRequestBuilder struct {\n\turl string\n\tparameter map[string][]string\n\theader http.Header\n\tmethod string\n\tbody io.Reader\n\tusername string\n\tpassword string\n\tcontentLength int64\n}\n\nfunc NewHttpRequestBuilder(url string) *httpRequestBuilder {\n\tr := new(httpRequestBuilder)\n\tr.method = \"GET\"\n\tr.url = url\n\tr.parameter = make(map[string][]string)\n\tr.header = make(http.Header)\n\treturn r\n}\n\nfunc (r *httpRequestBuilder) AddContentType(contentType string) HttpRequestBuilder {\n\tr.AddHeader(\"Content-Type\", contentType)\n\treturn r\n}\n\nfunc (r *httpRequestBuilder) AddBasicAuth(username, password string) HttpRequestBuilder {\n\tr.username = username\n\tr.password = password\n\treturn r\n}\n\nfunc (r *httpRequestBuilder) SetBody(body io.Reader) HttpRequestBuilder {\n\tr.body = body\n\treturn r\n}\n\nfunc (r *httpRequestBuilder) SetMethod(method string) HttpRequestBuilder {\n\tr.method = method\n\treturn r\n}\n\nfunc (r *httpRequestBuilder) SetContentLength(contentLength int64) HttpRequestBuilder {\n\tr.contentLength = contentLength\n\treturn r\n}\n\nfunc (r *httpRequestBuilder) AddHeader(key string, values ...string) HttpRequestBuilder {\n\tr.header[key] = values\n\treturn r\n}\n\nfunc (r *httpRequestBuilder) AddParameter(key string, values ...string) HttpRequestBuilder {\n\tr.parameter[key] = values\n\treturn r\n}\n\nfunc (r *httpRequestBuilder) Build() (*http.Request, error) {\n\treq, err := http.NewRequest(r.method, r.getUrlWithParameter(), r.body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header = r.header\n\tif len(r.username) > 0 || len(r.password) > 0 {\n\t\treq.SetBasicAuth(r.username, r.password)\n\t}\n\treq.ContentLength = r.contentLength\n\treturn req, nil\n}\n\nfunc (r *httpRequestBuilder) getUrlWithParameter() string {\n\tresult := r.url\n\tfirst := true\n\tfor key, values := range r.parameter {\n\t\tfor _, value := range values {\n\t\t\tif first {\n\t\t\t\tfirst = false\n\t\t\t\tresult += \"?\"\n\t\t\t} else {\n\t\t\t\tresult += \"&\"\n\t\t\t}\n\t\t\tresult += key\n\t\t\tresult += \"=\"\n\t\t\tresult += value\n\t\t}\n\t}\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>package effio\n\nimport 
(\n\t\"code.google.com\/p\/plotinum\/plot\"\n\t\"code.google.com\/p\/plotinum\/plotter\"\n\t\"code.google.com\/p\/plotinum\/vg\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"runtime\"\n)\n\ntype Group struct {\n\tName string\n\tTests Tests\n\tGrouping *Grouping\n}\ntype Groups map[string]*Group\ntype Grouping struct {\n\tName string \/\/ group name, e.g. \"by_fio\", \"by_media\"\n\tSuitePath string \/\/ root of the suite, e.g. \/home\/atobey\/src\/effio\/suites\/-id\/\n\tOutPath string \/\/ writing final graphs in this directory\n\tGroups Groups `json:\"-\"` \/\/ e.g. \"samsung_840_read_latency\" => [ t1, t2, ... ]\n\tSuite *Suite `json:\"-\"` \/\/ parent test suite\n}\n\n\/\/ suite_path must be a fully-qualitifed path or Chdirs will fail and crash\nfunc (suite *Suite) GraphAll(suite_path string, out_path string) {\n\t\/\/ various groupings\/pivots that will be graphed\n\tby_fio := NewGrouping(\"by_fio_conf\", out_path, suite_path, suite)\n\tby_dev := NewGrouping(\"by_device\", out_path, suite_path, suite)\n\tby_mda := NewGrouping(\"by_media\", out_path, suite_path, suite)\n\tby_tst := NewGrouping(\"by_test\", out_path, suite_path, suite)\n\tall := []Grouping{by_fio, by_dev, by_mda, by_tst}\n\n\t\/\/ assign tests to groups\n\tfor _, test := range suite.Tests {\n\t\tby_fio.AppendGroup(test.FioConfTmpl.Name, test) \/\/ e.g. \"read_latency_512\" => [ t1, t9, .. ]\n\t\tby_dev.AppendGroup(test.Device.Name, test) \/\/ e.g. \"fusionio_iodriveii\" => [ t3, t7, ...]\n\t\tby_mda.AppendGroup(test.Device.Media, test) \/\/ e.g. \"MLC\" => [t1, t6, ...]\n\t\tby_tst.AppendGroup(test.Name, test) \/\/ ends up 1:1 name => [t1]\n\t}\n\n\tfor _, gg := range all {\n\t\tfor _, g := range gg.Groups {\n\t\t\t\/\/ generate a latency logfile size graph for every group\n\t\t\tg.barFileSizes()\n\n\t\t\t\/\/ load the CSV on demand\n\t\t\t\/\/ at one point this cached loaded tests between runs, but as long\n\t\t\t\/\/ as plotinum is taking minutes to generate graphs with lots of data\n\t\t\t\/\/ points, the file loading doesn't cost enough to matter\n\t\t\tfor _, test := range g.Tests {\n\t\t\t\ttest.LatRecs = LoadCSV(test.LatLogPath(g.Grouping.SuitePath))\n\t\t\t\ttest.LatData = test.LatRecs.Summarize(10000, 10)\n\n\t\t\t\t\/\/ release the memory used by loading the raw data then force a GC\n\t\t\t\t\/\/ otherwise some of the CSV files easily OOM a 16G machine\n\t\t\t\ttest.LatRecs = nil\n\t\t\t\truntime.GC()\n\n\t\t\t\ttest.LatData.WriteFiles(gg.OutPath, fmt.Sprintf(\"%s-%s\", gg.Name, g.Name))\n\t\t\t}\n\n\t\t\t\/\/ generate output\n\t\t\tg.scatterPlot(true)\n\t\t\tg.scatterPlot(false)\n\t\t\tg.barChart(true)\n\t\t\tg.barChart(false)\n\n\t\t\t\/\/ write metadata for the group\/grouping as json\n\t\t\tg.writeJson()\n\t\t}\n\t}\n}\n\nfunc NewGrouping(name string, out_path string, suite_path string, suite *Suite) Grouping {\n\tmbrs := make(Groups)\n\treturn Grouping{name, suite_path, out_path, mbrs, suite}\n}\n\nfunc (gg *Grouping) AppendGroup(key string, test *Test) {\n\tif g, ok := gg.Groups[key]; ok {\n\t\tg.Tests = append(gg.Groups[key].Tests, test)\n\t} else {\n\t\tgg.Groups[key] = &Group{key, Tests{test}, gg}\n\t}\n}\n\nfunc (g *Group) barChart(logscale bool) {\n\tp, err := plot.New()\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating new plot: %s\\n\", err)\n\t}\n\n\t\/\/ TODO: human names for test groups\n\tp.Title.Text = fmt.Sprintf(\"Latency Distribution: %s\", g.Name)\n\tp.X.Label.Text = \"Time Offset\"\n\tp.Y.Label.Text = \"Latency 
(usec)\"\n\tp.Add(plotter.NewGrid())\n\tp.Legend.Top = true\n\tw := vg.Points(20)\n\n\tfor i, test := range g.Tests {\n\t\tfmt.Printf(\"Histogram for %s: %v\\n\", test.Name, test.LatData.Histogram)\n\t\tbars, err := plotter.NewBarChart(test.LatData.Histogram, w)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to create new barchart for test %s: %s\\n\", test.Name, err)\n\t\t}\n\t\tbars.Color = CustomColors[i]\n\t\tp.Add(bars)\n\t\tp.Legend.Add(fmt.Sprintf(\"read: %s \", test.Device.Name), bars)\n\t}\n\n\tif logscale {\n\t\tp.Y.Scale = plot.LogScale\n\t\tp.Y.Label.Text = \"Latency (usec log(10))\"\n\t\tg.saveGraph(p, \"scatter-logscale\")\n\t} else {\n\t\tg.saveGraph(p, \"scatter\")\n\t}\n}\n\nfunc (g *Group) scatterPlot(logscale bool) {\n\tp, err := plot.New()\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating new plot: %s\\n\", err)\n\t}\n\n\t\/\/ TODO: human names for test groups\n\tp.Title.Text = fmt.Sprintf(\"Latency Distribution: %s\", g.Name)\n\tp.X.Label.Text = \"Time Offset\"\n\tp.Y.Label.Text = \"Latency (usec)\"\n\tp.Add(plotter.NewGrid())\n\tp.Legend.Top = true\n\n\tfor i, test := range g.Tests {\n\t\tif len(test.LatData.RRecSm) > 0 {\n\t\t\t\/\/ reads get circles\n\t\t\trsp, err := plotter.NewScatter(test.LatData.RRecSm)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Failed to create new scatter plot for test %s: %s\\n\", test.Name, err)\n\t\t\t}\n\t\t\trsp.Shape = plot.CircleGlyph{}\n\t\t\trsp.GlyphStyle.Color = CustomColors[i]\n\t\t\tp.Add(rsp)\n\t\t\tp.Legend.Add(fmt.Sprintf(\"read: %s \", test.Device.Name), rsp)\n\t\t}\n\n\t\tif len(test.LatData.WRecSm) > 0 {\n\t\t\t\/\/ writes get pyramids, same color\n\t\t\twsp, err := plotter.NewScatter(test.LatData.WRecSm)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Failed to create new scatter plot for test %s: %s\\n\", test.Name, err)\n\t\t\t}\n\t\t\twsp.Shape = plot.PyramidGlyph{}\n\t\t\twsp.GlyphStyle.Color = CustomColors[i]\n\t\t\tp.Add(wsp)\n\t\t\tp.Legend.Add(fmt.Sprintf(\"write: %s \", test.Device.Name), wsp)\n\t\t}\n\t}\n\n\tif logscale {\n\t\tp.Y.Scale = plot.LogScale\n\t\tp.Y.Label.Text = \"Latency (usec log(10))\"\n\t\tg.saveGraph(p, \"scatter-logscale\")\n\t} else {\n\t\tg.saveGraph(p, \"scatter\")\n\t}\n}\n\n\/\/ draws a bar graph displaying the sizes of the lat_lat.log files across\n\/\/ all tests\n\/\/ TODO: figure out how to make the bar width respond to the graph width\nfunc (g *Group) barFileSizes() {\n\tsizes := make([]int64, len(g.Tests))\n\tfor i, test := range g.Tests {\n\t\tfi, err := os.Stat(test.LatLogPath(g.Grouping.SuitePath))\n\t\tif err != nil {\n\t\t\tsizes[i] = 0\n\t\t\tcontinue\n\t\t}\n\t\tsizes[i] = fi.Size()\n\t}\n\n\tp, err := plot.New()\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating new plot: %s\\n\", err)\n\t}\n\n\tp.Title.Text = fmt.Sprintf(\"Latency Log Sizes: %s\", g.Name)\n\tp.X.Label.Text = \"Device + Test\"\n\tp.Y.Label.Text = \"Bytes\"\n\tp.Legend.Top = true\n\tp.Add(plotter.NewGrid())\n\n\t\/\/ plotinum doesn't offer a way to draw one group of bars\n\t\/\/ with different colors, so each bar is a group with an offset\n\tvar bw float64 = 20.0\n\tvar count float64 = 0\n\tfor i, test := range g.Tests {\n\t\tif sizes[i] == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tval := plotter.Values{float64(sizes[i])}\n\t\tchart, err := plotter.NewBarChart(val, vg.Points(bw))\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error adding bar to plot: %s\\n\", err)\n\t\t}\n\n\t\tchart.Color = CustomColors[i]\n\t\tchart.Offset = vg.Points(count * bw)\n\n\t\tp.Add(chart)\n\t\tp.Legend.Add(test.Name, chart)\n\n\t\tcount += 
1\n\t}\n\n\tp.X.Min = 0\n\tp.X.Max = float64(count + 1)\n\n\tg.saveGraph(p, \"bar-log-size\")\n}\n\nfunc (g *Group) writeJson() {\n\tfname := fmt.Sprintf(\"group-%s-%s.json\", g.Grouping.Name, g.Name)\n\toutfile := path.Join(g.Grouping.OutPath, fname)\n\n\tjs, err := json.MarshalIndent(g, \"\", \" \")\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to encode group data as JSON: %s\\n\", err)\n\t}\n\tjs = append(js, byte('\\n'))\n\n\terr = ioutil.WriteFile(outfile, js, 0644)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to write group JSON data file '%s': %s\\n\", outfile, err)\n\t}\n}\n\n\/\/ e.g. suites\/-id\/-out\/scatter-by_dev-random-read-512b.jpg\nfunc (g *Group) saveGraph(p *plot.Plot, name string) {\n\tfname := fmt.Sprintf(\"%s-%s-%s.svg\", name, g.Grouping.Name, g.Name)\n\tfpath := path.Join(g.Grouping.OutPath, fname)\n\terr := p.Save(12, 8, fpath)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to save %s: %s\\n\", fpath, err)\n\t}\n\tlog.Printf(\"saved graph: '%s'\\n\", fpath)\n}\n<commit_msg>Bugfixes, reduce glyph size, etc.<commit_after>package effio\n\nimport (\n\t\"code.google.com\/p\/plotinum\/plot\"\n\t\"code.google.com\/p\/plotinum\/plotter\"\n\t\"code.google.com\/p\/plotinum\/vg\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"runtime\"\n)\n\ntype Group struct {\n\tName string\n\tTests Tests\n\tGrouping *Grouping\n}\ntype Groups map[string]*Group\ntype Grouping struct {\n\tName string \/\/ group name, e.g. \"by_fio\", \"by_media\"\n\tSuitePath string \/\/ root of the suite, e.g. \/home\/atobey\/src\/effio\/suites\/-id\/\n\tOutPath string \/\/ writing final graphs in this directory\n\tGroups Groups `json:\"-\"` \/\/ e.g. \"samsung_840_read_latency\" => [ t1, t2, ... ]\n\tSuite *Suite `json:\"-\"` \/\/ parent test suite\n}\n\n\/\/ suite_path must be a fully-qualitifed path or Chdirs will fail and crash\nfunc (suite *Suite) GraphAll(suite_path string, out_path string) {\n\t\/\/ various groupings\/pivots that will be graphed\n\tby_fio := NewGrouping(\"by_fio_conf\", out_path, suite_path, suite)\n\tby_dev := NewGrouping(\"by_device\", out_path, suite_path, suite)\n\tby_mda := NewGrouping(\"by_media\", out_path, suite_path, suite)\n\tby_tst := NewGrouping(\"by_test\", out_path, suite_path, suite)\n\tall := []Grouping{by_fio, by_dev, by_mda, by_tst}\n\n\t\/\/ assign tests to groups\n\tfor _, test := range suite.Tests {\n\t\tby_fio.AppendGroup(test.FioConfTmpl.Name, test) \/\/ e.g. \"read_latency_512\" => [ t1, t9, .. ]\n\t\tby_dev.AppendGroup(test.Device.Name, test) \/\/ e.g. \"fusionio_iodriveii\" => [ t3, t7, ...]\n\t\tby_mda.AppendGroup(test.Device.Media, test) \/\/ e.g. 
\"MLC\" => [t1, t6, ...]\n\t\tby_tst.AppendGroup(test.Name, test) \/\/ ends up 1:1 name => [t1]\n\t}\n\n\tfor _, gg := range all {\n\t\tfor _, g := range gg.Groups {\n\t\t\t\/\/ generate a latency logfile size graph for every group\n\t\t\tg.barFileSizes()\n\n\t\t\t\/\/ load the CSV on demand\n\t\t\t\/\/ at one point this cached loaded tests between runs, but as long\n\t\t\t\/\/ as plotinum is taking minutes to generate graphs with lots of data\n\t\t\t\/\/ points, the file loading doesn't cost enough to matter\n\t\t\tfor _, test := range g.Tests {\n\t\t\t\ttest.LatRecs = LoadCSV(test.LatLogPath(g.Grouping.SuitePath))\n\t\t\t\ttest.LatData = test.LatRecs.Summarize(10000, 10)\n\n\t\t\t\t\/\/ release the memory used by loading the raw data then force a GC\n\t\t\t\t\/\/ otherwise some of the CSV files easily OOM a 16G machine\n\t\t\t\ttest.LatRecs = nil\n\t\t\t\truntime.GC()\n\n\t\t\t\ttest.LatData.WriteFiles(gg.OutPath, fmt.Sprintf(\"%s-%s\", gg.Name, g.Name))\n\t\t\t}\n\n\t\t\t\/\/ generate output\n\t\t\tg.scatterPlot(true)\n\t\t\tg.scatterPlot(false)\n\t\t\tg.barChart(true)\n\t\t\tg.barChart(false)\n\n\t\t\t\/\/ write metadata for the group\/grouping as json\n\t\t\tg.writeJson()\n\t\t}\n\t}\n}\n\nfunc NewGrouping(name string, out_path string, suite_path string, suite *Suite) Grouping {\n\tmbrs := make(Groups)\n\treturn Grouping{name, suite_path, out_path, mbrs, suite}\n}\n\nfunc (gg *Grouping) AppendGroup(key string, test *Test) {\n\tif g, ok := gg.Groups[key]; ok {\n\t\tg.Tests = append(gg.Groups[key].Tests, test)\n\t} else {\n\t\tgg.Groups[key] = &Group{key, Tests{test}, gg}\n\t}\n}\n\nfunc (g *Group) barChart(logscale bool) {\n\tp, err := plot.New()\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating new plot: %s\\n\", err)\n\t}\n\n\t\/\/ TODO: human names for test groups\n\tp.Title.Text = fmt.Sprintf(\"Latency Distribution: %s\", g.Name)\n\tp.X.Label.Text = \"Time Offset\"\n\tp.Y.Label.Text = \"Latency (usec)\"\n\tp.Add(plotter.NewGrid())\n\tp.Legend.Top = true\n\tw := vg.Points(20)\n\n\tfor i, test := range g.Tests {\n\t\tfmt.Printf(\"Histogram for %s: %v\\n\", test.Name, test.LatData.Histogram)\n\t\tbars, err := plotter.NewBarChart(test.LatData.Histogram, w)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to create new barchart for test %s: %s\\n\", test.Name, err)\n\t\t\treturn\n\t\t}\n\t\tbars.Color = CustomColors[i]\n\t\tp.Add(bars)\n\t\tp.Legend.Add(fmt.Sprintf(\"read: %s \", test.Device.Name), bars)\n\t}\n\n\n\tif logscale {\n\t\tp.Y.Scale = plot.LogScale\n\t\tp.Y.Label.Text = \"Latency (usec log(10))\"\n\n\t\t\/\/ defer the savegraph functions so panics can be recovered\n\t\t\/\/ plotinum will panic on zero values when LogScale is enabled\n\t\t\/\/ BUG\/TODO: somewhere in latency.go histograms are getting\n\t\t\/\/ entries with values of 0 which should be impossible on latency data\n\t\tdefer func() {\n\t\t\tif r := recover(); r != nil {\n\t\t\t\tfmt.Println(\"Recovered from g.saveGraph()\", r)\n\t\t\t}\n\t\t}()\n\t\tdefer g.saveGraph(p, \"histogram_bars-logscale\")\n\t} else {\n\t\tg.saveGraph(p, \"histogram_bars\")\n\t}\n}\n\nfunc (g *Group) scatterPlot(logscale bool) {\n\tp, err := plot.New()\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating new plot: %s\\n\", err)\n\t}\n\n\t\/\/ TODO: human names for test groups\n\tp.Title.Text = fmt.Sprintf(\"Latency Distribution: %s\", g.Name)\n\tp.X.Label.Text = \"Time Offset\"\n\tp.Y.Label.Text = \"Latency (usec)\"\n\tp.Add(plotter.NewGrid())\n\tp.Legend.Top = true\n\n\tfor i, test := range g.Tests {\n\t\tif len(test.LatData.RRecSm) > 0 
{\n\t\t\t\/\/ reads get circles\n\t\t\trsp, err := plotter.NewScatter(test.LatData.RRecSm)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Failed to create new scatter plot for test %s: %s\\n\", test.Name, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\trsp.Shape = plot.CircleGlyph{}\n\t\t\trsp.Radius = vg.Points(3)\n\t\t\trsp.GlyphStyle.Color = CustomColors[i]\n\t\t\tp.Add(rsp)\n\t\t\tp.Legend.Add(fmt.Sprintf(\"read: %s \", test.Device.Name), rsp)\n\t\t}\n\n\t\tif len(test.LatData.WRecSm) > 0 {\n\t\t\t\/\/ writes get pyramids, same color\n\t\t\twsp, err := plotter.NewScatter(test.LatData.WRecSm)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Failed to create new scatter plot for test %s: %s\\n\", test.Name, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\twsp.Shape = plot.PyramidGlyph{}\n\t\t\twsp.Radius = vg.Points(3)\n\t\t\twsp.GlyphStyle.Color = CustomColors[i]\n\t\t\tp.Add(wsp)\n\t\t\tp.Legend.Add(fmt.Sprintf(\"write: %s \", test.Device.Name), wsp)\n\t\t}\n\t}\n\n\tif logscale {\n\t\tp.Y.Scale = plot.LogScale\n\t\tp.Y.Label.Text = \"Latency (usec log(10))\"\n\t\tg.saveGraph(p, \"scatter-logscale\")\n\t} else {\n\t\tg.saveGraph(p, \"scatter\")\n\t}\n}\n\n\/\/ draws a bar graph displaying the sizes of the lat_lat.log files across\n\/\/ all tests\n\/\/ TODO: figure out how to make the bar width respond to the graph width\nfunc (g *Group) barFileSizes() {\n\tsizes := make([]int64, len(g.Tests))\n\tfor i, test := range g.Tests {\n\t\tfi, err := os.Stat(test.LatLogPath(g.Grouping.SuitePath))\n\t\tif err != nil {\n\t\t\tsizes[i] = 0\n\t\t\tcontinue\n\t\t}\n\t\tsizes[i] = fi.Size()\n\t}\n\n\tp, err := plot.New()\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating new plot: %s\\n\", err)\n\t}\n\n\tp.Title.Text = fmt.Sprintf(\"Latency Log Sizes: %s\", g.Name)\n\tp.X.Label.Text = \"Device + Test\"\n\tp.Y.Label.Text = \"Bytes\"\n\tp.Legend.Top = true\n\tp.Add(plotter.NewGrid())\n\n\t\/\/ plotinum doesn't offer a way to draw one group of bars\n\t\/\/ with different colors, so each bar is a group with an offset\n\tvar bw float64 = 20.0\n\tvar count float64 = 0\n\tfor i, test := range g.Tests {\n\t\tif sizes[i] == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tval := plotter.Values{float64(sizes[i])}\n\t\tchart, err := plotter.NewBarChart(val, vg.Points(bw))\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error adding bar to plot: %s\\n\", err)\n\t\t}\n\n\t\tchart.Color = CustomColors[i]\n\t\tchart.Offset = vg.Points(count * bw)\n\n\t\tp.Add(chart)\n\t\tp.Legend.Add(test.Name, chart)\n\n\t\tcount += 1\n\t}\n\n\tp.X.Min = 0\n\tp.X.Max = float64(count + 1)\n\n\tg.saveGraph(p, \"bar-log-size\")\n}\n\nfunc (g *Group) writeJson() {\n\tfname := fmt.Sprintf(\"group-%s-%s.json\", g.Grouping.Name, g.Name)\n\toutfile := path.Join(g.Grouping.OutPath, fname)\n\n\tjs, err := json.MarshalIndent(g, \"\", \" \")\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to encode group data as JSON: %s\\n\", err)\n\t}\n\tjs = append(js, byte('\\n'))\n\n\terr = ioutil.WriteFile(outfile, js, 0644)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to write group JSON data file '%s': %s\\n\", outfile, err)\n\t}\n}\n\n\/\/ e.g. 
suites\/-id\/-out\/scatter-by_dev-random-read-512b.jpg\nfunc (g *Group) saveGraph(p *plot.Plot, name string) {\n\tfname := fmt.Sprintf(\"%s-%s-%s.png\", name, g.Grouping.Name, g.Name)\n\tfpath := path.Join(g.Grouping.OutPath, fname)\n\terr := p.Save(12, 8, fpath)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to save %s: %s\\n\", fpath, err)\n\t}\n\tlog.Printf(\"saved graph: '%s'\\n\", fpath)\n}\n<|endoftext|>"} {"text":"<commit_before>package cloudwatchlogs\n\nimport (\n\t\"encoding\/json\"\n\t\"testing\"\n)\n\nconst logMessageTestData = `\n{\n \"awslogs\":\n {\n \"data\": \"H4sIAAAAAAAAAK2TW2\/TQBCF\/8rK4jFOZu+7fnOVUHFJQbEBiTqq1va6smTHwXbShqr\/nUlTBEiAWoH27ZzR2W+OvXdB64fBXfv0sPVBFMzjNL5aLpIkPl8Ek6C72fgeZWCaSitAGi5Qbrrr877bbdGZuZth1rg2L91s7\/uh7jbDaSIZe+9aHGFA1QzYjMHs8sXbOF0k6Vr6KldVwU3FqKiMNUwzarjSnrKc6xwjhl0+FH29HTHyZd2MGB5El0Gydf3o4u22qQt39K6Wh5NdUCcKK50Q0ueVBOFYSZ21FZNeVRUvhbTe2SpYP\/At9n4zHiPvgrpETM6EMhIscKMRWRoQRnIhNafKgBaaa6zBMqMQVwvFKSipABB1rLHF0bVYCBVSWm6YtVyqyfd2MT5J41VKVv7LDkdflRGxqrLGKR6W2kBIqZehy7UJlcXLfc7Kwufk46nSiDwWl22C+8lvgI0GBcwIyrFsCVwLBBCCYq9CKS6stFRYAGWE\/TOw\/Rn4+NlCYCGDFHhETUTtlEvzORufQp6NF13pXydkP93DlMKUqwmJPyUkmb9BjeHh7N+3kX+p\/5dtFhfz55b\/H+joE+lWi\/fvnv93ZON81z+8gYjAVDLSDtl4VjeNL8kPhwKgQbJx6duuP5Ck\/upRZYYsz1B0t+TR+DB4vJjyB\/24\/Pr+G81LpuMfBAAA\"\n }\n}\n`\n\nfunc TestUnmarshal(t *testing.T) {\n\tvar event Event\n\terr := json.Unmarshal([]byte(logMessageTestData), &event)\n\tif nil != err {\n\t\tt.Errorf(\"Failed to unmarshal log event message\")\n\t}\n\tdata, err := event.AWSLogs.DecodedData()\n\tif nil != err {\n\t\tt.Error(\"Failed to decode event data: \" + err.Error())\n\t}\n\tif len(data.LogEvents) != 4 {\n\t\tt.Error(\"Failed to unmarshal 4 LogEvent entries\")\n\t}\n}\n<commit_msg>Prefer AWS event types<commit_after>package cloudwatchlogs\n\nimport (\n\t\"encoding\/json\"\n\t\"testing\"\n\n\tawsLambdaEvents \"github.com\/aws\/aws-lambda-go\/events\"\n)\n\nconst logMessageTestData = `\n{\n \"awslogs\":\n {\n \"data\": \"H4sIAAAAAAAAAK2TW2\/TQBCF\/8rK4jFOZu+7fnOVUHFJQbEBiTqq1va6smTHwXbShqr\/nUlTBEiAWoH27ZzR2W+OvXdB64fBXfv0sPVBFMzjNL5aLpIkPl8Ek6C72fgeZWCaSitAGi5Qbrrr877bbdGZuZth1rg2L91s7\/uh7jbDaSIZe+9aHGFA1QzYjMHs8sXbOF0k6Vr6KldVwU3FqKiMNUwzarjSnrKc6xwjhl0+FH29HTHyZd2MGB5El0Gydf3o4u22qQt39K6Wh5NdUCcKK50Q0ueVBOFYSZ21FZNeVRUvhbTe2SpYP\/At9n4zHiPvgrpETM6EMhIscKMRWRoQRnIhNafKgBaaa6zBMqMQVwvFKSipABB1rLHF0bVYCBVSWm6YtVyqyfd2MT5J41VKVv7LDkdflRGxqrLGKR6W2kBIqZehy7UJlcXLfc7Kwufk46nSiDwWl22C+8lvgI0GBcwIyrFsCVwLBBCCYq9CKS6stFRYAGWE\/TOw\/Rn4+NlCYCGDFHhETUTtlEvzORufQp6NF13pXydkP93DlMKUqwmJPyUkmb9BjeHh7N+3kX+p\/5dtFhfz55b\/H+joE+lWi\/fvnv93ZON81z+8gYjAVDLSDtl4VjeNL8kPhwKgQbJx6duuP5Ck\/upRZYYsz1B0t+TR+DB4vJjyB\/24\/Pr+G81LpuMfBAAA\"\n }\n}\n`\n\nfunc TestUnmarshal(t *testing.T) {\n\tvar event awsLambdaEvents.CloudwatchLogsEvent\n\terr := json.Unmarshal([]byte(logMessageTestData), &event)\n\tif nil != err {\n\t\tt.Errorf(\"Failed to unmarshal log event message\")\n\t}\n\tdata, err := event.AWSLogs.Parse()\n\tif nil != err {\n\t\tt.Error(\"Failed to decode event data: \" + err.Error())\n\t}\n\tif len(data.LogEvents) != 4 {\n\t\tt.Error(\"Failed to unmarshal 4 LogEvent entries\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/cloudtrail\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\nfunc TestAccAWSCloudTrail_basic(t *testing.T) {\n\tvar trail 
cloudtrail.Trail\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSCloudTrailDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccAWSCloudTrailConfig,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudTrailExists(\"aws_cloudtrail.foobar\", &trail),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"aws_cloudtrail.foobar\", \"include_global_service_events\", \"true\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccAWSCloudTrailConfigModified,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudTrailExists(\"aws_cloudtrail.foobar\", &trail),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"aws_cloudtrail.foobar\", \"s3_key_prefix\", \"\/prefix\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"aws_cloudtrail.foobar\", \"include_global_service_events\", \"false\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccAWSCloudTrail_enable_logging(t *testing.T) {\n\tvar trail cloudtrail.Trail\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSCloudTrailDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccAWSCloudTrailConfig,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudTrailExists(\"aws_cloudtrail.foobar\", &trail),\n\t\t\t\t\ttestAccCheckCloudTrailLoggingEnabled(\"aws_cloudtrail.foobar\", false, &trail),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccAWSCloudTrailConfigModified,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudTrailExists(\"aws_cloudtrail.foobar\", &trail),\n\t\t\t\t\ttestAccCheckCloudTrailLoggingEnabled(\"aws_cloudtrail.foobar\", true, &trail),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccAWSCloudTrailConfig,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudTrailExists(\"aws_cloudtrail.foobar\", &trail),\n\t\t\t\t\ttestAccCheckCloudTrailLoggingEnabled(\"aws_cloudtrail.foobar\", false, &trail),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc testAccCheckCloudTrailExists(n string, trail *cloudtrail.Trail) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tconn := testAccProvider.Meta().(*AWSClient).cloudtrailconn\n\t\tparams := cloudtrail.DescribeTrailsInput{\n\t\t\tTrailNameList: []*string{aws.String(rs.Primary.ID)},\n\t\t}\n\t\tresp, err := conn.DescribeTrails(¶ms)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(resp.TrailList) == 0 {\n\t\t\treturn fmt.Errorf(\"Trail not found\")\n\t\t}\n\t\t*trail = *resp.TrailList[0]\n\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckCloudTrailLoggingEnabled(n string, desired bool, trail *cloudtrail.Trail) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tconn := testAccProvider.Meta().(*AWSClient).cloudtrailconn\n\t\tparams := cloudtrail.GetTrailStatusInput{\n\t\t\tName: aws.String(rs.Primary.ID),\n\t\t}\n\t\tresp, err := conn.GetTrailStatus(¶ms)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif *resp.IsLogging != desired {\n\t\t\treturn fmt.Errorf(\"Logging status is 
incorrect\")\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckAWSCloudTrailDestroy(s *terraform.State) error {\n\tconn := testAccProvider.Meta().(*AWSClient).cloudtrailconn\n\n\tfor _, rs := range s.RootModule().Resources {\n\t\tif rs.Type != \"aws_cloudtrail\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tparams := cloudtrail.DescribeTrailsInput{\n\t\t\tTrailNameList: []*string{aws.String(rs.Primary.ID)},\n\t\t}\n\n\t\tresp, err := conn.DescribeTrails(¶ms)\n\n\t\tif err == nil {\n\t\t\tif len(resp.TrailList) != 0 &&\n\t\t\t\t*resp.TrailList[0].Name == rs.Primary.ID {\n\t\t\t\treturn fmt.Errorf(\"CloudTrail still exists: %s\", rs.Primary.ID)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nvar cloudTrailRandInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int()\n\nvar testAccAWSCloudTrailConfig = fmt.Sprintf(`\nresource \"aws_cloudtrail\" \"foobar\" {\n name = \"tf-trail-foobar\"\n s3_bucket_name = \"${aws_s3_bucket.foo.id}\"\n}\n\nresource \"aws_s3_bucket\" \"foo\" {\n\tbucket = \"tf-test-trail-%d\"\n\tforce_destroy = true\n\tpolicy = <<POLICY\n{\n\t\"Version\": \"2012-10-17\",\n\t\"Statement\": [\n\t\t{\n\t\t\t\"Sid\": \"AWSCloudTrailAclCheck\",\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Principal\": \"*\",\n\t\t\t\"Action\": \"s3:GetBucketAcl\",\n\t\t\t\"Resource\": \"arn:aws:s3:::tf-test-trail-%d\"\n\t\t},\n\t\t{\n\t\t\t\"Sid\": \"AWSCloudTrailWrite\",\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Principal\": \"*\",\n\t\t\t\"Action\": \"s3:PutObject\",\n\t\t\t\"Resource\": \"arn:aws:s3:::tf-test-trail-%d\/*\",\n\t\t\t\"Condition\": {\n\t\t\t\t\"StringEquals\": {\n\t\t\t\t\t\"s3:x-amz-acl\": \"bucket-owner-full-control\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t]\n}\nPOLICY\n}\n`, cloudTrailRandInt, cloudTrailRandInt, cloudTrailRandInt)\n\nvar testAccAWSCloudTrailConfigModified = fmt.Sprintf(`\nresource \"aws_cloudtrail\" \"foobar\" {\n name = \"tf-trail-foobar\"\n s3_bucket_name = \"${aws_s3_bucket.foo.id}\"\n s3_key_prefix = \"\/prefix\"\n include_global_service_events = false\n enable_logging = true\n}\n\nresource \"aws_s3_bucket\" \"foo\" {\n\tbucket = \"tf-test-trail-%d\"\n\tforce_destroy = true\n\tpolicy = <<POLICY\n{\n\t\"Version\": \"2012-10-17\",\n\t\"Statement\": [\n\t\t{\n\t\t\t\"Sid\": \"AWSCloudTrailAclCheck\",\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Principal\": \"*\",\n\t\t\t\"Action\": \"s3:GetBucketAcl\",\n\t\t\t\"Resource\": \"arn:aws:s3:::tf-test-trail-%d\"\n\t\t},\n\t\t{\n\t\t\t\"Sid\": \"AWSCloudTrailWrite\",\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Principal\": \"*\",\n\t\t\t\"Action\": \"s3:PutObject\",\n\t\t\t\"Resource\": \"arn:aws:s3:::tf-test-trail-%d\/*\",\n\t\t\t\"Condition\": {\n\t\t\t\t\"StringEquals\": {\n\t\t\t\t\t\"s3:x-amz-acl\": \"bucket-owner-full-control\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t]\n}\nPOLICY\n}\n`, cloudTrailRandInt, cloudTrailRandInt, cloudTrailRandInt)\n<commit_msg>Add a comment in tests<commit_after>package aws\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/cloudtrail\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\nfunc TestAccAWSCloudTrail_basic(t *testing.T) {\n\tvar trail cloudtrail.Trail\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSCloudTrailDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccAWSCloudTrailConfig,\n\t\t\t\tCheck: 
resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudTrailExists(\"aws_cloudtrail.foobar\", &trail),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"aws_cloudtrail.foobar\", \"include_global_service_events\", \"true\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccAWSCloudTrailConfigModified,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudTrailExists(\"aws_cloudtrail.foobar\", &trail),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"aws_cloudtrail.foobar\", \"s3_key_prefix\", \"\/prefix\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"aws_cloudtrail.foobar\", \"include_global_service_events\", \"false\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccAWSCloudTrail_enable_logging(t *testing.T) {\n\tvar trail cloudtrail.Trail\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSCloudTrailDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccAWSCloudTrailConfig,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudTrailExists(\"aws_cloudtrail.foobar\", &trail),\n\t\t\t\t\t\/\/ This is a warning test. AWS sets up new trails with logging disabled\n\t\t\t\t\t\/\/ Should that change in the future, this test should fail.\n\t\t\t\t\ttestAccCheckCloudTrailLoggingEnabled(\"aws_cloudtrail.foobar\", false, &trail),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccAWSCloudTrailConfigModified,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudTrailExists(\"aws_cloudtrail.foobar\", &trail),\n\t\t\t\t\ttestAccCheckCloudTrailLoggingEnabled(\"aws_cloudtrail.foobar\", true, &trail),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccAWSCloudTrailConfig,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudTrailExists(\"aws_cloudtrail.foobar\", &trail),\n\t\t\t\t\ttestAccCheckCloudTrailLoggingEnabled(\"aws_cloudtrail.foobar\", false, &trail),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc testAccCheckCloudTrailExists(n string, trail *cloudtrail.Trail) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tconn := testAccProvider.Meta().(*AWSClient).cloudtrailconn\n\t\tparams := cloudtrail.DescribeTrailsInput{\n\t\t\tTrailNameList: []*string{aws.String(rs.Primary.ID)},\n\t\t}\n\t\tresp, err := conn.DescribeTrails(¶ms)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(resp.TrailList) == 0 {\n\t\t\treturn fmt.Errorf(\"Trail not found\")\n\t\t}\n\t\t*trail = *resp.TrailList[0]\n\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckCloudTrailLoggingEnabled(n string, desired bool, trail *cloudtrail.Trail) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tconn := testAccProvider.Meta().(*AWSClient).cloudtrailconn\n\t\tparams := cloudtrail.GetTrailStatusInput{\n\t\t\tName: aws.String(rs.Primary.ID),\n\t\t}\n\t\tresp, err := conn.GetTrailStatus(¶ms)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif *resp.IsLogging != desired {\n\t\t\treturn fmt.Errorf(\"Logging status is incorrect\")\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckAWSCloudTrailDestroy(s *terraform.State) error {\n\tconn := 
testAccProvider.Meta().(*AWSClient).cloudtrailconn\n\n\tfor _, rs := range s.RootModule().Resources {\n\t\tif rs.Type != \"aws_cloudtrail\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tparams := cloudtrail.DescribeTrailsInput{\n\t\t\tTrailNameList: []*string{aws.String(rs.Primary.ID)},\n\t\t}\n\n\t\tresp, err := conn.DescribeTrails(¶ms)\n\n\t\tif err == nil {\n\t\t\tif len(resp.TrailList) != 0 &&\n\t\t\t\t*resp.TrailList[0].Name == rs.Primary.ID {\n\t\t\t\treturn fmt.Errorf(\"CloudTrail still exists: %s\", rs.Primary.ID)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nvar cloudTrailRandInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int()\n\nvar testAccAWSCloudTrailConfig = fmt.Sprintf(`\nresource \"aws_cloudtrail\" \"foobar\" {\n name = \"tf-trail-foobar\"\n s3_bucket_name = \"${aws_s3_bucket.foo.id}\"\n}\n\nresource \"aws_s3_bucket\" \"foo\" {\n\tbucket = \"tf-test-trail-%d\"\n\tforce_destroy = true\n\tpolicy = <<POLICY\n{\n\t\"Version\": \"2012-10-17\",\n\t\"Statement\": [\n\t\t{\n\t\t\t\"Sid\": \"AWSCloudTrailAclCheck\",\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Principal\": \"*\",\n\t\t\t\"Action\": \"s3:GetBucketAcl\",\n\t\t\t\"Resource\": \"arn:aws:s3:::tf-test-trail-%d\"\n\t\t},\n\t\t{\n\t\t\t\"Sid\": \"AWSCloudTrailWrite\",\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Principal\": \"*\",\n\t\t\t\"Action\": \"s3:PutObject\",\n\t\t\t\"Resource\": \"arn:aws:s3:::tf-test-trail-%d\/*\",\n\t\t\t\"Condition\": {\n\t\t\t\t\"StringEquals\": {\n\t\t\t\t\t\"s3:x-amz-acl\": \"bucket-owner-full-control\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t]\n}\nPOLICY\n}\n`, cloudTrailRandInt, cloudTrailRandInt, cloudTrailRandInt)\n\nvar testAccAWSCloudTrailConfigModified = fmt.Sprintf(`\nresource \"aws_cloudtrail\" \"foobar\" {\n name = \"tf-trail-foobar\"\n s3_bucket_name = \"${aws_s3_bucket.foo.id}\"\n s3_key_prefix = \"\/prefix\"\n include_global_service_events = false\n enable_logging = true\n}\n\nresource \"aws_s3_bucket\" \"foo\" {\n\tbucket = \"tf-test-trail-%d\"\n\tforce_destroy = true\n\tpolicy = <<POLICY\n{\n\t\"Version\": \"2012-10-17\",\n\t\"Statement\": [\n\t\t{\n\t\t\t\"Sid\": \"AWSCloudTrailAclCheck\",\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Principal\": \"*\",\n\t\t\t\"Action\": \"s3:GetBucketAcl\",\n\t\t\t\"Resource\": \"arn:aws:s3:::tf-test-trail-%d\"\n\t\t},\n\t\t{\n\t\t\t\"Sid\": \"AWSCloudTrailWrite\",\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Principal\": \"*\",\n\t\t\t\"Action\": \"s3:PutObject\",\n\t\t\t\"Resource\": \"arn:aws:s3:::tf-test-trail-%d\/*\",\n\t\t\t\"Condition\": {\n\t\t\t\t\"StringEquals\": {\n\t\t\t\t\t\"s3:x-amz-acl\": \"bucket-owner-full-control\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t]\n}\nPOLICY\n}\n`, cloudTrailRandInt, cloudTrailRandInt, cloudTrailRandInt)\n<|endoftext|>"} {"text":"<commit_before>package taskrunner\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\n\thclog \"github.com\/hashicorp\/go-hclog\"\n\tplugin \"github.com\/hashicorp\/go-plugin\"\n\t\"github.com\/hashicorp\/nomad\/client\/allocrunner\/interfaces\"\n\t\"github.com\/hashicorp\/nomad\/client\/logmon\"\n\t\"github.com\/hashicorp\/nomad\/helper\/uuid\"\n\t\"github.com\/hashicorp\/nomad\/nomad\/structs\"\n\tpstructs \"github.com\/hashicorp\/nomad\/plugins\/shared\/structs\"\n)\n\nconst (\n\t\/\/ logmonReattachKey is the HookData key where logmon's reattach config\n\t\/\/ is stored.\n\tlogmonReattachKey = \"reattach_config\"\n)\n\n\/\/ logmonHook launches logmon and manages task logging\ntype logmonHook struct {\n\t\/\/ logmon is the handle to the log monitor process for the 
task.\n\tlogmon logmon.LogMon\n\tlogmonPluginClient *plugin.Client\n\n\tconfig *logmonHookConfig\n\n\tlogger hclog.Logger\n}\n\ntype logmonHookConfig struct {\n\tlogDir string\n\tstdoutFifo string\n\tstderrFifo string\n}\n\nfunc newLogMonHook(cfg *logmonHookConfig, logger hclog.Logger) *logmonHook {\n\thook := &logmonHook{\n\t\tconfig: cfg,\n\t\tlogger: logger,\n\t}\n\n\treturn hook\n}\n\nfunc newLogMonHookConfig(taskName, logDir string) *logmonHookConfig {\n\tcfg := &logmonHookConfig{\n\t\tlogDir: logDir,\n\t}\n\tif runtime.GOOS == \"windows\" {\n\t\tid := uuid.Generate()[:8]\n\t\tcfg.stdoutFifo = fmt.Sprintf(\"\/\/.\/pipe\/%s-%s.stdout\", taskName, id)\n\t\tcfg.stderrFifo = fmt.Sprintf(\"\/\/.\/pipe\/%s-%s.stderr\", taskName, id)\n\t} else {\n\t\tcfg.stdoutFifo = filepath.Join(logDir, fmt.Sprintf(\".%s.stdout.fifo\", taskName))\n\t\tcfg.stderrFifo = filepath.Join(logDir, fmt.Sprintf(\".%s.stderr.fifo\", taskName))\n\t}\n\treturn cfg\n}\n\nfunc (*logmonHook) Name() string {\n\treturn \"logmon\"\n}\n\nfunc (h *logmonHook) launchLogMon(reattachConfig *plugin.ReattachConfig) error {\n\tl, c, err := logmon.LaunchLogMon(h.logger, reattachConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\th.logmon = l\n\th.logmonPluginClient = c\n\treturn nil\n}\n\nfunc reattachConfigFromHookData(data map[string]string) (*plugin.ReattachConfig, error) {\n\tif data == nil || data[logmonReattachKey] == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tvar cfg pstructs.ReattachConfig\n\terr := json.Unmarshal([]byte(data[logmonReattachKey]), &cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn pstructs.ReattachConfigToGoPlugin(&cfg)\n}\n\nfunc (h *logmonHook) Prestart(ctx context.Context,\n\treq *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error {\n\n\t\/\/ Attempt to reattach to logmon\n\tif h.logmonPluginClient == nil {\n\t\treattachConfig, err := reattachConfigFromHookData(req.PreviousState)\n\t\tif err != nil {\n\t\t\th.logger.Error(\"failed to load reattach config\", \"error\", err)\n\t\t\treturn err\n\t\t}\n\t\tif reattachConfig != nil {\n\t\t\tif err := h.launchLogMon(reattachConfig); err != nil {\n\t\t\t\th.logger.Warn(\"failed to reattach to logmon process\", \"error\", err)\n\t\t\t}\n\t\t}\n\n\t}\n\n\t\/\/ We did not reattach to a plugin and one is still not running.\n\tif h.logmonPluginClient == nil || h.logmonPluginClient.Exited() {\n\t\tif err := h.launchLogMon(nil); err != nil {\n\t\t\t\/\/ Retry errors launching logmon as logmon may have crashed on start and\n\t\t\t\/\/ subsequent attempts will start a new one.\n\t\t\th.logger.Error(\"failed to launch logmon process\", \"error\", err)\n\t\t\treturn structs.NewRecoverableError(err, true)\n\t\t}\n\t}\n\n\terr := h.logmon.Start(&logmon.LogConfig{\n\t\tLogDir: h.config.logDir,\n\t\tStdoutLogFile: fmt.Sprintf(\"%s.stdout\", req.Task.Name),\n\t\tStderrLogFile: fmt.Sprintf(\"%s.stderr\", req.Task.Name),\n\t\tStdoutFifo: h.config.stdoutFifo,\n\t\tStderrFifo: h.config.stderrFifo,\n\t\tMaxFiles: req.Task.LogConfig.MaxFiles,\n\t\tMaxFileSizeMB: req.Task.LogConfig.MaxFileSizeMB,\n\t})\n\tif err != nil {\n\t\th.logger.Error(\"failed to start logmon\", \"error\", err)\n\t\treturn err\n\t}\n\n\trCfg := pstructs.ReattachConfigFromGoPlugin(h.logmonPluginClient.ReattachConfig())\n\tjsonCfg, err := json.Marshal(rCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp.State = map[string]string{logmonReattachKey: string(jsonCfg)}\n\treturn nil\n}\n\nfunc (h *logmonHook) Stop(_ context.Context, req *interfaces.TaskStopRequest, _ 
*interfaces.TaskStopResponse) error {\n\n\t\/\/ It's possible that Stop was called without calling Prestart on agent\n\t\/\/ restarts. Attempt to reattach to an existing logmon.\n\tif h.logmon == nil || h.logmonPluginClient == nil {\n\t\tif err := h.reattach(req); err != nil {\n\t\t\th.logger.Trace(\"error reattaching to logmon when stopping\", \"error\", err)\n\t\t}\n\t}\n\n\tif h.logmon != nil {\n\t\th.logmon.Stop()\n\t}\n\tif h.logmonPluginClient != nil {\n\t\th.logmonPluginClient.Kill()\n\t}\n\n\treturn nil\n}\n\n\/\/ reattach to a running logmon if possible. Will not start a new logmon.\nfunc (h *logmonHook) reattach(req *interfaces.TaskStopRequest) error {\n\treattachConfig, err := reattachConfigFromHookData(req.ExistingState)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Give up if there's no reattach config\n\tif reattachConfig == nil {\n\t\treturn nil\n\t}\n\n\treturn h.launchLogMon(reattachConfig)\n}\n<commit_msg>logmon: retry starting logmon if it exits<commit_after>package taskrunner\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"time\"\n\n\thclog \"github.com\/hashicorp\/go-hclog\"\n\tplugin \"github.com\/hashicorp\/go-plugin\"\n\t\"github.com\/hashicorp\/nomad\/client\/allocrunner\/interfaces\"\n\t\"github.com\/hashicorp\/nomad\/client\/logmon\"\n\t\"github.com\/hashicorp\/nomad\/helper\/uuid\"\n\t\"github.com\/hashicorp\/nomad\/nomad\/structs\"\n\tbstructs \"github.com\/hashicorp\/nomad\/plugins\/base\/structs\"\n\tpstructs \"github.com\/hashicorp\/nomad\/plugins\/shared\/structs\"\n)\n\nconst (\n\t\/\/ logmonReattachKey is the HookData key where logmon's reattach config\n\t\/\/ is stored.\n\tlogmonReattachKey = \"reattach_config\"\n)\n\n\/\/ logmonHook launches logmon and manages task logging\ntype logmonHook struct {\n\t\/\/ logmon is the handle to the log monitor process for the task.\n\tlogmon logmon.LogMon\n\tlogmonPluginClient *plugin.Client\n\n\tconfig *logmonHookConfig\n\n\tlogger hclog.Logger\n}\n\ntype logmonHookConfig struct {\n\tlogDir string\n\tstdoutFifo string\n\tstderrFifo string\n}\n\nfunc newLogMonHook(cfg *logmonHookConfig, logger hclog.Logger) *logmonHook {\n\thook := &logmonHook{\n\t\tconfig: cfg,\n\t\tlogger: logger,\n\t}\n\n\treturn hook\n}\n\nfunc newLogMonHookConfig(taskName, logDir string) *logmonHookConfig {\n\tcfg := &logmonHookConfig{\n\t\tlogDir: logDir,\n\t}\n\tif runtime.GOOS == \"windows\" {\n\t\tid := uuid.Generate()[:8]\n\t\tcfg.stdoutFifo = fmt.Sprintf(\"\/\/.\/pipe\/%s-%s.stdout\", taskName, id)\n\t\tcfg.stderrFifo = fmt.Sprintf(\"\/\/.\/pipe\/%s-%s.stderr\", taskName, id)\n\t} else {\n\t\tcfg.stdoutFifo = filepath.Join(logDir, fmt.Sprintf(\".%s.stdout.fifo\", taskName))\n\t\tcfg.stderrFifo = filepath.Join(logDir, fmt.Sprintf(\".%s.stderr.fifo\", taskName))\n\t}\n\treturn cfg\n}\n\nfunc (*logmonHook) Name() string {\n\treturn \"logmon\"\n}\n\nfunc (h *logmonHook) launchLogMon(reattachConfig *plugin.ReattachConfig) error {\n\tl, c, err := logmon.LaunchLogMon(h.logger, reattachConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\th.logmon = l\n\th.logmonPluginClient = c\n\treturn nil\n}\n\nfunc reattachConfigFromHookData(data map[string]string) (*plugin.ReattachConfig, error) {\n\tif data == nil || data[logmonReattachKey] == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tvar cfg pstructs.ReattachConfig\n\terr := json.Unmarshal([]byte(data[logmonReattachKey]), &cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn pstructs.ReattachConfigToGoPlugin(&cfg)\n}\n\nfunc (h *logmonHook) 
Prestart(ctx context.Context,\n\treq *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error {\n\n\ttries := 0\n\tfor {\n\t\terr := h.prestartOneLoop(ctx, req)\n\t\tif err == bstructs.ErrPluginShutdown {\n\t\t\th.logger.Warn(\"logmon shutdown while making request\", \"error\", err)\n\n\t\t\tif tries > 3 {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ retry after killing process and ensure we start a new logmon process\n\t\t\ttries++\n\t\t\th.logmonPluginClient.Kill()\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tcontinue\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trCfg := pstructs.ReattachConfigFromGoPlugin(h.logmonPluginClient.ReattachConfig())\n\t\tjsonCfg, err := json.Marshal(rCfg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresp.State = map[string]string{logmonReattachKey: string(jsonCfg)}\n\t\treturn nil\n\t}\n}\n\nfunc (h *logmonHook) prestartOneLoop(ctx context.Context, req *interfaces.TaskPrestartRequest) error {\n\t\/\/ attach to a running logmon if state indicates one\n\tif h.logmonPluginClient == nil {\n\t\treattachConfig, err := reattachConfigFromHookData(req.PreviousState)\n\t\tif err != nil {\n\t\t\th.logger.Error(\"failed to load reattach config\", \"error\", err)\n\t\t\treturn err\n\t\t}\n\t\tif reattachConfig != nil {\n\t\t\tif err := h.launchLogMon(reattachConfig); err != nil {\n\t\t\t\th.logger.Warn(\"failed to reattach to logmon process\", \"error\", err)\n\t\t\t\t\/\/ if we failed to launch logmon, try again below\n\t\t\t}\n\t\t}\n\n\t}\n\n\t\/\/ create a new client in initial starts, failed reattachment, or if we detect exits\n\tif h.logmonPluginClient == nil || h.logmonPluginClient.Exited() {\n\t\tif err := h.launchLogMon(nil); err != nil {\n\t\t\t\/\/ Retry errors launching logmon as logmon may have crashed on start and\n\t\t\t\/\/ subsequent attempts will start a new one.\n\t\t\th.logger.Error(\"failed to launch logmon process\", \"error\", err)\n\t\t\treturn structs.NewRecoverableError(err, true)\n\t\t}\n\t}\n\n\terr := h.logmon.Start(&logmon.LogConfig{\n\t\tLogDir: h.config.logDir,\n\t\tStdoutLogFile: fmt.Sprintf(\"%s.stdout\", req.Task.Name),\n\t\tStderrLogFile: fmt.Sprintf(\"%s.stderr\", req.Task.Name),\n\t\tStdoutFifo: h.config.stdoutFifo,\n\t\tStderrFifo: h.config.stderrFifo,\n\t\tMaxFiles: req.Task.LogConfig.MaxFiles,\n\t\tMaxFileSizeMB: req.Task.LogConfig.MaxFileSizeMB,\n\t})\n\tif err != nil {\n\t\th.logger.Error(\"failed to start logmon\", \"error\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (h *logmonHook) Stop(_ context.Context, req *interfaces.TaskStopRequest, _ *interfaces.TaskStopResponse) error {\n\n\t\/\/ It's possible that Stop was called without calling Prestart on agent\n\t\/\/ restarts. Attempt to reattach to an existing logmon.\n\tif h.logmon == nil || h.logmonPluginClient == nil {\n\t\tif err := h.reattach(req); err != nil {\n\t\t\th.logger.Trace(\"error reattaching to logmon when stopping\", \"error\", err)\n\t\t}\n\t}\n\n\tif h.logmon != nil {\n\t\th.logmon.Stop()\n\t}\n\tif h.logmonPluginClient != nil {\n\t\th.logmonPluginClient.Kill()\n\t}\n\n\treturn nil\n}\n\n\/\/ reattach to a running logmon if possible. 
Will not start a new logmon.\nfunc (h *logmonHook) reattach(req *interfaces.TaskStopRequest) error {\n\treattachConfig, err := reattachConfigFromHookData(req.ExistingState)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Give up if there's no reattach config\n\tif reattachConfig == nil {\n\t\treturn nil\n\t}\n\n\treturn h.launchLogMon(reattachConfig)\n}\n<|endoftext|>"} {"text":"<commit_before>package portmapper\n\nimport (\n\t\"net\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/docker\/docker\/pkg\/reexec\"\n\t\"github.com\/docker\/libnetwork\/iptables\"\n\t\"github.com\/docker\/libnetwork\/netutils\"\n)\n\nfunc TestMain(m *testing.M) {\n\tif reexec.Init() {\n\t\treturn\n\t}\n\tos.Exit(m.Run())\n}\n\nfunc TestSetIptablesChain(t *testing.T) {\n\tpm := New()\n\n\tc := &iptables.Chain{\n\t\tName: \"TEST\",\n\t\tBridge: \"192.168.1.1\",\n\t}\n\n\tif pm.chain != nil {\n\t\tt.Fatal(\"chain should be nil at init\")\n\t}\n\n\tpm.SetIptablesChain(c)\n\tif pm.chain == nil {\n\t\tt.Fatal(\"chain should not be nil after set\")\n\t}\n}\n\nfunc TestMapTCPPorts(t *testing.T) {\n\tdefer netutils.SetupTestNetNS(t)()\n\tpm := New()\n\tdstIP1 := net.ParseIP(\"192.168.0.1\")\n\tdstIP2 := net.ParseIP(\"192.168.0.2\")\n\tdstAddr1 := &net.TCPAddr{IP: dstIP1, Port: 80}\n\tdstAddr2 := &net.TCPAddr{IP: dstIP2, Port: 80}\n\n\tsrcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP(\"172.16.0.1\")}\n\tsrcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP(\"172.16.0.2\")}\n\n\taddrEqual := func(addr1, addr2 net.Addr) bool {\n\t\treturn (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String())\n\t}\n\n\tif host, err := pm.Map(srcAddr1, dstIP1, 80, true); err != nil {\n\t\tt.Fatalf(\"Failed to allocate port: %s\", err)\n\t} else if !addrEqual(dstAddr1, host) {\n\t\tt.Fatalf(\"Incorrect mapping result: expected %s:%s, got %s:%s\",\n\t\t\tdstAddr1.String(), dstAddr1.Network(), host.String(), host.Network())\n\t}\n\n\tif _, err := pm.Map(srcAddr1, dstIP1, 80, true); err == nil {\n\t\tt.Fatalf(\"Port is in use - mapping should have failed\")\n\t}\n\n\tif _, err := pm.Map(srcAddr2, dstIP1, 80, true); err == nil {\n\t\tt.Fatalf(\"Port is in use - mapping should have failed\")\n\t}\n\n\tif _, err := pm.Map(srcAddr2, dstIP2, 80, true); err != nil {\n\t\tt.Fatalf(\"Failed to allocate port: %s\", err)\n\t}\n\n\tif pm.Unmap(dstAddr1) != nil {\n\t\tt.Fatalf(\"Failed to release port\")\n\t}\n\n\tif pm.Unmap(dstAddr2) != nil {\n\t\tt.Fatalf(\"Failed to release port\")\n\t}\n\n\tif pm.Unmap(dstAddr2) == nil {\n\t\tt.Fatalf(\"Port already released, but no error reported\")\n\t}\n}\n\nfunc TestGetUDPKey(t *testing.T) {\n\taddr := &net.UDPAddr{IP: net.ParseIP(\"192.168.1.5\"), Port: 53}\n\n\tkey := getKey(addr)\n\n\tif expected := \"192.168.1.5:53\/udp\"; key != expected {\n\t\tt.Fatalf(\"expected key %s got %s\", expected, key)\n\t}\n}\n\nfunc TestGetTCPKey(t *testing.T) {\n\taddr := &net.TCPAddr{IP: net.ParseIP(\"192.168.1.5\"), Port: 80}\n\n\tkey := getKey(addr)\n\n\tif expected := \"192.168.1.5:80\/tcp\"; key != expected {\n\t\tt.Fatalf(\"expected key %s got %s\", expected, key)\n\t}\n}\n\nfunc TestGetUDPIPAndPort(t *testing.T) {\n\taddr := &net.UDPAddr{IP: net.ParseIP(\"192.168.1.5\"), Port: 53}\n\n\tip, port := getIPAndPort(addr)\n\tif expected := \"192.168.1.5\"; ip.String() != expected {\n\t\tt.Fatalf(\"expected ip %s got %s\", expected, ip)\n\t}\n\n\tif ep := 53; port != ep {\n\t\tt.Fatalf(\"expected port %d got %d\", ep, port)\n\t}\n}\n\nfunc TestMapUDPPorts(t *testing.T) {\n\tdefer 
netutils.SetupTestNetNS(t)()\n\tpm := New()\n\tdstIP1 := net.ParseIP(\"192.168.0.1\")\n\tdstIP2 := net.ParseIP(\"192.168.0.2\")\n\tdstAddr1 := &net.UDPAddr{IP: dstIP1, Port: 80}\n\tdstAddr2 := &net.UDPAddr{IP: dstIP2, Port: 80}\n\n\tsrcAddr1 := &net.UDPAddr{Port: 1080, IP: net.ParseIP(\"172.16.0.1\")}\n\tsrcAddr2 := &net.UDPAddr{Port: 1080, IP: net.ParseIP(\"172.16.0.2\")}\n\n\taddrEqual := func(addr1, addr2 net.Addr) bool {\n\t\treturn (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String())\n\t}\n\n\tif host, err := pm.Map(srcAddr1, dstIP1, 80, true); err != nil {\n\t\tt.Fatalf(\"Failed to allocate port: %s\", err)\n\t} else if !addrEqual(dstAddr1, host) {\n\t\tt.Fatalf(\"Incorrect mapping result: expected %s:%s, got %s:%s\",\n\t\t\tdstAddr1.String(), dstAddr1.Network(), host.String(), host.Network())\n\t}\n\n\tif _, err := pm.Map(srcAddr1, dstIP1, 80, true); err == nil {\n\t\tt.Fatalf(\"Port is in use - mapping should have failed\")\n\t}\n\n\tif _, err := pm.Map(srcAddr2, dstIP1, 80, true); err == nil {\n\t\tt.Fatalf(\"Port is in use - mapping should have failed\")\n\t}\n\n\tif _, err := pm.Map(srcAddr2, dstIP2, 80, true); err != nil {\n\t\tt.Fatalf(\"Failed to allocate port: %s\", err)\n\t}\n\n\tif pm.Unmap(dstAddr1) != nil {\n\t\tt.Fatalf(\"Failed to release port\")\n\t}\n\n\tif pm.Unmap(dstAddr2) != nil {\n\t\tt.Fatalf(\"Failed to release port\")\n\t}\n\n\tif pm.Unmap(dstAddr2) == nil {\n\t\tt.Fatalf(\"Port already released, but no error reported\")\n\t}\n}\n\nfunc TestMapAllPortsSingleInterface(t *testing.T) {\n\tnewProxy = newMockProxyCommand\n\tdefer func() {\n\t\tnewProxy = newProxyCommand\n\t}()\n\tdefer netutils.SetupTestNetNS(t)()\n\tpm := New()\n\tdstIP1 := net.ParseIP(\"0.0.0.0\")\n\tsrcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP(\"172.16.0.1\")}\n\n\thosts := []net.Addr{}\n\tvar host net.Addr\n\tvar err error\n\n\tfor i := 0; i < 10; i++ {\n\t\tstart, end := pm.Allocator.Begin, pm.Allocator.End\n\t\tfor i := start; i < end; i++ {\n\t\t\tif host, err = pm.Map(srcAddr1, dstIP1, 0, true); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\thosts = append(hosts, host)\n\t\t}\n\n\t\tif _, err := pm.Map(srcAddr1, dstIP1, start, true); err == nil {\n\t\t\tt.Fatalf(\"Port %d should be bound but is not\", start)\n\t\t}\n\n\t\tfor _, val := range hosts {\n\t\t\tif err := pm.Unmap(val); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\n\t\thosts = []net.Addr{}\n\t}\n}\n\nfunc TestExecProxy(t *testing.T) {\n\tdefer netutils.SetupTestNetNS(t)()\n\targs := []string{\n\t\tuserlandProxyCommandName,\n\t\t\"-proto\", \"tcp\",\n\t\t\"-host-ip\", \"0.0.0.0\",\n\t\t\"-host-port\", \"9999\",\n\t\t\"-container-ip\", \"172.168.1.1\",\n\t\t\"-container-port\", \"8888\",\n\t}\n\tos.Args = args\n\tdoneChan := make(chan bool)\n\tgo func() {\n\t\texecProxy()\n\t\tdoneChan <- true\n\t}()\n\n\tselect {\n\tcase <-doneChan:\n\t\tt.Fatal(\"execProxy is not supposed to exit\")\n\tcase <-time.After(3 * time.Second):\n\t\treturn\n\t}\n}\n<commit_msg>Revert \"Added more test coverage for portmapper package.\"<commit_after>package portmapper\n\nimport (\n\t\"net\"\n\t\"testing\"\n\n\t\"github.com\/docker\/libnetwork\/iptables\"\n\t_ \"github.com\/docker\/libnetwork\/netutils\"\n)\n\nfunc init() {\n\t\/\/ override this func to mock out the proxy server\n\tnewProxy = newMockProxyCommand\n}\n\nfunc TestSetIptablesChain(t *testing.T) {\n\tpm := New()\n\n\tc := &iptables.Chain{\n\t\tName: \"TEST\",\n\t\tBridge: \"192.168.1.1\",\n\t}\n\n\tif pm.chain != nil {\n\t\tt.Fatal(\"chain should be 
nil at init\")\n\t}\n\n\tpm.SetIptablesChain(c)\n\tif pm.chain == nil {\n\t\tt.Fatal(\"chain should not be nil after set\")\n\t}\n}\n\nfunc TestMapTCPPorts(t *testing.T) {\n\tpm := New()\n\tdstIP1 := net.ParseIP(\"192.168.0.1\")\n\tdstIP2 := net.ParseIP(\"192.168.0.2\")\n\tdstAddr1 := &net.TCPAddr{IP: dstIP1, Port: 80}\n\tdstAddr2 := &net.TCPAddr{IP: dstIP2, Port: 80}\n\n\tsrcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP(\"172.16.0.1\")}\n\tsrcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP(\"172.16.0.2\")}\n\n\taddrEqual := func(addr1, addr2 net.Addr) bool {\n\t\treturn (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String())\n\t}\n\n\tif host, err := pm.Map(srcAddr1, dstIP1, 80, true); err != nil {\n\t\tt.Fatalf(\"Failed to allocate port: %s\", err)\n\t} else if !addrEqual(dstAddr1, host) {\n\t\tt.Fatalf(\"Incorrect mapping result: expected %s:%s, got %s:%s\",\n\t\t\tdstAddr1.String(), dstAddr1.Network(), host.String(), host.Network())\n\t}\n\n\tif _, err := pm.Map(srcAddr1, dstIP1, 80, true); err == nil {\n\t\tt.Fatalf(\"Port is in use - mapping should have failed\")\n\t}\n\n\tif _, err := pm.Map(srcAddr2, dstIP1, 80, true); err == nil {\n\t\tt.Fatalf(\"Port is in use - mapping should have failed\")\n\t}\n\n\tif _, err := pm.Map(srcAddr2, dstIP2, 80, true); err != nil {\n\t\tt.Fatalf(\"Failed to allocate port: %s\", err)\n\t}\n\n\tif pm.Unmap(dstAddr1) != nil {\n\t\tt.Fatalf(\"Failed to release port\")\n\t}\n\n\tif pm.Unmap(dstAddr2) != nil {\n\t\tt.Fatalf(\"Failed to release port\")\n\t}\n\n\tif pm.Unmap(dstAddr2) == nil {\n\t\tt.Fatalf(\"Port already released, but no error reported\")\n\t}\n}\n\nfunc TestGetUDPKey(t *testing.T) {\n\taddr := &net.UDPAddr{IP: net.ParseIP(\"192.168.1.5\"), Port: 53}\n\n\tkey := getKey(addr)\n\n\tif expected := \"192.168.1.5:53\/udp\"; key != expected {\n\t\tt.Fatalf(\"expected key %s got %s\", expected, key)\n\t}\n}\n\nfunc TestGetTCPKey(t *testing.T) {\n\taddr := &net.TCPAddr{IP: net.ParseIP(\"192.168.1.5\"), Port: 80}\n\n\tkey := getKey(addr)\n\n\tif expected := \"192.168.1.5:80\/tcp\"; key != expected {\n\t\tt.Fatalf(\"expected key %s got %s\", expected, key)\n\t}\n}\n\nfunc TestGetUDPIPAndPort(t *testing.T) {\n\taddr := &net.UDPAddr{IP: net.ParseIP(\"192.168.1.5\"), Port: 53}\n\n\tip, port := getIPAndPort(addr)\n\tif expected := \"192.168.1.5\"; ip.String() != expected {\n\t\tt.Fatalf(\"expected ip %s got %s\", expected, ip)\n\t}\n\n\tif ep := 53; port != ep {\n\t\tt.Fatalf(\"expected port %d got %d\", ep, port)\n\t}\n}\n\nfunc TestMapUDPPorts(t *testing.T) {\n\tpm := New()\n\tdstIP1 := net.ParseIP(\"192.168.0.1\")\n\tdstIP2 := net.ParseIP(\"192.168.0.2\")\n\tdstAddr1 := &net.UDPAddr{IP: dstIP1, Port: 80}\n\tdstAddr2 := &net.UDPAddr{IP: dstIP2, Port: 80}\n\n\tsrcAddr1 := &net.UDPAddr{Port: 1080, IP: net.ParseIP(\"172.16.0.1\")}\n\tsrcAddr2 := &net.UDPAddr{Port: 1080, IP: net.ParseIP(\"172.16.0.2\")}\n\n\taddrEqual := func(addr1, addr2 net.Addr) bool {\n\t\treturn (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String())\n\t}\n\n\tif host, err := pm.Map(srcAddr1, dstIP1, 80, true); err != nil {\n\t\tt.Fatalf(\"Failed to allocate port: %s\", err)\n\t} else if !addrEqual(dstAddr1, host) {\n\t\tt.Fatalf(\"Incorrect mapping result: expected %s:%s, got %s:%s\",\n\t\t\tdstAddr1.String(), dstAddr1.Network(), host.String(), host.Network())\n\t}\n\n\tif _, err := pm.Map(srcAddr1, dstIP1, 80, true); err == nil {\n\t\tt.Fatalf(\"Port is in use - mapping should have failed\")\n\t}\n\n\tif _, err := pm.Map(srcAddr2, dstIP1, 
80, true); err == nil {\n\t\tt.Fatalf(\"Port is in use - mapping should have failed\")\n\t}\n\n\tif _, err := pm.Map(srcAddr2, dstIP2, 80, true); err != nil {\n\t\tt.Fatalf(\"Failed to allocate port: %s\", err)\n\t}\n\n\tif pm.Unmap(dstAddr1) != nil {\n\t\tt.Fatalf(\"Failed to release port\")\n\t}\n\n\tif pm.Unmap(dstAddr2) != nil {\n\t\tt.Fatalf(\"Failed to release port\")\n\t}\n\n\tif pm.Unmap(dstAddr2) == nil {\n\t\tt.Fatalf(\"Port already released, but no error reported\")\n\t}\n}\n\nfunc TestMapAllPortsSingleInterface(t *testing.T) {\n\tpm := New()\n\tdstIP1 := net.ParseIP(\"0.0.0.0\")\n\tsrcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP(\"172.16.0.1\")}\n\n\thosts := []net.Addr{}\n\tvar host net.Addr\n\tvar err error\n\n\tdefer func() {\n\t\tfor _, val := range hosts {\n\t\t\tpm.Unmap(val)\n\t\t}\n\t}()\n\n\tfor i := 0; i < 10; i++ {\n\t\tstart, end := pm.Allocator.Begin, pm.Allocator.End\n\t\tfor i := start; i < end; i++ {\n\t\t\tif host, err = pm.Map(srcAddr1, dstIP1, 0, true); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\thosts = append(hosts, host)\n\t\t}\n\n\t\tif _, err := pm.Map(srcAddr1, dstIP1, start, true); err == nil {\n\t\t\tt.Fatalf(\"Port %d should be bound but is not\", start)\n\t\t}\n\n\t\tfor _, val := range hosts {\n\t\t\tif err := pm.Unmap(val); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\n\t\thosts = []net.Addr{}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package tar\n\nimport (\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\n\t\"polydawn.net\/repeatr\/lib\/guid\"\n\t\"polydawn.net\/repeatr\/rio\"\n)\n\nfunc makeReader(dataHash rio.CommitID, warehouseCoords rio.SiloURI) io.ReadCloser {\n\tu, err := url.Parse(string(warehouseCoords))\n\tif err != nil {\n\t\tpanic(rio.ConfigError.New(\"failed to parse URI: %s\", err))\n\t}\n\tswitch u.Scheme {\n\tcase \"file+ca\":\n\t\tu.Path = filepath.Join(u.Path, string(dataHash))\n\t\tfallthrough\n\tcase \"file\":\n\t\tu.Path = filepath.Join(u.Host, u.Path) \/\/ file uris don't have hosts\n\t\tfile, err := os.OpenFile(u.Path, os.O_RDONLY, 0644)\n\t\tif err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\tpanic(rio.DataDNE.New(\"Unable to read %q: %s\", u.String(), err))\n\t\t\t} else {\n\t\t\t\tpanic(rio.WarehouseIOError.New(\"Unable to read %q: %s\", u.String(), err))\n\t\t\t}\n\t\t}\n\t\treturn file\n\tcase \"http+ca\":\n\t\tu.Path = path.Join(u.Path, string(dataHash))\n\t\tu.Scheme = \"http\"\n\t\tfallthrough\n\tcase \"http\":\n\t\tresp, err := http.Get(u.String())\n\t\tif err != nil {\n\t\t\tpanic(rio.WarehouseIOError.New(\"Unable to fetch %q: %s\", u.String(), err))\n\t\t}\n\t\treturn resp.Body\n\tcase \"https+ca\":\n\t\tu.Path = path.Join(u.Path, string(dataHash))\n\t\tu.Scheme = \"https\"\n\t\tfallthrough\n\tcase \"https\":\n\t\tresp, err := http.Get(u.String())\n\t\tif err != nil {\n\t\t\tpanic(rio.WarehouseIOError.New(\"Unable to fetch %q: %s\", u.String(), err))\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase 200:\n\t\t\treturn resp.Body\n\t\tcase 404:\n\t\t\tpanic(rio.DataDNE.New(\"Fetch %q: not found\", u.String()))\n\t\tdefault:\n\t\t\tpanic(rio.WarehouseIOError.New(\"Unable to fetch %q: http status %s\", u.String(), resp.Status))\n\t\t}\n\tcase \"\":\n\t\tpanic(rio.ConfigError.New(\"missing scheme in warehouse URI; need a prefix, e.g. 
\\\"file:\/\/\\\" or \\\"http:\/\/\\\"\"))\n\tdefault:\n\t\tpanic(rio.ConfigError.New(\"unsupported scheme in warehouse URI: %q\", u.Scheme))\n\t}\n}\n\n\/\/ summarizes behavior of basically all transports where tar is used as the fs metaphor... they're just one blob\n\/\/ ... nvm, haven't actually thought of anything that needs more than io.ReadCloser yet\n\/\/type soloStreamReader struct {\n\/\/\tio.Reader\n\/\/\tio.Closer\n\/\/}\n\nfunc makeWriteController(warehouseCoords rio.SiloURI) StreamingWarehouseWriteController {\n\tu, err := url.Parse(string(warehouseCoords))\n\tif err != nil {\n\t\tpanic(rio.ConfigError.New(\"failed to parse URI: %s\", err))\n\t}\n\tcontroller := &fileWarehouseWriteController{\n\t\tpathPrefix: u.Path,\n\t}\n\tswitch u.Scheme {\n\tcase \"file+ca\":\n\t\tcontroller.ctntAddr = true\n\t\tfallthrough\n\tcase \"file\":\n\t\t\/\/ Pick a random upload path\n\t\tcontroller.pathPrefix = filepath.Join(u.Host, controller.pathPrefix) \/\/ file uris don't have hosts\n\t\tif controller.ctntAddr {\n\t\t\tcontroller.tmpPath = filepath.Join(controller.pathPrefix, \".tmp.upload.\"+guid.New())\n\t\t} else {\n\t\t\tcontroller.tmpPath = filepath.Join(path.Dir(controller.pathPrefix), \".tmp.upload.\"+path.Base(controller.pathPrefix)+\".\"+guid.New())\n\t\t}\n\t\t\/\/ Check if warehouse path exists.\n\t\t\/\/ Warehouse is expected to exist already; transmats\n\t\t\/\/ should *not* create one whimsically, that's someone else's responsibility.\n\t\twarehouseBasePath := filepath.Dir(controller.tmpPath)\n\t\tif _, err := os.Stat(warehouseBasePath); err != nil {\n\t\t\tpanic(rio.WarehouseUnavailableError.New(\"Warehouse unavailable: %q %s\", warehouseBasePath, err))\n\t\t}\n\t\t\/\/ Open file to shovel data into\n\t\tfile, err := os.OpenFile(controller.tmpPath, os.O_CREATE|os.O_WRONLY, 0644)\n\t\tif err != nil {\n\t\t\tpanic(rio.WarehouseIOError.New(\"Unable to write %q: %s\", controller.tmpPath, err))\n\t\t}\n\t\tcontroller.stream = file\n\t\treturn controller\n\tcase \"http+ca\":\n\t\tfallthrough\n\tcase \"http\":\n\t\tfallthrough\n\tcase \"https+ca\":\n\t\tfallthrough\n\tcase \"https\":\n\t\tpanic(rio.ConfigError.New(\"http transports are only supported for read-only use\"))\n\tcase \"\":\n\t\tpanic(rio.ConfigError.New(\"missing scheme in warehouse URI; need a prefix, e.g. 
\\\"file:\/\/\\\" or \\\"http:\/\/\\\"\"))\n\tdefault:\n\t\tpanic(rio.ConfigError.New(\"unsupported scheme in warehouse URI: %q\", u.Scheme))\n\t}\n}\n\ntype StreamingWarehouseWriteController interface {\n\tWriter() io.Writer\n\tCommit(dataHash rio.CommitID)\n}\n\ntype fileWarehouseWriteController struct {\n\tstream io.WriteCloser\n\ttmpPath string\n\tpathPrefix string\n\tctntAddr bool\n}\n\nfunc (wc *fileWarehouseWriteController) Writer() io.Writer {\n\treturn wc.stream\n}\nfunc (wc *fileWarehouseWriteController) Commit(dataHash rio.CommitID) {\n\twc.stream.Close()\n\tvar finalPath string\n\tif wc.ctntAddr {\n\t\tfinalPath = path.Join(wc.pathPrefix, string(dataHash))\n\t} else {\n\t\tfinalPath = wc.pathPrefix\n\t}\n\tos.Rename(wc.tmpPath, finalPath)\n}\n<commit_msg>Most of these errors should be considered \"warehouse unavailable\" by default.<commit_after>package tar\n\nimport (\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\n\t\"polydawn.net\/repeatr\/lib\/guid\"\n\t\"polydawn.net\/repeatr\/rio\"\n)\n\nfunc makeReader(dataHash rio.CommitID, warehouseCoords rio.SiloURI) io.ReadCloser {\n\tu, err := url.Parse(string(warehouseCoords))\n\tif err != nil {\n\t\tpanic(rio.ConfigError.New(\"failed to parse URI: %s\", err))\n\t}\n\tswitch u.Scheme {\n\tcase \"file+ca\":\n\t\tu.Path = filepath.Join(u.Path, string(dataHash))\n\t\tfallthrough\n\tcase \"file\":\n\t\tu.Path = filepath.Join(u.Host, u.Path) \/\/ file uris don't have hosts\n\t\tfile, err := os.OpenFile(u.Path, os.O_RDONLY, 0644)\n\t\tif err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\tpanic(rio.DataDNE.New(\"Unable to read %q: %s\", u.String(), err))\n\t\t\t} else {\n\t\t\t\tpanic(rio.WarehouseUnavailableError.New(\"Unable to read %q: %s\", u.String(), err))\n\t\t\t}\n\t\t}\n\t\treturn file\n\tcase \"http+ca\":\n\t\tu.Path = path.Join(u.Path, string(dataHash))\n\t\tu.Scheme = \"http\"\n\t\tfallthrough\n\tcase \"http\":\n\t\tresp, err := http.Get(u.String())\n\t\tif err != nil {\n\t\t\tpanic(rio.WarehouseUnavailableError.New(\"Unable to fetch %q: %s\", u.String(), err))\n\t\t}\n\t\treturn resp.Body\n\tcase \"https+ca\":\n\t\tu.Path = path.Join(u.Path, string(dataHash))\n\t\tu.Scheme = \"https\"\n\t\tfallthrough\n\tcase \"https\":\n\t\tresp, err := http.Get(u.String())\n\t\tif err != nil {\n\t\t\tpanic(rio.WarehouseUnavailableError.New(\"Unable to fetch %q: %s\", u.String(), err))\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase 200:\n\t\t\treturn resp.Body\n\t\tcase 404:\n\t\t\tpanic(rio.DataDNE.New(\"Fetch %q: not found\", u.String()))\n\t\tdefault:\n\t\t\tpanic(rio.WarehouseIOError.New(\"Unable to fetch %q: http status %s\", u.String(), resp.Status))\n\t\t}\n\tcase \"\":\n\t\tpanic(rio.ConfigError.New(\"missing scheme in warehouse URI; need a prefix, e.g. \\\"file:\/\/\\\" or \\\"http:\/\/\\\"\"))\n\tdefault:\n\t\tpanic(rio.ConfigError.New(\"unsupported scheme in warehouse URI: %q\", u.Scheme))\n\t}\n}\n\n\/\/ summarizes behavior of basically all transports where tar is used as the fs metaphor... they're just one blob\n\/\/ ... 
nvm, haven't actually thought of anything that needs more than io.ReadCloser yet\n\/\/type soloStreamReader struct {\n\/\/\tio.Reader\n\/\/\tio.Closer\n\/\/}\n\nfunc makeWriteController(warehouseCoords rio.SiloURI) StreamingWarehouseWriteController {\n\tu, err := url.Parse(string(warehouseCoords))\n\tif err != nil {\n\t\tpanic(rio.ConfigError.New(\"failed to parse URI: %s\", err))\n\t}\n\tcontroller := &fileWarehouseWriteController{\n\t\tpathPrefix: u.Path,\n\t}\n\tswitch u.Scheme {\n\tcase \"file+ca\":\n\t\tcontroller.ctntAddr = true\n\t\tfallthrough\n\tcase \"file\":\n\t\t\/\/ Pick a random upload path\n\t\tcontroller.pathPrefix = filepath.Join(u.Host, controller.pathPrefix) \/\/ file uris don't have hosts\n\t\tif controller.ctntAddr {\n\t\t\tcontroller.tmpPath = filepath.Join(controller.pathPrefix, \".tmp.upload.\"+guid.New())\n\t\t} else {\n\t\t\tcontroller.tmpPath = filepath.Join(path.Dir(controller.pathPrefix), \".tmp.upload.\"+path.Base(controller.pathPrefix)+\".\"+guid.New())\n\t\t}\n\t\t\/\/ Check if warehouse path exists.\n\t\t\/\/ Warehouse is expected to exist already; transmats\n\t\t\/\/ should *not* create one whimsically, that's someone else's responsibility.\n\t\twarehouseBasePath := filepath.Dir(controller.tmpPath)\n\t\tif _, err := os.Stat(warehouseBasePath); err != nil {\n\t\t\tpanic(rio.WarehouseUnavailableError.New(\"Warehouse unavailable: %q %s\", warehouseBasePath, err))\n\t\t}\n\t\t\/\/ Open file to shovel data into\n\t\tfile, err := os.OpenFile(controller.tmpPath, os.O_CREATE|os.O_WRONLY, 0644)\n\t\tif err != nil {\n\t\t\tpanic(rio.WarehouseIOError.New(\"Unable to write %q: %s\", controller.tmpPath, err))\n\t\t}\n\t\tcontroller.stream = file\n\t\treturn controller\n\tcase \"http+ca\":\n\t\tfallthrough\n\tcase \"http\":\n\t\tfallthrough\n\tcase \"https+ca\":\n\t\tfallthrough\n\tcase \"https\":\n\t\tpanic(rio.ConfigError.New(\"http transports are only supported for read-only use\"))\n\tcase \"\":\n\t\tpanic(rio.ConfigError.New(\"missing scheme in warehouse URI; need a prefix, e.g. 
\\\"file:\/\/\\\" or \\\"http:\/\/\\\"\"))\n\tdefault:\n\t\tpanic(rio.ConfigError.New(\"unsupported scheme in warehouse URI: %q\", u.Scheme))\n\t}\n}\n\ntype StreamingWarehouseWriteController interface {\n\tWriter() io.Writer\n\tCommit(dataHash rio.CommitID)\n}\n\ntype fileWarehouseWriteController struct {\n\tstream io.WriteCloser\n\ttmpPath string\n\tpathPrefix string\n\tctntAddr bool\n}\n\nfunc (wc *fileWarehouseWriteController) Writer() io.Writer {\n\treturn wc.stream\n}\nfunc (wc *fileWarehouseWriteController) Commit(dataHash rio.CommitID) {\n\twc.stream.Close()\n\tvar finalPath string\n\tif wc.ctntAddr {\n\t\tfinalPath = path.Join(wc.pathPrefix, string(dataHash))\n\t} else {\n\t\tfinalPath = wc.pathPrefix\n\t}\n\tos.Rename(wc.tmpPath, finalPath)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ SPDX-License-Identifier: Apache-2.0\n\/\/ Copyright 2016-2021 Authors of Cilium\n\n\/\/ This module implements Cilium's network device detection.\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/datapath\/linux\/probes\"\n\t\"github.com\/cilium\/cilium\/pkg\/k8s\"\n\t\"github.com\/cilium\/cilium\/pkg\/lock\"\n\t\"github.com\/cilium\/cilium\/pkg\/logging\/logfields\"\n\t\"github.com\/cilium\/cilium\/pkg\/mac\"\n\t\"github.com\/cilium\/cilium\/pkg\/node\"\n\t\"github.com\/cilium\/cilium\/pkg\/option\"\n\t\"github.com\/vishvananda\/netlink\"\n\t\"golang.org\/x\/sys\/unix\"\n)\n\nvar (\n\texcludedDevicePrefixes = []string{\n\t\t\"cilium_\",\n\t\t\"lo\",\n\t\t\"lxc\",\n\t\t\"cni\",\n\t\t\"docker\",\n\t}\n\n\t\/\/ Route filter to look at all routing tables.\n\trouteFilter = netlink.Route{\n\t\tTable: unix.RT_TABLE_UNSPEC,\n\t}\n\trouteFilterMask = netlink.RT_FILTER_TABLE\n)\n\ntype DeviceManager struct {\n\tlock.Mutex\n\tdevices map[string]struct{}\n}\n\nfunc NewDeviceManager() *DeviceManager {\n\treturn &DeviceManager{\n\t\tdevices: make(map[string]struct{}),\n\t}\n}\n\n\/\/ Detect tries to detect devices to which BPF programs may be loaded.\n\/\/ See areDevicesRequired() for features that require the device information.\n\/\/\n\/\/ The devices are detected by looking at all the configured global unicast\n\/\/ routes in the system.\nfunc (dm *DeviceManager) Detect() error {\n\tdm.Lock()\n\tdefer dm.Unlock()\n\tdm.devices = make(map[string]struct{})\n\n\tif err := expandDevices(); err != nil {\n\t\treturn err\n\t}\n\n\tl3DevOK := true\n\tif !option.Config.EnableHostLegacyRouting {\n\t\t\/\/ Probe whether fast redirect is supported for L3 devices. 
This will\n\t\t\/\/ invoke bpftool and requires root privileges, so we're only probing\n\t\t\/\/ when necessary.\n\t\tl3DevOK = supportL3Dev()\n\t}\n\n\tif len(option.Config.Devices) == 0 && areDevicesRequired() {\n\t\t\/\/ Detect the devices from the system routing table by finding the devices\n\t\t\/\/ which have global unicast routes.\n\t\tfamily := netlink.FAMILY_ALL\n\t\tif option.Config.EnableIPv4 && !option.Config.EnableIPv6 {\n\t\t\tfamily = netlink.FAMILY_V4\n\t\t} else if !option.Config.EnableIPv4 && option.Config.EnableIPv6 {\n\t\t\tfamily = netlink.FAMILY_V6\n\t\t}\n\n\t\troutes, err := netlink.RouteListFiltered(family, &routeFilter, routeFilterMask)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot retrieve routes for device detection: %w\", err)\n\t\t}\n\t\tdm.updateDevicesFromRoutes(l3DevOK, routes)\n\t} else {\n\t\tfor _, dev := range option.Config.Devices {\n\t\t\tdm.devices[dev] = struct{}{}\n\t\t}\n\t}\n\n\tdetectDirectRoutingDev := option.Config.EnableNodePort\n\tif option.Config.DirectRoutingDevice != \"\" {\n\t\tdm.devices[option.Config.DirectRoutingDevice] = struct{}{}\n\t\tdetectDirectRoutingDev = false\n\t}\n\n\tdetectIPv6MCastDev := option.Config.EnableIPv6NDP\n\tif option.Config.IPv6MCastDevice != \"\" {\n\t\tdm.devices[option.Config.IPv6MCastDevice] = struct{}{}\n\t\tdetectIPv6MCastDev = false\n\t}\n\n\tif detectDirectRoutingDev || detectIPv6MCastDev {\n\t\tk8sNodeDev := \"\"\n\t\tk8sNodeLink, err := findK8SNodeIPLink()\n\t\tif err == nil {\n\t\t\tk8sNodeDev = k8sNodeLink.Attrs().Name\n\t\t\tdm.devices[k8sNodeDev] = struct{}{}\n\t\t} else if k8s.IsEnabled() {\n\t\t\treturn fmt.Errorf(\"k8s is enabled, but still failed to find node IP: %w\", err)\n\t\t}\n\n\t\tif detectDirectRoutingDev {\n\t\t\t\/\/ If only one device found, use that one. Otherwise use the device with k8s node IP.\n\t\t\tif len(dm.devices) == 1 {\n\t\t\t\tfor dev := range dm.devices {\n\t\t\t\t\toption.Config.DirectRoutingDevice = dev\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} else if k8sNodeDev != \"\" {\n\t\t\t\toption.Config.DirectRoutingDevice = k8sNodeDev\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"Unable to determine direct routing device. Use --%s to specify it\",\n\t\t\t\t\toption.DirectRoutingDevice)\n\t\t\t}\n\t\t\tlog.WithField(option.DirectRoutingDevice, option.Config.DirectRoutingDevice).\n\t\t\t\tInfo(\"Direct routing device detected\")\n\t\t}\n\n\t\tif detectIPv6MCastDev {\n\t\t\tif k8sNodeLink != nil && k8sNodeLink.Attrs().Flags&net.FlagMulticast != 0 {\n\t\t\t\toption.Config.IPv6MCastDevice = k8sNodeDev\n\t\t\t\tlog.WithField(option.IPv6MCastDevice, option.Config.IPv6MCastDevice).Info(\"IPv6 multicast device detected\")\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"Unable to determine Multicast device. 
Use --%s to specify it\",\n\t\t\t\t\toption.IPv6MCastDevice)\n\t\t\t}\n\t\t}\n\t}\n\n\toption.Config.Devices = dm.getDevices()\n\tlog.WithField(logfields.Devices, option.Config.Devices).Info(\"Detected devices\")\n\n\treturn nil\n}\n\n\/\/ GetDevices returns the current list of devices Cilium should attach programs to.\nfunc (dm *DeviceManager) GetDevices() []string {\n\tdm.Lock()\n\tdefer dm.Unlock()\n\treturn dm.getDevices()\n}\n\nfunc (dm *DeviceManager) getDevices() []string {\n\tdevs := make([]string, 0, len(dm.devices))\n\tfor dev := range dm.devices {\n\t\tdevs = append(devs, dev)\n\t}\n\tsort.Strings(devs)\n\treturn devs\n}\n\n\/\/ isViableDevice returns true if the given link is usable and Cilium should attach\n\/\/ programs to it.\nfunc (dm *DeviceManager) isViableDevice(l3DevOK, hasDefaultRoute bool, link netlink.Link) bool {\n\tname := link.Attrs().Name\n\n\t\/\/ Do not consider any of the excluded devices.\n\tfor _, p := range excludedDevicePrefixes {\n\t\tif strings.HasPrefix(name, p) {\n\t\t\tlog.WithField(logfields.Device, name).\n\t\t\t\tDebugf(\"Skipping device as it has excluded prefix '%s'\", p)\n\t\t\treturn false\n\t\t}\n\t}\n\n\t\/\/ Skip lower bond devices.\n\tif link.Attrs().RawFlags&unix.IFF_SLAVE != 0 {\n\t\tlog.WithField(logfields.Device, name).Debug(\"Skipping bonded device\")\n\t\treturn false\n\t}\n\n\t\/\/ Ignore L3 devices if we cannot support them.\n\tif !l3DevOK && !mac.LinkHasMacAddr(link) {\n\t\tlog.WithField(logfields.Device, name).\n\t\t\tInfo(\"Ignoring L3 device; >= 5.8 kernel is required.\")\n\t\treturn false\n\t}\n\n\t\/\/ Skip veth devices that don't have a default route.\n\t\/\/ This is a workaround for kubernetes-in-docker. We want to avoid\n\t\/\/ veth devices in general as they may be leftovers from another CNI.\n\tif !hasDefaultRoute {\n\t\t_, virtual := link.(*netlink.Veth)\n\t\tif virtual {\n\t\t\tlog.WithField(logfields.Device, name).\n\t\t\t\tDebug(\"Ignoring veth device as it has no default route\")\n\t\t\treturn false\n\t\t}\n\n\t}\n\treturn true\n}\n\ntype linkInfo struct {\n\thasDefaultRoute bool\n}\n\n\/\/ updateDevicesFromRoutes processes a batch of routes and updates the set of\n\/\/ devices. Returns true if devices changed.\nfunc (dm *DeviceManager) updateDevicesFromRoutes(l3DevOK bool, routes []netlink.Route) bool {\n\tlinkInfos := make(map[int]linkInfo)\n\n\t\/\/ Collect all link indices mentioned in the route update batch\n\tfor _, route := range routes {\n\t\t\/\/ Only consider devices that have global unicast routes,\n\t\t\/\/ e.g. 
skip loopback, multicast and link local routes.\n\t\tif route.Dst != nil && !route.Dst.IP.IsGlobalUnicast() {\n\t\t\tcontinue\n\t\t}\n\t\tif route.Table == unix.RT_TABLE_LOCAL {\n\t\t\tcontinue\n\t\t}\n\t\tlinkInfo := linkInfos[route.LinkIndex]\n\t\tlinkInfo.hasDefaultRoute = linkInfo.hasDefaultRoute || route.Dst == nil\n\t\tlinkInfos[route.LinkIndex] = linkInfo\n\t}\n\n\tchanged := false\n\tfor index, info := range linkInfos {\n\t\tlink, err := netlink.LinkByIndex(index)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).WithField(logfields.LinkIndex, index).\n\t\t\t\tWarn(\"Failed to get link by index\")\n\t\t\tcontinue\n\t\t}\n\t\tname := link.Attrs().Name\n\n\t\t\/\/ Skip devices we already know.\n\t\tif _, exists := dm.devices[name]; exists {\n\t\t\tcontinue\n\t\t}\n\n\t\tviable := dm.isViableDevice(l3DevOK, info.hasDefaultRoute, link)\n\t\tif viable {\n\t\t\tdm.devices[name] = struct{}{}\n\t\t\tchanged = true\n\t\t}\n\t}\n\treturn changed\n}\n\n\/\/ expandDevices expands all wildcard device names to concrete devices.\n\/\/ e.g. device \"eth+\" expands to \"eth0,eth1\" etc. Non-matching wildcards are ignored.\nfunc expandDevices() error {\n\tallLinks, err := netlink.LinkList()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Device wildcard expansion failed to fetch devices: %w\", err)\n\t}\n\texpandedDevices := make(map[string]struct{})\n\tfor _, iface := range option.Config.Devices {\n\t\tif strings.HasSuffix(iface, \"+\") {\n\t\t\tprefix := strings.TrimRight(iface, \"+\")\n\t\t\tfor _, link := range allLinks {\n\t\t\t\tattrs := link.Attrs()\n\t\t\t\tif strings.HasPrefix(attrs.Name, prefix) {\n\t\t\t\t\texpandedDevices[attrs.Name] = struct{}{}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\texpandedDevices[iface] = struct{}{}\n\t\t}\n\t}\n\tif len(option.Config.Devices) > 0 && len(expandedDevices) == 0 {\n\t\t\/\/ User defined devices, but expansion yielded no devices. Fail here to not\n\t\t\/\/ surprise with auto-detection.\n\t\treturn fmt.Errorf(\"Device wildcard expansion failed to detect devices. 
Please verify --%s option.\",\n\t\t\toption.Devices)\n\t}\n\n\toption.Config.Devices = make([]string, 0, len(expandedDevices))\n\tfor dev := range expandedDevices {\n\t\toption.Config.Devices = append(option.Config.Devices, dev)\n\t}\n\tsort.Strings(option.Config.Devices)\n\treturn nil\n}\n\nfunc areDevicesRequired() bool {\n\treturn option.Config.EnableNodePort ||\n\t\toption.Config.EnableHostFirewall ||\n\t\toption.Config.EnableBandwidthManager ||\n\t\toption.Config.EnableIPSec\n}\n\nfunc findK8SNodeIPLink() (netlink.Link, error) {\n\tnodeIP := node.GetK8sNodeIP()\n\n\tif nodeIP == nil {\n\t\treturn nil, fmt.Errorf(\"Failed to find K8s node device as node IP is not known\")\n\t}\n\n\tvar family int\n\tif nodeIP.To4() != nil {\n\t\tfamily = netlink.FAMILY_V4\n\t} else {\n\t\tfamily = netlink.FAMILY_V6\n\t}\n\n\tif addrs, err := netlink.AddrList(nil, family); err == nil {\n\t\tfor _, a := range addrs {\n\t\t\tif a.IP.Equal(nodeIP) {\n\t\t\t\tlink, err := netlink.LinkByIndex(a.LinkIndex)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\treturn link, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"K8s node device not found\")\n}\n\n\/\/ supportL3Dev returns true if the kernel is new enough to support fast redirection of\n\/\/ packets coming from L3 devices using bpf_skb_redirect_peer.\nfunc supportL3Dev() bool {\n\tprobesManager := probes.NewProbeManager()\n\tif h := probesManager.GetHelpers(\"sched_cls\"); h != nil {\n\t\t_, found := h[\"bpf_skb_change_head\"]\n\t\treturn found\n\t}\n\treturn false\n}\n<commit_msg>daemon: Do not detect devices for IPsec<commit_after>\/\/ SPDX-License-Identifier: Apache-2.0\n\/\/ Copyright 2016-2021 Authors of Cilium\n\n\/\/ This module implements Cilium's network device detection.\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/datapath\/linux\/probes\"\n\t\"github.com\/cilium\/cilium\/pkg\/k8s\"\n\t\"github.com\/cilium\/cilium\/pkg\/lock\"\n\t\"github.com\/cilium\/cilium\/pkg\/logging\/logfields\"\n\t\"github.com\/cilium\/cilium\/pkg\/mac\"\n\t\"github.com\/cilium\/cilium\/pkg\/node\"\n\t\"github.com\/cilium\/cilium\/pkg\/option\"\n\t\"github.com\/vishvananda\/netlink\"\n\t\"golang.org\/x\/sys\/unix\"\n)\n\nvar (\n\texcludedDevicePrefixes = []string{\n\t\t\"cilium_\",\n\t\t\"lo\",\n\t\t\"lxc\",\n\t\t\"cni\",\n\t\t\"docker\",\n\t}\n\n\t\/\/ Route filter to look at all routing tables.\n\trouteFilter = netlink.Route{\n\t\tTable: unix.RT_TABLE_UNSPEC,\n\t}\n\trouteFilterMask = netlink.RT_FILTER_TABLE\n)\n\ntype DeviceManager struct {\n\tlock.Mutex\n\tdevices map[string]struct{}\n}\n\nfunc NewDeviceManager() *DeviceManager {\n\treturn &DeviceManager{\n\t\tdevices: make(map[string]struct{}),\n\t}\n}\n\n\/\/ Detect tries to detect devices to which BPF programs may be loaded.\n\/\/ See areDevicesRequired() for features that require the device information.\n\/\/\n\/\/ The devices are detected by looking at all the configured global unicast\n\/\/ routes in the system.\nfunc (dm *DeviceManager) Detect() error {\n\tdm.Lock()\n\tdefer dm.Unlock()\n\tdm.devices = make(map[string]struct{})\n\n\tif err := expandDevices(); err != nil {\n\t\treturn err\n\t}\n\n\tl3DevOK := true\n\tif !option.Config.EnableHostLegacyRouting {\n\t\t\/\/ Probe whether fast redirect is supported for L3 devices. 
This will\n\t\t\/\/ invoke bpftool and requires root privileges, so we're only probing\n\t\t\/\/ when necessary.\n\t\tl3DevOK = supportL3Dev()\n\t}\n\n\tif len(option.Config.Devices) == 0 && areDevicesRequired() {\n\t\t\/\/ Detect the devices from the system routing table by finding the devices\n\t\t\/\/ which have global unicast routes.\n\t\tfamily := netlink.FAMILY_ALL\n\t\tif option.Config.EnableIPv4 && !option.Config.EnableIPv6 {\n\t\t\tfamily = netlink.FAMILY_V4\n\t\t} else if !option.Config.EnableIPv4 && option.Config.EnableIPv6 {\n\t\t\tfamily = netlink.FAMILY_V6\n\t\t}\n\n\t\troutes, err := netlink.RouteListFiltered(family, &routeFilter, routeFilterMask)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot retrieve routes for device detection: %w\", err)\n\t\t}\n\t\tdm.updateDevicesFromRoutes(l3DevOK, routes)\n\t} else {\n\t\tfor _, dev := range option.Config.Devices {\n\t\t\tdm.devices[dev] = struct{}{}\n\t\t}\n\t}\n\n\tdetectDirectRoutingDev := option.Config.EnableNodePort\n\tif option.Config.DirectRoutingDevice != \"\" {\n\t\tdm.devices[option.Config.DirectRoutingDevice] = struct{}{}\n\t\tdetectDirectRoutingDev = false\n\t}\n\n\tdetectIPv6MCastDev := option.Config.EnableIPv6NDP\n\tif option.Config.IPv6MCastDevice != \"\" {\n\t\tdm.devices[option.Config.IPv6MCastDevice] = struct{}{}\n\t\tdetectIPv6MCastDev = false\n\t}\n\n\tif detectDirectRoutingDev || detectIPv6MCastDev {\n\t\tk8sNodeDev := \"\"\n\t\tk8sNodeLink, err := findK8SNodeIPLink()\n\t\tif err == nil {\n\t\t\tk8sNodeDev = k8sNodeLink.Attrs().Name\n\t\t\tdm.devices[k8sNodeDev] = struct{}{}\n\t\t} else if k8s.IsEnabled() {\n\t\t\treturn fmt.Errorf(\"k8s is enabled, but still failed to find node IP: %w\", err)\n\t\t}\n\n\t\tif detectDirectRoutingDev {\n\t\t\t\/\/ If only one device found, use that one. Otherwise use the device with k8s node IP.\n\t\t\tif len(dm.devices) == 1 {\n\t\t\t\tfor dev := range dm.devices {\n\t\t\t\t\toption.Config.DirectRoutingDevice = dev\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} else if k8sNodeDev != \"\" {\n\t\t\t\toption.Config.DirectRoutingDevice = k8sNodeDev\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"Unable to determine direct routing device. Use --%s to specify it\",\n\t\t\t\t\toption.DirectRoutingDevice)\n\t\t\t}\n\t\t\tlog.WithField(option.DirectRoutingDevice, option.Config.DirectRoutingDevice).\n\t\t\t\tInfo(\"Direct routing device detected\")\n\t\t}\n\n\t\tif detectIPv6MCastDev {\n\t\t\tif k8sNodeLink != nil && k8sNodeLink.Attrs().Flags&net.FlagMulticast != 0 {\n\t\t\t\toption.Config.IPv6MCastDevice = k8sNodeDev\n\t\t\t\tlog.WithField(option.IPv6MCastDevice, option.Config.IPv6MCastDevice).Info(\"IPv6 multicast device detected\")\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"Unable to determine Multicast device. 
Use --%s to specify it\",\n\t\t\t\t\toption.IPv6MCastDevice)\n\t\t\t}\n\t\t}\n\t}\n\n\toption.Config.Devices = dm.getDevices()\n\tlog.WithField(logfields.Devices, option.Config.Devices).Info(\"Detected devices\")\n\n\treturn nil\n}\n\n\/\/ GetDevices returns the current list of devices Cilium should attach programs to.\nfunc (dm *DeviceManager) GetDevices() []string {\n\tdm.Lock()\n\tdefer dm.Unlock()\n\treturn dm.getDevices()\n}\n\nfunc (dm *DeviceManager) getDevices() []string {\n\tdevs := make([]string, 0, len(dm.devices))\n\tfor dev := range dm.devices {\n\t\tdevs = append(devs, dev)\n\t}\n\tsort.Strings(devs)\n\treturn devs\n}\n\n\/\/ isViableDevice returns true if the given link is usable and Cilium should attach\n\/\/ programs to it.\nfunc (dm *DeviceManager) isViableDevice(l3DevOK, hasDefaultRoute bool, link netlink.Link) bool {\n\tname := link.Attrs().Name\n\n\t\/\/ Do not consider any of the excluded devices.\n\tfor _, p := range excludedDevicePrefixes {\n\t\tif strings.HasPrefix(name, p) {\n\t\t\tlog.WithField(logfields.Device, name).\n\t\t\t\tDebugf(\"Skipping device as it has excluded prefix '%s'\", p)\n\t\t\treturn false\n\t\t}\n\t}\n\n\t\/\/ Skip lower bond devices.\n\tif link.Attrs().RawFlags&unix.IFF_SLAVE != 0 {\n\t\tlog.WithField(logfields.Device, name).Debug(\"Skipping bonded device\")\n\t\treturn false\n\t}\n\n\t\/\/ Ignore L3 devices if we cannot support them.\n\tif !l3DevOK && !mac.LinkHasMacAddr(link) {\n\t\tlog.WithField(logfields.Device, name).\n\t\t\tInfo(\"Ignoring L3 device; >= 5.8 kernel is required.\")\n\t\treturn false\n\t}\n\n\t\/\/ Skip veth devices that don't have a default route.\n\t\/\/ This is a workaround for kubernetes-in-docker. We want to avoid\n\t\/\/ veth devices in general as they may be leftovers from another CNI.\n\tif !hasDefaultRoute {\n\t\t_, virtual := link.(*netlink.Veth)\n\t\tif virtual {\n\t\t\tlog.WithField(logfields.Device, name).\n\t\t\t\tDebug(\"Ignoring veth device as it has no default route\")\n\t\t\treturn false\n\t\t}\n\n\t}\n\treturn true\n}\n\ntype linkInfo struct {\n\thasDefaultRoute bool\n}\n\n\/\/ updateDevicesFromRoutes processes a batch of routes and updates the set of\n\/\/ devices. Returns true if devices changed.\nfunc (dm *DeviceManager) updateDevicesFromRoutes(l3DevOK bool, routes []netlink.Route) bool {\n\tlinkInfos := make(map[int]linkInfo)\n\n\t\/\/ Collect all link indices mentioned in the route update batch\n\tfor _, route := range routes {\n\t\t\/\/ Only consider devices that have global unicast routes,\n\t\t\/\/ e.g. 
skip loopback, multicast and link local routes.\n\t\tif route.Dst != nil && !route.Dst.IP.IsGlobalUnicast() {\n\t\t\tcontinue\n\t\t}\n\t\tif route.Table == unix.RT_TABLE_LOCAL {\n\t\t\tcontinue\n\t\t}\n\t\tlinkInfo := linkInfos[route.LinkIndex]\n\t\tlinkInfo.hasDefaultRoute = linkInfo.hasDefaultRoute || route.Dst == nil\n\t\tlinkInfos[route.LinkIndex] = linkInfo\n\t}\n\n\tchanged := false\n\tfor index, info := range linkInfos {\n\t\tlink, err := netlink.LinkByIndex(index)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).WithField(logfields.LinkIndex, index).\n\t\t\t\tWarn(\"Failed to get link by index\")\n\t\t\tcontinue\n\t\t}\n\t\tname := link.Attrs().Name\n\n\t\t\/\/ Skip devices we already know.\n\t\tif _, exists := dm.devices[name]; exists {\n\t\t\tcontinue\n\t\t}\n\n\t\tviable := dm.isViableDevice(l3DevOK, info.hasDefaultRoute, link)\n\t\tif viable {\n\t\t\tdm.devices[name] = struct{}{}\n\t\t\tchanged = true\n\t\t}\n\t}\n\treturn changed\n}\n\n\/\/ expandDevices expands all wildcard device names to concrete devices.\n\/\/ e.g. device \"eth+\" expands to \"eth0,eth1\" etc. Non-matching wildcards are ignored.\nfunc expandDevices() error {\n\tallLinks, err := netlink.LinkList()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Device wildcard expansion failed to fetch devices: %w\", err)\n\t}\n\texpandedDevices := make(map[string]struct{})\n\tfor _, iface := range option.Config.Devices {\n\t\tif strings.HasSuffix(iface, \"+\") {\n\t\t\tprefix := strings.TrimRight(iface, \"+\")\n\t\t\tfor _, link := range allLinks {\n\t\t\t\tattrs := link.Attrs()\n\t\t\t\tif strings.HasPrefix(attrs.Name, prefix) {\n\t\t\t\t\texpandedDevices[attrs.Name] = struct{}{}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\texpandedDevices[iface] = struct{}{}\n\t\t}\n\t}\n\tif len(option.Config.Devices) > 0 && len(expandedDevices) == 0 {\n\t\t\/\/ User defined devices, but expansion yielded no devices. Fail here to not\n\t\t\/\/ surprise with auto-detection.\n\t\treturn fmt.Errorf(\"Device wildcard expansion failed to detect devices. 
Please verify --%s option.\",\n\t\t\toption.Devices)\n\t}\n\n\toption.Config.Devices = make([]string, 0, len(expandedDevices))\n\tfor dev := range expandedDevices {\n\t\toption.Config.Devices = append(option.Config.Devices, dev)\n\t}\n\tsort.Strings(option.Config.Devices)\n\treturn nil\n}\n\nfunc areDevicesRequired() bool {\n\treturn option.Config.EnableNodePort ||\n\t\toption.Config.EnableHostFirewall ||\n\t\toption.Config.EnableBandwidthManager\n}\n\nfunc findK8SNodeIPLink() (netlink.Link, error) {\n\tnodeIP := node.GetK8sNodeIP()\n\n\tif nodeIP == nil {\n\t\treturn nil, fmt.Errorf(\"Failed to find K8s node device as node IP is not known\")\n\t}\n\n\tvar family int\n\tif nodeIP.To4() != nil {\n\t\tfamily = netlink.FAMILY_V4\n\t} else {\n\t\tfamily = netlink.FAMILY_V6\n\t}\n\n\tif addrs, err := netlink.AddrList(nil, family); err == nil {\n\t\tfor _, a := range addrs {\n\t\t\tif a.IP.Equal(nodeIP) {\n\t\t\t\tlink, err := netlink.LinkByIndex(a.LinkIndex)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\treturn link, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"K8s node device not found\")\n}\n\n\/\/ supportL3Dev returns true if the kernel is new enough to support fast redirection of\n\/\/ packets coming from L3 devices using bpf_skb_redirect_peer.\nfunc supportL3Dev() bool {\n\tprobesManager := probes.NewProbeManager()\n\tif h := probesManager.GetHelpers(\"sched_cls\"); h != nil {\n\t\t_, found := h[\"bpf_skb_change_head\"]\n\t\treturn found\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package os runs processes locally\npackage os\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\n\t\"github.com\/micro\/go-micro\/v3\/runtime\/local\/process\"\n)\n\nfunc (p *Process) Exec(exe *process.Executable) error {\n\tcmd := exec.Command(exe.Package.Path)\n\treturn cmd.Run()\n}\n\nfunc (p *Process) Fork(exe *process.Executable) (*process.PID, error) {\n\t\/\/ create command\n\tcmd := exec.Command(exe.Package.Path, exe.Args...)\n\t\/\/ set env vars\n\tcmd.Env = append(cmd.Env, exe.Env...)\n\n\tin, err := cmd.StdinPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ter, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ start the process\n\tif err := cmd.Start(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &process.PID{\n\t\tID: fmt.Sprintf(\"%d\", cmd.Process.Pid),\n\t\tInput: in,\n\t\tOutput: out,\n\t\tError: er,\n\t}, nil\n}\n\nfunc (p *Process) Kill(pid *process.PID) error {\n\tid, err := strconv.Atoi(pid.ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpr, err := os.FindProcess(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ now kill it\n\terr = pr.Kill()\n\n\t\/\/ return the kill error\n\treturn err\n}\n\nfunc (p *Process) Wait(pid *process.PID) error {\n\tid, err := strconv.Atoi(pid.ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpr, err := os.FindProcess(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tps, err := pr.Wait()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif ps.Success() {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(ps.String())\n}\n<commit_msg>executable is now os<commit_after>\/\/ Package os runs processes locally\npackage os\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\n\t\"github.com\/micro\/go-micro\/v3\/runtime\/local\/process\"\n)\n\nfunc (p *Process) Exec(exe *process.Exec) error {\n\tcmd := exec.Command(exe.Package.Path)\n\treturn cmd.Run()\n}\n\nfunc (p *Process) 
Fork(exe *process.Exec) (*process.PID, error) {\n\t\/\/ create command\n\tcmd := exec.Command(exe.Package.Path, exe.Args...)\n\t\/\/ set env vars\n\tcmd.Env = append(cmd.Env, exe.Env...)\n\n\tin, err := cmd.StdinPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ter, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ start the process\n\tif err := cmd.Start(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &process.PID{\n\t\tID: fmt.Sprintf(\"%d\", cmd.Process.Pid),\n\t\tInput: in,\n\t\tOutput: out,\n\t\tError: er,\n\t}, nil\n}\n\nfunc (p *Process) Kill(pid *process.PID) error {\n\tid, err := strconv.Atoi(pid.ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpr, err := os.FindProcess(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ now kill it\n\terr = pr.Kill()\n\n\t\/\/ return the kill error\n\treturn err\n}\n\nfunc (p *Process) Wait(pid *process.PID) error {\n\tid, err := strconv.Atoi(pid.ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpr, err := os.FindProcess(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tps, err := pr.Wait()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif ps.Success() {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(ps.String())\n}\n<|endoftext|>"} {"text":"<commit_before>package planner\n\nimport \"strings\"\n\nvar (\n\tRuntimeK3S = \"k3s\"\n\tRuntimeRKE2 = \"rke2\"\n)\n\nfunc GetRuntimeCommand(kubernetesVersion string) string {\n\treturn strings.ToLower(GetRuntime(kubernetesVersion))\n}\n\nfunc GetRuntimeServerUnit(kubernetesVersion string) string {\n\tif GetRuntime(kubernetesVersion) == RuntimeK3S {\n\t\treturn RuntimeK3S\n\t}\n\treturn RuntimeRKE2 + \"-server\"\n}\n\nfunc GetRuntimeEnv(kubernetesVersion string) string {\n\treturn strings.ToUpper(GetRuntimeEnv(kubernetesVersion))\n}\n\nfunc GetRuntime(kubernetesVersion string) string {\n\tif strings.Contains(kubernetesVersion, RuntimeK3S) {\n\t\treturn RuntimeK3S\n\t}\n\treturn RuntimeRKE2\n}\n<commit_msg>Fix stack overflow<commit_after>package planner\n\nimport \"strings\"\n\nvar (\n\tRuntimeK3S = \"k3s\"\n\tRuntimeRKE2 = \"rke2\"\n)\n\nfunc GetRuntimeCommand(kubernetesVersion string) string {\n\treturn strings.ToLower(GetRuntime(kubernetesVersion))\n}\n\nfunc GetRuntimeServerUnit(kubernetesVersion string) string {\n\tif GetRuntime(kubernetesVersion) == RuntimeK3S {\n\t\treturn RuntimeK3S\n\t}\n\treturn RuntimeRKE2 + \"-server\"\n}\n\nfunc GetRuntimeEnv(kubernetesVersion string) string {\n\treturn strings.ToUpper(GetRuntime(kubernetesVersion))\n}\n\nfunc GetRuntime(kubernetesVersion string) string {\n\tif strings.Contains(kubernetesVersion, RuntimeK3S) {\n\t\treturn RuntimeK3S\n\t}\n\treturn RuntimeRKE2\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ package tcpreaderwrapper wraps a gopacket tcpassembly.tcpreader.ReaderStream\npackage tcpreaderwrapper\n\nimport (\n\t\"code.google.com\/p\/gopacket\/tcpassembly\"\n\t\"code.google.com\/p\/gopacket\/tcpassembly\/tcpreader\"\n)\n\ntype ReaderStreamWrapper struct {\n\ttcpreader.ReaderStream\n\tReassemblies []ReassemblyInfo\n}\n\n\/\/ NewReaderStream returns a new ReaderStreamWrapper object.\nfunc NewReaderStreamWrapper() ReaderStreamWrapper {\n\tr := ReaderStreamWrapper{\n\t\tReaderStream: tcpreader.NewReaderStream(),\n\t\tReassemblies: make([]ReassemblyInfo, 0),\n\t}\n\tr.ReaderStream.ReaderStreamOptions.LossErrors = true\n\treturn r\n}\n\n\/\/ Reassembled implements tcpassembly.Stream's Reassembled function.\nfunc (r *ReaderStreamWrapper) 
Reassembled(reassembly []tcpassembly.Reassembly) {\n\t\/\/ keep track of sizes and times to reconstruct\n\tfor _, re := range reassembly {\n\t\tr.Reassemblies = append(r.Reassemblies, newReassemblyInfo(re))\n\t}\n\tr.ReaderStream.Reassembled(reassembly)\n}\n<commit_msg>Disable LossErrors<commit_after>\/\/ package tcpreaderwrapper wraps a gopacket tcpassembly.tcpreader.ReaderStream\n\/\/ and holds recent reassemblies to fetch timing information\npackage tcpreaderwrapper\n\nimport (\n\t\"code.google.com\/p\/gopacket\/tcpassembly\"\n\t\"code.google.com\/p\/gopacket\/tcpassembly\/tcpreader\"\n)\n\ntype ReaderStreamWrapper struct {\n\ttcpreader.ReaderStream\n\tReassemblies []ReassemblyInfo\n}\n\n\/\/ NewReaderStream returns a new ReaderStreamWrapper object.\nfunc NewReaderStreamWrapper() ReaderStreamWrapper {\n\tr := ReaderStreamWrapper{\n\t\tReaderStream: tcpreader.NewReaderStream(),\n\t\tReassemblies: make([]ReassemblyInfo, 0),\n\t}\n\t\/\/\tr.ReaderStream.ReaderStreamOptions.LossErrors = true\n\treturn r\n}\n\n\/\/ Reassembled implements tcpassembly.Stream's Reassembled function.\nfunc (r *ReaderStreamWrapper) Reassembled(reassembly []tcpassembly.Reassembly) {\n\t\/\/ keep track of sizes and times to reconstruct\n\tfor _, re := range reassembly {\n\t\tr.Reassemblies = append(r.Reassemblies, newReassemblyInfo(re))\n\t}\n\tr.ReaderStream.Reassembled(reassembly)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package cloudflare is a dns Provider for cloudflare\npackage cloudflare\n\nimport (\n\t\"context\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/cloudflare\/cloudflare-go\"\n\tmiekdns \"github.com\/miekg\/dns\"\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/micro\/go-micro\/util\/log\"\n\tdns \"github.com\/micro\/micro\/network\/dns\/proto\/dns\"\n\t\"github.com\/micro\/micro\/network\/dns\/provider\"\n)\n\ntype cfProvider struct {\n\tapi *cloudflare.API\n\tzoneID string\n}\n\n\/\/ New returns a configured cloudflare DNS provider\nfunc New(apiToken, zoneID string) (provider.Provider, error) {\n\tapi, err := cloudflare.NewWithAPIToken(apiToken)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &cfProvider{\n\t\tapi: api,\n\t\tzoneID: zoneID,\n\t}, nil\n}\n\nfunc (cf *cfProvider) Advertise(records ...*dns.Record) error {\n\tfor _, r := range records {\n\t\t_, err := cf.api.CreateDNSRecord(cf.zoneID, cloudflare.DNSRecord{\n\t\t\tName: r.GetName(),\n\t\t\tContent: r.GetValue(),\n\t\t\tType: r.GetType(),\n\t\t\tPriority: int(r.GetPriority()),\n\t\t\tTTL: 1,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc (cf *cfProvider) Remove(records ...*dns.Record) error {\n\texisting := make(map[string]map[string]cloudflare.DNSRecord)\n\texistingRecords, err := cf.api.DNSRecords(cf.zoneID, cloudflare.DNSRecord{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, e := range existingRecords {\n\t\tif _, found := existing[e.Name]; !found {\n\t\t\texisting[e.Name] = make(map[string]cloudflare.DNSRecord)\n\t\t}\n\t\texisting[e.Name][e.Content] = e\n\t}\n\tfor _, r := range records {\n\t\tif _, found := existing[r.Name]; !found {\n\t\t\treturn errors.New(\"Record \" + r.Name + \" could not be deleted as it doesn't exist\")\n\t\t}\n\t\ttoDelete, found := existing[r.Name][r.Value]\n\t\tif !found {\n\t\t\treturn errors.New(\"Record \" + r.Name + \" with address \" + r.Value + \" could not be deleted as it doesn't exist\")\n\t\t}\n\t\terr := cf.api.DeleteDNSRecord(cf.zoneID, toDelete.ID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (cf 
*cfProvider) Resolve(name, recordType string) ([]*dns.Record, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\tdnstype, found := miekdns.StringToType[recordType]\n\tif !found {\n\t\treturn nil, errors.New(recordType + \" is not a valid record type\")\n\t}\n\tm := new(miekdns.Msg)\n\tm.SetQuestion(miekdns.Fqdn(name), dnstype)\n\tr, err := miekdns.ExchangeContext(ctx, m, \"8.8.8.8:53\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar response []*dns.Record\n\tfor _, answer := range r.Answer {\n\t\th := answer.Header()\n\t\trec := &dns.Record{\n\t\t\tName: h.Name,\n\t\t\tType: miekdns.TypeToString[h.Rrtype],\n\t\t\tTtl: answer.Header().Ttl,\n\t\t}\n\t\tif rec.Type != recordType {\n\t\t\tlog.Trace(\"Tried to look up a \" + recordType + \" record but got a \" + rec.Type)\n\t\t\tcontinue\n\t\t}\n\t\tswitch rec.Type {\n\t\tcase \"A\":\n\t\t\tarecord, _ := answer.(*miekdns.A)\n\t\t\trec.Value = arecord.A.String()\n\t\tcase \"AAAA\":\n\t\t\taaaarecord := answer.(*miekdns.AAAA)\n\t\t\trec.Value = aaaarecord.AAAA.String()\n\t\tcase \"TXT\":\n\t\t\ttxtrecord := answer.(*miekdns.TXT)\n\t\t\trec.Value = strings.Join(txtrecord.Txt, \"\")\n\t\tcase \"MX\":\n\t\t\tmxrecord := answer.(*miekdns.MX)\n\t\t\trec.Value = mxrecord.Mx\n\t\t\trec.Priority = uint32(mxrecord.Preference)\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"Can't handle record type \" + rec.Type)\n\t\t}\n\t\tresponse = append(response, rec)\n\t}\n\treturn response, nil\n}\n<commit_msg>Cloudflare resolver should use Cloudflare DNS<commit_after>\/\/ Package cloudflare is a dns Provider for cloudflare\npackage cloudflare\n\nimport (\n\t\"context\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/cloudflare\/cloudflare-go\"\n\tmiekdns \"github.com\/miekg\/dns\"\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/micro\/go-micro\/util\/log\"\n\tdns \"github.com\/micro\/micro\/network\/dns\/proto\/dns\"\n\t\"github.com\/micro\/micro\/network\/dns\/provider\"\n)\n\ntype cfProvider struct {\n\tapi *cloudflare.API\n\tzoneID string\n}\n\n\/\/ New returns a configured cloudflare DNS provider\nfunc New(apiToken, zoneID string) (provider.Provider, error) {\n\tapi, err := cloudflare.NewWithAPIToken(apiToken)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &cfProvider{\n\t\tapi: api,\n\t\tzoneID: zoneID,\n\t}, nil\n}\n\nfunc (cf *cfProvider) Advertise(records ...*dns.Record) error {\n\tfor _, r := range records {\n\t\t_, err := cf.api.CreateDNSRecord(cf.zoneID, cloudflare.DNSRecord{\n\t\t\tName: r.GetName(),\n\t\t\tContent: r.GetValue(),\n\t\t\tType: r.GetType(),\n\t\t\tPriority: int(r.GetPriority()),\n\t\t\tTTL: 1,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc (cf *cfProvider) Remove(records ...*dns.Record) error {\n\texisting := make(map[string]map[string]cloudflare.DNSRecord)\n\texistingRecords, err := cf.api.DNSRecords(cf.zoneID, cloudflare.DNSRecord{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, e := range existingRecords {\n\t\tif _, found := existing[e.Name]; !found {\n\t\t\texisting[e.Name] = make(map[string]cloudflare.DNSRecord)\n\t\t}\n\t\texisting[e.Name][e.Content] = e\n\t}\n\tfor _, r := range records {\n\t\tif _, found := existing[r.Name]; !found {\n\t\t\treturn errors.New(\"Record \" + r.Name + \" could not be deleted as it doesn't exist\")\n\t\t}\n\t\ttoDelete, found := existing[r.Name][r.Value]\n\t\tif !found {\n\t\t\treturn errors.New(\"Record \" + r.Name + \" with address \" + r.Value + \" could not be deleted as it doesn't 
exist\")\n\t\t}\n\t\terr := cf.api.DeleteDNSRecord(cf.zoneID, toDelete.ID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (cf *cfProvider) Resolve(name, recordType string) ([]*dns.Record, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\tdnstype, found := miekdns.StringToType[recordType]\n\tif !found {\n\t\treturn nil, errors.New(recordType + \" is not a valid record type\")\n\t}\n\tm := new(miekdns.Msg)\n\tm.SetQuestion(miekdns.Fqdn(name), dnstype)\n\tr, err := miekdns.ExchangeContext(ctx, m, \"1.0.0.1:53\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar response []*dns.Record\n\tfor _, answer := range r.Answer {\n\t\th := answer.Header()\n\t\trec := &dns.Record{\n\t\t\tName: h.Name,\n\t\t\tType: miekdns.TypeToString[h.Rrtype],\n\t\t\tTtl: answer.Header().Ttl,\n\t\t}\n\t\tif rec.Type != recordType {\n\t\t\tlog.Trace(\"Tried to look up a \" + recordType + \" record but got a \" + rec.Type)\n\t\t\tcontinue\n\t\t}\n\t\tswitch rec.Type {\n\t\tcase \"A\":\n\t\t\tarecord, _ := answer.(*miekdns.A)\n\t\t\trec.Value = arecord.A.String()\n\t\tcase \"AAAA\":\n\t\t\taaaarecord := answer.(*miekdns.AAAA)\n\t\t\trec.Value = aaaarecord.AAAA.String()\n\t\tcase \"TXT\":\n\t\t\ttxtrecord := answer.(*miekdns.TXT)\n\t\t\trec.Value = strings.Join(txtrecord.Txt, \"\")\n\t\tcase \"MX\":\n\t\t\tmxrecord := answer.(*miekdns.MX)\n\t\t\trec.Value = mxrecord.Mx\n\t\t\trec.Priority = uint32(mxrecord.Preference)\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"Can't handle record type \" + rec.Type)\n\t\t}\n\t\tresponse = append(response, rec)\n\t}\n\treturn response, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package build\n\nimport (\n\t\"fmt\"\n\n\tbuildv1 \"github.com\/openshift\/api\/build\/v1\"\n)\n\n\/\/ PredicateFunc is testing an argument and decides does it meet some criteria or not.\ntype PredicateFunc func(interface{}) bool\n\n\/\/ FilterBuilds returns array of builds that satisfies predicate function.\nfunc FilterBuilds(builds []buildv1.Build, predicate PredicateFunc) []buildv1.Build {\n\tif len(builds) == 0 {\n\t\treturn builds\n\t}\n\n\tresult := make([]buildv1.Build, 0)\n\tfor _, build := range builds {\n\t\tif predicate(build) {\n\t\t\tresult = append(result, build)\n\t\t}\n\t}\n\n\treturn result\n}\n\n\/\/ ByBuildConfigPredicate matches all builds that have build config annotation or label with specified value.\nfunc ByBuildConfigPredicate(labelValue string) PredicateFunc {\n\treturn func(arg interface{}) bool {\n\t\treturn hasBuildConfigAnnotation(arg.(buildv1.Build), BuildConfigAnnotation, labelValue) ||\n\t\t\thasBuildConfigLabel(arg.(buildv1.Build), BuildConfigLabel, labelValue)\n\t}\n}\n\nfunc hasBuildConfigLabel(build buildv1.Build, labelName, labelValue string) bool {\n\tvalue, ok := build.Labels[labelName]\n\treturn ok && value == labelValue\n}\n\nfunc hasBuildConfigAnnotation(build buildv1.Build, annotationName, annotationValue string) bool {\n\tif build.Annotations == nil {\n\t\treturn false\n\t}\n\tvalue, ok := build.Annotations[annotationName]\n\treturn ok && value == annotationValue\n}\n\n\/\/ BuildNameForConfigVersion returns the name of the version-th build\n\/\/ for the config that has the provided name.\nfunc BuildNameForConfigVersion(name string, version int) string {\n\treturn fmt.Sprintf(\"%s-%d\", name, version)\n}\n<commit_msg>build: move StrategyType to staging<commit_after>package build\n\nimport (\n\t\"fmt\"\n\n\tbuildv1 \"github.com\/openshift\/api\/build\/v1\"\n)\n\n\/\/ PredicateFunc 
is testing an argument and decides does it meet some criteria or not.\ntype PredicateFunc func(interface{}) bool\n\n\/\/ FilterBuilds returns array of builds that satisfies predicate function.\nfunc FilterBuilds(builds []buildv1.Build, predicate PredicateFunc) []buildv1.Build {\n\tif len(builds) == 0 {\n\t\treturn builds\n\t}\n\n\tresult := make([]buildv1.Build, 0)\n\tfor _, build := range builds {\n\t\tif predicate(build) {\n\t\t\tresult = append(result, build)\n\t\t}\n\t}\n\n\treturn result\n}\n\n\/\/ ByBuildConfigPredicate matches all builds that have build config annotation or label with specified value.\nfunc ByBuildConfigPredicate(labelValue string) PredicateFunc {\n\treturn func(arg interface{}) bool {\n\t\treturn hasBuildConfigAnnotation(arg.(buildv1.Build), BuildConfigAnnotation, labelValue) ||\n\t\t\thasBuildConfigLabel(arg.(buildv1.Build), BuildConfigLabel, labelValue)\n\t}\n}\n\nfunc hasBuildConfigLabel(build buildv1.Build, labelName, labelValue string) bool {\n\tvalue, ok := build.Labels[labelName]\n\treturn ok && value == labelValue\n}\n\nfunc hasBuildConfigAnnotation(build buildv1.Build, annotationName, annotationValue string) bool {\n\tif build.Annotations == nil {\n\t\treturn false\n\t}\n\tvalue, ok := build.Annotations[annotationName]\n\treturn ok && value == annotationValue\n}\n\n\/\/ BuildNameForConfigVersion returns the name of the version-th build\n\/\/ for the config that has the provided name.\nfunc BuildNameForConfigVersion(name string, version int) string {\n\treturn fmt.Sprintf(\"%s-%d\", name, version)\n}\n\nfunc StrategyType(strategy buildv1.BuildStrategy) string {\n\tswitch {\n\tcase strategy.DockerStrategy != nil:\n\t\treturn \"Docker\"\n\tcase strategy.CustomStrategy != nil:\n\t\treturn \"Custom\"\n\tcase strategy.SourceStrategy != nil:\n\t\treturn \"Source\"\n\tcase strategy.JenkinsPipelineStrategy != nil:\n\t\treturn \"JenkinsPipeline\"\n\t}\n\treturn \"\"\n}<|endoftext|>"} {"text":"<commit_before>package annotations\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"regexp\"\n\n\t\"github.com\/Financial-Times\/neo-model-utils-go\/mapper\"\n\t\"github.com\/Financial-Times\/neo-utils-go\/neoutils\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/jmcvetta\/neoism\"\n)\n\nvar uuidExtractRegex = regexp.MustCompile(\".*\/([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})$\")\n\n\/\/ Service interface. 
Compatible with the baserwftapp service EXCEPT for\n\/\/ 1) the Write function, which has signature Write(thing interface{}) error...\n\/\/ 2) the DecodeJson function, which has signature DecodeJSON(*json.Decoder) (thing interface{}, identity string, err error)\n\/\/ The problem is that we have a list of things, and the uuid is for a related OTHER thing\n\/\/ TODO - move to implement a shared defined Service interface?\ntype Service interface {\n\tWrite(contentUUID string, thing interface{}) (err error)\n\tRead(contentUUID string) (thing interface{}, found bool, err error)\n\tDelete(contentUUID string) (found bool, err error)\n\tCheck() (err error)\n\tDecodeJSON(*json.Decoder) (thing interface{}, err error)\n\tCount() (int, error)\n\tInitialise() error\n}\n\n\/\/holds the Neo4j-specific information\ntype service struct {\n\tcypherRunner neoutils.CypherRunner\n\tindexManager neoutils.IndexManager\n\tplatformVersion string\n}\n\n\/\/NewAnnotationsService instantiate driver\nfunc NewAnnotationsService(cypherRunner neoutils.CypherRunner, indexManager neoutils.IndexManager, platformVersion string) service {\n\tif platformVersion == \"\" {\n\t\tlog.Fatalf(\"PlatformVersion was not specified!\")\n\t}\n\treturn service{cypherRunner, indexManager, platformVersion}\n}\n\n\/\/ DecodeJSON decodes to a list of annotations, for ease of use this is a struct itself\nfunc (s service) DecodeJSON(dec *json.Decoder) (interface{}, error) {\n\ta := annotations{}\n\terr := dec.Decode(&a)\n\treturn a, err\n}\n\nfunc (s service) Read(contentUUID string) (thing interface{}, found bool, err error) {\n\tresults := []annotation{}\n\n\t\/\/TODO shouldn't return Provenances if none of the scores, agentRole or atTime are set\n\tstatementTemplate := `\n\t\t\t\t\tMATCH (c:Thing{uuid:{contentUUID}})-[rel{platformVersion:{platformVersion}}]->(cc:Thing)\n\t\t\t\t\tWITH c, cc, rel, {id:cc.uuid,prefLabel:cc.prefLabel,types:labels(cc),predicate:type(rel)} as thing,\n\t\t\t\t\tcollect(\n\t\t\t\t\t\t{scores:[\n\t\t\t\t\t\t\t{scoringSystem:'%s', value:rel.relevanceScore},\n\t\t\t\t\t\t\t{scoringSystem:'%s', value:rel.confidenceScore}],\n\t\t\t\t\t\tagentRole:rel.annotatedBy,\n\t\t\t\t\t\tatTime:rel.annotatedDate}) as provenances\n\t\t\t\t\tRETURN thing, provenances ORDER BY thing.id\n\t\t\t\t\t\t\t\t\t`\n\tstatement := fmt.Sprintf(statementTemplate, relevanceScoringSystem, confidenceScoringSystem)\n\n\tquery := &neoism.CypherQuery{\n\t\tStatement: statement,\n\t\tParameters: neoism.Props{\"contentUUID\": contentUUID, \"platformVersion\": s.platformVersion},\n\t\tResult: &results,\n\t}\n\terr = s.cypherRunner.CypherBatch([]*neoism.CypherQuery{query})\n\tif err != nil {\n\t\tlog.Errorf(\"Error looking up uuid %s with query %s from neoism: %+v\", contentUUID, query.Statement, err)\n\t\treturn annotations{}, false, fmt.Errorf(\"Error accessing Annotations datastore for uuid: %s\", contentUUID)\n\t}\n\tlog.Debugf(\"CypherResult Read Annotations for uuid: %s was: %+v\", contentUUID, results)\n\tif (len(results)) == 0 {\n\t\treturn annotations{}, false, nil\n\t}\n\n\tfor idx := range results {\n\t\tmapToResponseFormat(&results[idx])\n\t}\n\n\treturn results, true, nil\n}\n\n\/\/Delete removes all the annotations for this content. 
Ignore the nodes on either end -\n\/\/may leave nodes that are only 'things' inserted by this writer: clean up\n\/\/as a result of this will need to happen externally if required\nfunc (s service) Delete(contentUUID string) (bool, error) {\n\n\tvar deleteStatement string\n\n\tif s.platformVersion == \"v2\" {\n\t\tdeleteStatement = `MATCH (c:Thing{uuid: {contentUUID}})-[rel:MENTIONS{platformVersion:{platformVersion}}]->(cc:Thing) DELETE rel`\n\t} else {\n\t\tdeleteStatement = `MATCH (c:Thing{uuid: {contentUUID}})-[rel{platformVersion:{platformVersion}}]->(cc:Thing) DELETE rel`\n\t}\n\n\tquery := &neoism.CypherQuery{\n\t\tStatement: deleteStatement,\n\t\tParameters: neoism.Props{\"contentUUID\": contentUUID, \"platformVersion\": s.platformVersion},\n\t\tIncludeStats: true,\n\t}\n\n\terr := s.cypherRunner.CypherBatch([]*neoism.CypherQuery{query})\n\n\tstats, err := query.Stats()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tvar found bool\n\tif stats.ContainsUpdates {\n\t\tfound = true\n\t}\n\n\treturn found, err\n}\n\n\/\/Write a set of annotations associated with a piece of content. Any annotations\n\/\/already there will be removed\nfunc (s service) Write(contentUUID string, thing interface{}) (err error) {\n\tannotationsToWrite := thing.(annotations)\n\n\tif contentUUID == \"\" {\n\t\treturn errors.New(\"Content uuid is required\")\n\t}\n\tif err := validateAnnotations(&annotationsToWrite); err != nil {\n\t\tlog.Warnf(\"Validation of supplied annotations failed\")\n\t\treturn err\n\t}\n\n\tif len(annotationsToWrite) == 0 {\n\t\tlog.Warnf(\"No new annotations supplied for content uuid: %s\", contentUUID)\n\t}\n\n\tqueries := append([]*neoism.CypherQuery{}, dropAllAnnotationsQuery(contentUUID, s.platformVersion))\n\n\tvar statements = []string{}\n\tfor _, annotationToWrite := range annotationsToWrite {\n\t\tquery, err := createAnnotationQuery(contentUUID, annotationToWrite, s.platformVersion)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstatements = append(statements, query.Statement)\n\t\tqueries = append(queries, query)\n\t}\n\tlog.Infof(\"Updated Annotations for content uuid: %s\", contentUUID)\n\tlog.Debugf(\"For update, ran statements: %+v\", statements)\n\n\treturn s.cypherRunner.CypherBatch(queries)\n}\n\n\/\/ Check tests neo4j by running a simple cypher query\nfunc (s service) Check() error {\n\treturn neoutils.Check(s.cypherRunner)\n}\n\nfunc (s service) Count() (int, error) {\n\tresults := []struct {\n\t\tCount int `json:\"c\"`\n\t}{}\n\n\tquery := &neoism.CypherQuery{\n\t\tStatement: `MATCH ()-[r{platformVersion:{platformVersion}}]->() RETURN count(r) as c`,\n\t\tParameters: neoism.Props{\"platformVersion\": s.platformVersion},\n\t\tResult: &results,\n\t}\n\n\terr := s.cypherRunner.CypherBatch([]*neoism.CypherQuery{query})\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn results[0].Count, nil\n}\n\nfunc (s service) Initialise() error {\n\treturn nil \/\/ No constraints need to be set up\n}\n\nfunc createAnnotationRelationship(relation string) (statement string) {\n\tstmt := `\n MERGE (content:Thing{uuid:{contentID}})\n MERGE (upp:Identifier:UPPIdentifier{value:{conceptID}})\n MERGE (upp)-[:IDENTIFIES]->(concept:Thing) ON CREATE SET concept.uuid = {conceptID}\n MERGE (content)-[pred:%s{platformVersion:{platformVersion}}]->(concept)\n SET pred={annProps}\n `\n\tstatement = fmt.Sprintf(stmt, relation)\n\treturn statement\n}\n\nfunc getRelationshipFromPredicate(predicate string) (relation string) {\n\tif predicate != \"\" {\n\t\trelation = 
relations[predicate]\n\t} else {\n\t\trelation = relations[\"mentions\"]\n\t}\n\treturn relation\n}\n\nfunc createAnnotationQuery(contentUUID string, ann annotation, platformVersion string) (*neoism.CypherQuery, error) {\n\tquery := neoism.CypherQuery{}\n\tthingID, err := extractUUIDFromURI(ann.Thing.ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/todo temporary change to deal with multiple provenances\n\t\/*if len(ann.Provenances) > 1 {\n\t\treturn nil, errors.New(\"Cannot insert a MENTIONS annotation with multiple provenances\")\n\t}*\/\n\n\tvar prov provenance\n\tparams := map[string]interface{}{}\n\tparams[\"platformVersion\"] = platformVersion\n\n\tif len(ann.Provenances) >= 1 {\n\t\tprov = ann.Provenances[0]\n\t\tannotatedBy, annotatedDateEpoch, relevanceScore, confidenceScore, supplied, err := extractDataFromProvenance(&prov)\n\n\t\tif err != nil {\n\t\t\tlog.Infof(\"ERROR=%s\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif supplied == true {\n\t\t\tif annotatedBy != \"\" {\n\t\t\t\tparams[\"annotatedBy\"] = annotatedBy\n\t\t\t}\n\t\t\tif prov.AtTime != \"\" {\n\t\t\t\tparams[\"annotatedDateEpoch\"] = annotatedDateEpoch\n\t\t\t\tparams[\"annotatedDate\"] = prov.AtTime\n\t\t\t}\n\t\t\tparams[\"relevanceScore\"] = relevanceScore\n\t\t\tparams[\"confidenceScore\"] = confidenceScore\n\t\t}\n\t}\n\n\trelation := getRelationshipFromPredicate(ann.Thing.Predicate)\n\tquery.Statement = createAnnotationRelationship(relation)\n\tquery.Parameters = map[string]interface{}{\n\t\t\"contentID\": contentUUID,\n\t\t\"conceptID\": thingID,\n\t\t\"platformVersion\": platformVersion,\n\t\t\"annProps\": params,\n\t}\n\treturn &query, nil\n}\n\nfunc extractDataFromProvenance(prov *provenance) (string, int64, float64, float64, bool, error) {\n\tif len(prov.Scores) == 0 {\n\t\treturn \"\", -1, -1, -1, false, nil\n\t}\n\tvar annotatedBy string\n\tvar annotatedDateEpoch int64\n\tvar confidenceScore, relevanceScore float64\n\tvar err error\n\tif prov.AgentRole != \"\" {\n\t\tannotatedBy, err = extractUUIDFromURI(prov.AgentRole)\n\t}\n\tif prov.AtTime != \"\" {\n\t\tannotatedDateEpoch, err = convertAnnotatedDateToEpoch(prov.AtTime)\n\t}\n\trelevanceScore, confidenceScore, err = extractScores(prov.Scores)\n\n\tif err != nil {\n\t\treturn \"\", -1, -1, -1, true, err\n\t}\n\treturn annotatedBy, annotatedDateEpoch, relevanceScore, confidenceScore, true, nil\n}\n\nfunc extractUUIDFromURI(uri string) (string, error) {\n\tresult := uuidExtractRegex.FindStringSubmatch(uri)\n\tif len(result) == 2 {\n\t\treturn result[1], nil\n\t}\n\treturn \"\", fmt.Errorf(\"Couldn't extract uuid from uri %s\", uri)\n}\n\nfunc convertAnnotatedDateToEpoch(annotatedDateString string) (int64, error) {\n\tdatetimeEpoch, err := time.Parse(time.RFC3339, annotatedDateString)\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn datetimeEpoch.Unix(), nil\n}\n\nfunc extractScores(scores []score) (float64, float64, error) {\n\tvar relevanceScore, confidenceScore float64\n\tfor _, score := range scores {\n\t\tscoringSystem := score.ScoringSystem\n\t\tvalue := score.Value\n\t\tswitch scoringSystem {\n\t\tcase relevanceScoringSystem:\n\t\t\trelevanceScore = value\n\t\tcase confidenceScoringSystem:\n\t\t\tconfidenceScore = value\n\t\t}\n\t}\n\treturn relevanceScore, confidenceScore, nil\n}\n\nfunc dropAllAnnotationsQuery(contentUUID string, platformVersion string) *neoism.CypherQuery {\n\n\tvar matchStmtTemplate string\n\n\t\/\/TODO hard-coded verification:\n\t\/\/ -> necessary for brands - which got written by content-api with 
isClassifiedBy relationship, and should not be deleted by annotations-rw\n\t\/\/ -> so far brands are the only v2 concepts which have isClassifiedBy relationship; as soon as this changes: implementation needs to be updated\n\tif platformVersion == \"v2\" {\n\t\tmatchStmtTemplate = `OPTIONAL MATCH (:Thing{uuid:{contentID}})-[r:MENTIONS{platformVersion:{platformVersion}}]->(t:Thing)\n DELETE r`\n\t} else {\n\t\tmatchStmtTemplate = `OPTIONAL MATCH (:Thing{uuid:{contentID}})-[r]->(t:Thing)\n\t\t\tWHERE r.platformVersion={platformVersion}\n DELETE r`\n\t}\n\n\tquery := neoism.CypherQuery{}\n\tquery.Statement = matchStmtTemplate\n\tquery.Parameters = neoism.Props{\"contentID\": contentUUID, \"platformVersion\": platformVersion}\n\treturn &query\n}\n\nfunc validateAnnotations(annotations *annotations) error {\n\t\/\/TODO - for consistency, we should probably just not create the annotation?\n\tfor _, annotation := range *annotations {\n\t\tif annotation.Thing.ID == \"\" {\n\t\t\treturn ValidationError{fmt.Sprintf(\"Concept uuid missing for annotation %+v\", annotation)}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ValidationError is thrown when the annotations are not valid because mandatory information is missing\ntype ValidationError struct {\n\tMsg string\n}\n\nfunc (v ValidationError) Error() string {\n\treturn v.Msg\n}\n\nfunc mapToResponseFormat(ann *annotation) {\n\tann.Thing.ID = mapper.IDURL(ann.Thing.ID)\n\t\/\/ We expect only ONE provenance - provenance value is considered valid even if the AgentRole is not specified. See: v1 - isClassifiedBy\n\tfor idx := range ann.Provenances {\n\t\tif ann.Provenances[idx].AgentRole != \"\" {\n\t\t\tann.Provenances[idx].AgentRole = mapper.IDURL(ann.Provenances[idx].AgentRole)\n\t\t}\n\t}\n}\n<commit_msg>Changing cypher to include lifecycle, TODO: Tests<commit_after>package annotations\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"regexp\"\n\n\t\"github.com\/Financial-Times\/neo-model-utils-go\/mapper\"\n\t\"github.com\/Financial-Times\/neo-utils-go\/neoutils\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/jmcvetta\/neoism\"\n)\n\nvar uuidExtractRegex = regexp.MustCompile(\".*\/([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})$\")\n\n\/\/ Service interface. 
Compatible with the baserwftapp service EXCEPT for\n\/\/ 1) the Write function, which has signature Write(thing interface{}) error...\n\/\/ 2) the DecodeJson function, which has signature DecodeJSON(*json.Decoder) (thing interface{}, identity string, err error)\n\/\/ The problem is that we have a list of things, and the uuid is for a related OTHER thing\n\/\/ TODO - move to implement a shared defined Service interface?\ntype Service interface {\n\tWrite(contentUUID string, thing interface{}) (err error)\n\tRead(contentUUID string) (thing interface{}, found bool, err error)\n\tDelete(contentUUID string) (found bool, err error)\n\tCheck() (err error)\n\tDecodeJSON(*json.Decoder) (thing interface{}, err error)\n\tCount() (int, error)\n\tInitialise() error\n}\n\n\/\/ holds the Neo4j-specific information\ntype service struct {\n\tcypherRunner neoutils.CypherRunner\n\tindexManager neoutils.IndexManager\n\tplatformVersion string\n}\n\n\/\/ NewAnnotationsService instantiates the driver\nfunc NewAnnotationsService(cypherRunner neoutils.CypherRunner, indexManager neoutils.IndexManager, platformVersion string) service {\n\tif platformVersion == \"\" {\n\t\tlog.Fatalf(\"PlatformVersion was not specified!\")\n\t}\n\treturn service{cypherRunner, indexManager, platformVersion}\n}\n\n\/\/ DecodeJSON decodes to a list of annotations; for ease of use this is a struct itself\nfunc (s service) DecodeJSON(dec *json.Decoder) (interface{}, error) {\n\ta := annotations{}\n\terr := dec.Decode(&a)\n\treturn a, err\n}\n\nfunc (s service) Read(contentUUID string) (thing interface{}, found bool, err error) {\n\tresults := []annotation{}\n\n\t\/\/ TODO shouldn't return Provenances if none of the scores, agentRole or atTime are set\n\tstatementTemplate := `\n\t\t\t\t\tMATCH (c:Thing{uuid:{contentUUID}})-[rel{platformVersion:{platformVersion}}]->(cc:Thing)\n\t\t\t\t\tWHERE rel.lifecycle = {lifecycle}\n\t\t\t\t\tOR rel.lifecycle IS NULL\n\t\t\t\t\tWITH c, cc, rel, {id:cc.uuid,prefLabel:cc.prefLabel,types:labels(cc),predicate:type(rel)} as thing,\n\t\t\t\t\tcollect(\n\t\t\t\t\t\t{scores:[\n\t\t\t\t\t\t\t{scoringSystem:'%s', value:rel.relevanceScore},\n\t\t\t\t\t\t\t{scoringSystem:'%s', value:rel.confidenceScore}],\n\t\t\t\t\t\tagentRole:rel.annotatedBy,\n\t\t\t\t\t\tatTime:rel.annotatedDate}) as provenances\n\t\t\t\t\tRETURN thing, provenances ORDER BY thing.id\n\t\t\t\t\t\t\t\t\t`\n\tstatement := fmt.Sprintf(statementTemplate, relevanceScoringSystem, confidenceScoringSystem)\n\n\tquery := &neoism.CypherQuery{\n\t\tStatement: statement,\n\t\tParameters: neoism.Props{\"contentUUID\": contentUUID, \"platformVersion\": s.platformVersion, \"lifecycle\": \"annotations-\" + s.platformVersion},\n\t\tResult: &results,\n\t}\n\terr = s.cypherRunner.CypherBatch([]*neoism.CypherQuery{query})\n\tif err != nil {\n\t\tlog.Errorf(\"Error looking up uuid %s with query %s from neoism: %+v\", contentUUID, query.Statement, err)\n\t\treturn annotations{}, false, fmt.Errorf(\"Error accessing Annotations datastore for uuid: %s\", contentUUID)\n\t}\n\tlog.Debugf(\"CypherResult Read Annotations for uuid: %s was: %+v\", contentUUID, results)\n\tif len(results) == 0 {\n\t\treturn annotations{}, false, nil\n\t}\n\n\tfor idx := range results {\n\t\tmapToResponseFormat(&results[idx])\n\t}\n\n\treturn results, true, nil\n}\n\n\/\/ Delete removes all the annotations for this content. 
Ignore the nodes on either end -\n\/\/ may leave nodes that are only 'things' inserted by this writer: clean up\n\/\/ as a result of this will need to happen externally if required\nfunc (s service) Delete(contentUUID string) (bool, error) {\n\tdeleteStatement := `MATCH (c:Thing{uuid: {contentUUID}})-[rel{platformVersion:{platformVersion}}]->(cc:Thing)\n\t\t\t\t\t\t\t\t\t\t\tWHERE rel.lifecycle = {lifecycle}\n\t\t\t\t\t\t\t\t\t\t\tOR rel.lifecycle IS NULL\n\t\t\t\t\t\t\t\t\t\t\tDELETE rel`\n\tquery := &neoism.CypherQuery{\n\t\tStatement: deleteStatement,\n\t\tParameters: neoism.Props{\"contentUUID\": contentUUID, \"platformVersion\": s.platformVersion, \"lifecycle\": \"annotations-\" + s.platformVersion},\n\t\tIncludeStats: true,\n\t}\n\n\terr := s.cypherRunner.CypherBatch([]*neoism.CypherQuery{query})\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tstats, err := query.Stats()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tvar found bool\n\tif stats.ContainsUpdates {\n\t\tfound = true\n\t}\n\n\treturn found, err\n}\n\n\/\/ Write a set of annotations associated with a piece of content. Any annotations\n\/\/ already there will be removed\nfunc (s service) Write(contentUUID string, thing interface{}) (err error) {\n\tannotationsToWrite := thing.(annotations)\n\n\tif contentUUID == \"\" {\n\t\treturn errors.New(\"Content uuid is required\")\n\t}\n\tif err := validateAnnotations(&annotationsToWrite); err != nil {\n\t\tlog.Warnf(\"Validation of supplied annotations failed\")\n\t\treturn err\n\t}\n\n\tif len(annotationsToWrite) == 0 {\n\t\tlog.Warnf(\"No new annotations supplied for content uuid: %s\", contentUUID)\n\t}\n\n\tqueries := append([]*neoism.CypherQuery{}, dropAllAnnotationsQuery(contentUUID, s.platformVersion))\n\n\tvar statements = []string{}\n\tfor _, annotationToWrite := range annotationsToWrite {\n\t\tquery, err := createAnnotationQuery(contentUUID, annotationToWrite, s.platformVersion)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstatements = append(statements, query.Statement)\n\t\tqueries = append(queries, query)\n\t}\n\tlog.Infof(\"Updated Annotations for content uuid: %s\", contentUUID)\n\tlog.Debugf(\"For update, ran statements: %+v\", statements)\n\n\treturn s.cypherRunner.CypherBatch(queries)\n}\n\n\/\/ Check tests neo4j by running a simple cypher query\nfunc (s service) Check() error {\n\treturn neoutils.Check(s.cypherRunner)\n}\n\nfunc (s service) Count() (int, error) {\n\tresults := []struct {\n\t\tCount int `json:\"c\"`\n\t}{}\n\n\tquery := &neoism.CypherQuery{\n\t\tStatement: `MATCH ()-[r{platformVersion:{platformVersion}}]->()\n\t\t\t\t\t\t\t\tWHERE r.lifecycle = {lifecycle}\n\t\t\t\t\t\t\t\tOR r.lifecycle IS NULL\n\t\t\t\t\t\t\t\tRETURN count(r) as c`,\n\t\tParameters: neoism.Props{\"platformVersion\": s.platformVersion, \"lifecycle\": \"annotations-\" + s.platformVersion},\n\t\tResult: &results,\n\t}\n\n\terr := s.cypherRunner.CypherBatch([]*neoism.CypherQuery{query})\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn results[0].Count, nil\n}\n\nfunc (s service) Initialise() error {\n\treturn nil \/\/ No constraints need to be set up\n}\n\nfunc createAnnotationRelationship(relation string) (statement string) {\n\tstmt := `\n MERGE (content:Thing{uuid:{contentID}})\n MERGE (upp:Identifier:UPPIdentifier{value:{conceptID}})\n MERGE (upp)-[:IDENTIFIES]->(concept:Thing) ON CREATE SET concept.uuid = {conceptID}\n MERGE (content)-[pred:%s {platformVersion:{platformVersion}, lifecycle: {lifecycle}}]->(concept)\n SET pred={annProps}\n `\n\tstatement = fmt.Sprintf(stmt, relation)\n\treturn 
statement\n}\n\nfunc getRelationshipFromPredicate(predicate string) (relation string) {\n\tif predicate != \"\" {\n\t\trelation = relations[predicate]\n\t} else {\n\t\trelation = relations[\"mentions\"]\n\t}\n\treturn relation\n}\n\nfunc createAnnotationQuery(contentUUID string, ann annotation, platformVersion string) (*neoism.CypherQuery, error) {\n\tquery := neoism.CypherQuery{}\n\tthingID, err := extractUUIDFromURI(ann.Thing.ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ todo temporary change to deal with multiple provenances\n\t\/*if len(ann.Provenances) > 1 {\n\t\treturn nil, errors.New(\"Cannot insert a MENTIONS annotation with multiple provenances\")\n\t}*\/\n\n\tvar prov provenance\n\tparams := map[string]interface{}{}\n\tparams[\"platformVersion\"] = platformVersion\n\tparams[\"lifecycle\"] = \"annotations-\" + platformVersion \/\/ SET pred={annProps} replaces all properties, so lifecycle must be part of annProps\n\n\tif len(ann.Provenances) >= 1 {\n\t\tprov = ann.Provenances[0]\n\t\tannotatedBy, annotatedDateEpoch, relevanceScore, confidenceScore, supplied, err := extractDataFromProvenance(&prov)\n\n\t\tif err != nil {\n\t\t\tlog.Infof(\"ERROR=%s\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif supplied {\n\t\t\tif annotatedBy != \"\" {\n\t\t\t\tparams[\"annotatedBy\"] = annotatedBy\n\t\t\t}\n\t\t\tif prov.AtTime != \"\" {\n\t\t\t\tparams[\"annotatedDateEpoch\"] = annotatedDateEpoch\n\t\t\t\tparams[\"annotatedDate\"] = prov.AtTime\n\t\t\t}\n\t\t\tparams[\"relevanceScore\"] = relevanceScore\n\t\t\tparams[\"confidenceScore\"] = confidenceScore\n\t\t}\n\t}\n\n\trelation := getRelationshipFromPredicate(ann.Thing.Predicate)\n\tquery.Statement = createAnnotationRelationship(relation)\n\tquery.Parameters = map[string]interface{}{\n\t\t\"contentID\": contentUUID,\n\t\t\"conceptID\": thingID,\n\t\t\"platformVersion\": platformVersion,\n\t\t\"lifecycle\": \"annotations-\" + platformVersion,\n\t\t\"annProps\": params,\n\t}\n\treturn &query, nil\n}\n\nfunc extractDataFromProvenance(prov *provenance) (string, int64, float64, float64, bool, error) {\n\tif len(prov.Scores) == 0 {\n\t\treturn \"\", -1, -1, -1, false, nil\n\t}\n\tvar annotatedBy string\n\tvar annotatedDateEpoch int64\n\tvar confidenceScore, relevanceScore float64\n\tvar err error\n\tif prov.AgentRole != \"\" {\n\t\tannotatedBy, err = extractUUIDFromURI(prov.AgentRole)\n\t\tif err != nil {\n\t\t\treturn \"\", -1, -1, -1, true, err\n\t\t}\n\t}\n\tif prov.AtTime != \"\" {\n\t\tannotatedDateEpoch, err = convertAnnotatedDateToEpoch(prov.AtTime)\n\t\tif err != nil {\n\t\t\treturn \"\", -1, -1, -1, true, err\n\t\t}\n\t}\n\trelevanceScore, confidenceScore, err = extractScores(prov.Scores)\n\n\tif err != nil {\n\t\treturn \"\", -1, -1, -1, true, err\n\t}\n\treturn annotatedBy, annotatedDateEpoch, relevanceScore, confidenceScore, true, nil\n}\n\nfunc extractUUIDFromURI(uri string) (string, error) {\n\tresult := uuidExtractRegex.FindStringSubmatch(uri)\n\tif len(result) == 2 {\n\t\treturn result[1], nil\n\t}\n\treturn \"\", fmt.Errorf(\"Couldn't extract uuid from uri %s\", uri)\n}\n\nfunc convertAnnotatedDateToEpoch(annotatedDateString string) (int64, error) {\n\tdatetimeEpoch, err := time.Parse(time.RFC3339, annotatedDateString)\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn datetimeEpoch.Unix(), nil\n}\n\nfunc extractScores(scores []score) (float64, float64, error) {\n\tvar relevanceScore, confidenceScore float64\n\tfor _, score := range scores {\n\t\tscoringSystem := score.ScoringSystem\n\t\tvalue := score.Value\n\t\tswitch scoringSystem {\n\t\tcase relevanceScoringSystem:\n\t\t\trelevanceScore = value\n\t\tcase confidenceScoringSystem:\n\t\t\tconfidenceScore = value\n\t\t}\n\t}\n\treturn relevanceScore, confidenceScore, nil\n}\n\nfunc dropAllAnnotationsQuery(contentUUID string, platformVersion 
string) *neoism.CypherQuery {\n\tmatchStmtTemplate := `OPTIONAL MATCH (:Thing{uuid:{contentID}})-[r {platformVersion:{platformVersion}, lifecycle: {lifecycle}}]->(t:Thing)\n\t\t\t\t\t\t\t\t\t\t\tDELETE r`\n\n\tquery := neoism.CypherQuery{}\n\tquery.Statement = matchStmtTemplate\n\tquery.Parameters = neoism.Props{\"contentID\": contentUUID, \"platformVersion\": platformVersion, \"lifecycle\": \"annotations-\" + platformVersion}\n\treturn &query\n}\n\nfunc validateAnnotations(annotations *annotations) error {\n\t\/\/TODO - for consistency, we should probably just not create the annotation?\n\tfor _, annotation := range *annotations {\n\t\tif annotation.Thing.ID == \"\" {\n\t\t\treturn ValidationError{fmt.Sprintf(\"Concept uuid missing for annotation %+v\", annotation)}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ValidationError is thrown when the annotations are not valid because mandatory information is missing\ntype ValidationError struct {\n\tMsg string\n}\n\nfunc (v ValidationError) Error() string {\n\treturn v.Msg\n}\n\nfunc mapToResponseFormat(ann *annotation) {\n\tann.Thing.ID = mapper.IDURL(ann.Thing.ID)\n\t\/\/ We expect only ONE provenance - provenance value is considered valid even if the AgentRole is not specified. See: v1 - isClassifiedBy\n\tfor idx := range ann.Provenances {\n\t\tif ann.Provenances[idx].AgentRole != \"\" {\n\t\t\tann.Provenances[idx].AgentRole = mapper.IDURL(ann.Provenances[idx].AgentRole)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017-2020 the u-root Authors. All rights reserved\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package grub implements a grub config file parser.\n\/\/\n\/\/ See the grub manual https:\/\/www.gnu.org\/software\/grub\/manual\/grub\/ for\n\/\/ a reference of the configuration format\n\/\/ In particular the following pages:\n\/\/ - https:\/\/www.gnu.org\/software\/grub\/manual\/grub\/html_node\/Shell_002dlike-scripting.html\n\/\/ - https:\/\/www.gnu.org\/software\/grub\/manual\/grub\/html_node\/Commands.html\n\/\/\n\/\/ Currently, only the linux[16|efi], initrd[16|efi], menuentry and set\n\/\/ directives are partially supported.\npackage grub\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/url\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/u-root\/u-root\/pkg\/boot\"\n\t\"github.com\/u-root\/u-root\/pkg\/boot\/multiboot\"\n\t\"github.com\/u-root\/u-root\/pkg\/curl\"\n\t\"github.com\/u-root\/u-root\/pkg\/shlex\"\n\t\"github.com\/u-root\/u-root\/pkg\/uio\"\n)\n\nvar (\n\t\/\/ ErrInitrdUsedWithoutLinux is returned when an initrd directive is\n\t\/\/ not following a linux directive in the same menu entry\n\tErrInitrdUsedWithoutLinux = errors.New(\"missing linux directive before initrd\")\n\t\/\/ ErrModuleUsedWithoutMultiboot is returned when a module directive is\n\t\/\/ not following a multiboot directive in the same menu entry\n\tErrModuleUsedWithoutMultiboot = errors.New(\"missing multiboot directive before module\")\n)\n\nvar probeGrubFiles = []string{\n\t\"boot\/grub\/grub.cfg\",\n\t\"grub\/grub.cfg\",\n\t\"grub2\/grub.cfg\",\n}\n\n\/\/ ParseLocalConfig looks for a GRUB config in the disk partition mounted at\n\/\/ diskDir and parses out OSes to boot.\n\/\/\n\/\/ This... is at best crude, at worst totally wrong, since we fundamentally\n\/\/ assume that the kernels we boot are only on this one partition. 
But so is\n\/\/ this whole parser.\nfunc ParseLocalConfig(ctx context.Context, diskDir string) ([]boot.OSImage, error) {\n\twd := &url.URL{\n\t\tScheme: \"file\",\n\t\tPath: diskDir,\n\t}\n\n\tfor _, relname := range probeGrubFiles {\n\t\tc, err := ParseConfigFile(ctx, curl.DefaultSchemes, relname, wd)\n\t\tif curl.IsURLError(err) {\n\t\t\tcontinue\n\t\t}\n\t\treturn c, err\n\t}\n\treturn nil, fmt.Errorf(\"no valid grub config found\")\n}\n\n\/\/ ParseConfigFile parses a grub configuration as specified in\n\/\/ https:\/\/www.gnu.org\/software\/grub\/manual\/grub\/\n\/\/\n\/\/ Currently, only the linux[16|efi], initrd[16|efi], menuentry and set\n\/\/ directives are partially supported.\n\/\/\n\/\/ `wd` is the default scheme, host, and path for any files named as a\n\/\/ relative path - e.g. kernel, include, and initramfs paths are requested\n\/\/ relative to the wd.\nfunc ParseConfigFile(ctx context.Context, s curl.Schemes, configFile string, wd *url.URL) ([]boot.OSImage, error) {\n\tp := newParser(wd, s)\n\tif err := p.appendFile(ctx, configFile); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Don't add entries twice.\n\t\/\/\n\t\/\/ Multiple labels can refer to the same image, so we have to dedup by pointer.\n\tseenLinux := make(map[*boot.LinuxImage]struct{})\n\tseenMB := make(map[*boot.MultibootImage]struct{})\n\n\tif len(p.defaultEntry) > 0 {\n\t\tp.labelOrder = append([]string{p.defaultEntry}, p.labelOrder...)\n\t}\n\n\tvar images []boot.OSImage\n\tfor _, label := range p.labelOrder {\n\t\tif img, ok := p.linuxEntries[label]; ok {\n\t\t\tif _, ok := seenLinux[img]; !ok {\n\t\t\t\timages = append(images, img)\n\t\t\t\tseenLinux[img] = struct{}{}\n\t\t\t}\n\t\t}\n\n\t\tif img, ok := p.mbEntries[label]; ok {\n\t\t\tif _, ok := seenMB[img]; !ok {\n\t\t\t\timages = append(images, img)\n\t\t\t\tseenMB[img] = struct{}{}\n\t\t\t}\n\t\t}\n\t}\n\treturn images, nil\n}\n\ntype parser struct {\n\tlinuxEntries map[string]*boot.LinuxImage\n\tmbEntries map[string]*boot.MultibootImage\n\n\tlabelOrder []string\n\tdefaultEntry string\n\n\tW io.Writer\n\n\t\/\/ parser internals.\n\tnumEntry int\n\n\t\/\/ curEntry is the current entry number as a string.\n\tcurEntry string\n\n\t\/\/ curLabel is the last parsed label from a \"menuentry\".\n\tcurLabel string\n\n\twd *url.URL\n\tschemes curl.Schemes\n}\n\n\/\/ newParser returns a new grub parser using working directory `wd`\n\/\/ and schemes `s`.\n\/\/\n\/\/ If a path encountered in a configuration file is relative instead of a full\n\/\/ URL, `wd` is used as the \"working directory\" of that relative path; the\n\/\/ resulting URL is roughly `wd.String()\/path`.\n\/\/\n\/\/ `s` is used to get files referred to by URLs.\nfunc newParser(wd *url.URL, s curl.Schemes) *parser {\n\treturn &parser{\n\t\tlinuxEntries: make(map[string]*boot.LinuxImage),\n\t\tmbEntries: make(map[string]*boot.MultibootImage),\n\t\twd: wd,\n\t\tschemes: s,\n\t}\n}\n\nfunc parseURL(surl string, wd *url.URL) (*url.URL, error) {\n\tu, err := url.Parse(surl)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not parse URL %q: %v\", surl, err)\n\t}\n\n\tif len(u.Scheme) == 0 {\n\t\tu.Scheme = wd.Scheme\n\n\t\tif len(u.Host) == 0 {\n\t\t\t\/\/ If this is not there, it was likely just a path.\n\t\t\tu.Host = wd.Host\n\t\t\tu.Path = filepath.Join(wd.Path, filepath.Clean(u.Path))\n\t\t}\n\t}\n\treturn u, nil\n}\n\n\/\/ getFile parses `url` relative to the config's working directory and returns\n\/\/ an io.Reader for the requested url.\n\/\/\n\/\/ If url is just a relative path and not a 
full URL, c.wd is used as the\n\/\/ \"working directory\" of that relative path; the resulting URL is roughly\n\/\/ path.Join(wd.String(), url).\nfunc (c *parser) getFile(url string) (io.ReaderAt, error) {\n\tu, err := parseURL(url, c.wd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c.schemes.LazyFetch(u)\n}\n\n\/\/ appendFile parses the config file downloaded from `url` and adds it to `c`.\nfunc (c *parser) appendFile(ctx context.Context, url string) error {\n\tu, err := parseURL(url, c.wd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"Fetching %s\", u)\n\n\tr, err := c.schemes.Fetch(ctx, u)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconfig, err := uio.ReadAll(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(config) > 500 {\n\t\t\/\/ Avoid flooding the console on real systems\n\t\t\/\/ TODO: do we want to pass a verbose flag or a logger?\n\t\tlog.Printf(\"Got config file %s\", r)\n\t} else {\n\t\tlog.Printf(\"Got config file %s:\\n%s\\n\", r, string(config))\n\t}\n\treturn c.append(ctx, string(config))\n}\n\n\/\/ CmdlineQuote quotes the command line as grub-core\/lib\/cmdline.c does\nfunc cmdlineQuote(args []string) string {\n\tq := make([]string, len(args))\n\tfor i, s := range args {\n\t\ts = strings.Replace(s, `\\`, `\\\\`, -1)\n\t\ts = strings.Replace(s, `'`, `\\'`, -1)\n\t\ts = strings.Replace(s, `\"`, `\\\"`, -1)\n\t\tif strings.ContainsRune(s, ' ') {\n\t\t\ts = `\"` + s + `\"`\n\t\t}\n\t\tq[i] = s\n\t}\n\treturn strings.Join(q, \" \")\n}\n\n\/\/ append parses `config` and adds the respective configuration to `c`.\nfunc (c *parser) append(ctx context.Context, config string) error {\n\t\/\/ Here's a shitty parser.\n\tfor _, line := range strings.Split(config, \"\\n\") {\n\t\tkv := shlex.Argv(line)\n\t\tif len(kv) < 1 {\n\t\t\tcontinue\n\t\t}\n\t\tdirective := strings.ToLower(kv[0])\n\t\t\/\/ Used by tests (allow no parameters here)\n\t\tif c.W != nil && directive == \"echo\" {\n\t\t\tfmt.Fprintf(c.W, \"echo:%#v\\n\", kv[1:])\n\t\t}\n\n\t\tif len(kv) <= 1 {\n\t\t\tcontinue\n\t\t}\n\t\targ := kv[1]\n\n\t\tswitch directive {\n\t\tcase \"set\":\n\t\t\tvals := strings.SplitN(arg, \"=\", 2)\n\t\t\tif len(vals) == 2 {\n\t\t\t\t\/\/TODO handle vars? bootVars[vals[0]] = vals[1]\n\t\t\t\t\/\/log.Printf(\"grubvar: %s=%s\", vals[0], vals[1])\n\t\t\t\tif vals[0] == \"default\" {\n\t\t\t\t\tc.defaultEntry = vals[1]\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"configfile\":\n\t\t\t\/\/ TODO test that\n\t\t\tif err := c.appendFile(ctx, arg); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase \"menuentry\":\n\t\t\tc.curEntry = strconv.Itoa(c.numEntry)\n\t\t\tc.curLabel = arg\n\t\t\tc.numEntry++\n\t\t\tc.labelOrder = append(c.labelOrder, c.curEntry, c.curLabel)\n\n\t\tcase \"linux\", \"linux16\", \"linuxefi\":\n\t\t\tk, err := c.getFile(arg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t\/\/ from grub manual: \"Any initrd must be reloaded after using this command\" so we can replace the entry\n\t\t\tentry := &boot.LinuxImage{\n\t\t\t\tName: c.curLabel,\n\t\t\t\tKernel: k,\n\t\t\t\tCmdline: cmdlineQuote(kv[2:]),\n\t\t\t}\n\t\t\tc.linuxEntries[c.curEntry] = entry\n\t\t\tc.linuxEntries[c.curLabel] = entry\n\n\t\tcase \"initrd\", \"initrd16\", \"initrdefi\":\n\t\t\ti, err := c.getFile(arg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tentry, ok := c.linuxEntries[c.curEntry]\n\t\t\tif !ok {\n\t\t\t\treturn ErrInitrdUsedWithoutLinux\n\t\t\t}\n\t\t\tentry.Initrd = i\n\n\t\tcase \"multiboot\":\n\t\t\t\/\/ TODO handle --quirk-* arguments ? 
(change parsing)\n\t\t\tk, err := c.getFile(arg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t\/\/ from grub manual: \"Any initrd must be reloaded after using this command\" so we can replace the entry\n\t\t\tentry := &boot.MultibootImage{\n\t\t\t\tName: c.curLabel,\n\t\t\t\tKernel: k,\n\t\t\t\tCmdline: cmdlineQuote(kv[2:]),\n\t\t\t}\n\t\t\tc.mbEntries[c.curEntry] = entry\n\t\t\tc.mbEntries[c.curLabel] = entry\n\n\t\tcase \"module\":\n\t\t\t\/\/ TODO handle --nounzip arguments ? (change parsing)\n\t\t\tm, err := c.getFile(arg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tentry, ok := c.mbEntries[c.curEntry]\n\t\t\tif !ok {\n\t\t\t\treturn ErrModuleUsedWithoutMultiboot\n\t\t\t}\n\t\t\t\/\/ TODO: Lasy tryGzipFilter(m)\n\t\t\tmod := multiboot.Module{\n\t\t\t\tModule: m,\n\t\t\t\tName: arg,\n\t\t\t\tCmdLine: cmdlineQuote(kv[2:]),\n\t\t\t}\n\t\t\tentry.Modules = append(entry.Modules, mod)\n\n\t\t}\n\t}\n\treturn nil\n\n}\n<commit_msg>grub: make entry access safe and don't fatal out<commit_after>\/\/ Copyright 2017-2020 the u-root Authors. All rights reserved\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package grub implements a grub config file parser.\n\/\/\n\/\/ See the grub manual https:\/\/www.gnu.org\/software\/grub\/manual\/grub\/ for\n\/\/ a reference of the configuration format\n\/\/ In particular the following pages:\n\/\/ - https:\/\/www.gnu.org\/software\/grub\/manual\/grub\/html_node\/Shell_002dlike-scripting.html\n\/\/ - https:\/\/www.gnu.org\/software\/grub\/manual\/grub\/html_node\/Commands.html\n\/\/\n\/\/ Currently, only the linux[16|efi], initrd[16|efi], menuentry and set\n\/\/ directives are partially supported.\npackage grub\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/url\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/u-root\/u-root\/pkg\/boot\"\n\t\"github.com\/u-root\/u-root\/pkg\/boot\/multiboot\"\n\t\"github.com\/u-root\/u-root\/pkg\/curl\"\n\t\"github.com\/u-root\/u-root\/pkg\/shlex\"\n\t\"github.com\/u-root\/u-root\/pkg\/uio\"\n)\n\nvar probeGrubFiles = []string{\n\t\"boot\/grub\/grub.cfg\",\n\t\"grub\/grub.cfg\",\n\t\"grub2\/grub.cfg\",\n}\n\n\/\/ ParseLocalConfig looks for a GRUB config in the disk partition mounted at\n\/\/ diskDir and parses out OSes to boot.\n\/\/\n\/\/ This... is at best crude, at worst totally wrong, since we fundamentally\n\/\/ assume that the kernels we boot are only on this one partition. But so is\n\/\/ this whole parser.\nfunc ParseLocalConfig(ctx context.Context, diskDir string) ([]boot.OSImage, error) {\n\twd := &url.URL{\n\t\tScheme: \"file\",\n\t\tPath: diskDir,\n\t}\n\n\tfor _, relname := range probeGrubFiles {\n\t\tc, err := ParseConfigFile(ctx, curl.DefaultSchemes, relname, wd)\n\t\tif curl.IsURLError(err) {\n\t\t\tcontinue\n\t\t}\n\t\treturn c, err\n\t}\n\treturn nil, fmt.Errorf(\"no valid grub config found\")\n}\n\n\/\/ ParseConfigFile parses a grub configuration as specified in\n\/\/ https:\/\/www.gnu.org\/software\/grub\/manual\/grub\/\n\/\/\n\/\/ Currently, only the linux[16|efi], initrd[16|efi], menuentry and set\n\/\/ directives are partially supported.\n\/\/\n\/\/ `wd` is the default scheme, host, and path for any files named as a\n\/\/ relative path - e.g. 
kernel, include, and initramfs paths are requested\n\/\/ relative to the wd.\nfunc ParseConfigFile(ctx context.Context, s curl.Schemes, configFile string, wd *url.URL) ([]boot.OSImage, error) {\n\tp := newParser(wd, s)\n\tif err := p.appendFile(ctx, configFile); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Don't add entries twice.\n\t\/\/\n\t\/\/ Multiple labels can refer to the same image, so we have to dedup by pointer.\n\tseenLinux := make(map[*boot.LinuxImage]struct{})\n\tseenMB := make(map[*boot.MultibootImage]struct{})\n\n\tif len(p.defaultEntry) > 0 {\n\t\tp.labelOrder = append([]string{p.defaultEntry}, p.labelOrder...)\n\t}\n\n\tvar images []boot.OSImage\n\tfor _, label := range p.labelOrder {\n\t\tif img, ok := p.linuxEntries[label]; ok {\n\t\t\tif _, ok := seenLinux[img]; !ok {\n\t\t\t\timages = append(images, img)\n\t\t\t\tseenLinux[img] = struct{}{}\n\t\t\t}\n\t\t}\n\n\t\tif img, ok := p.mbEntries[label]; ok {\n\t\t\tif _, ok := seenMB[img]; !ok {\n\t\t\t\timages = append(images, img)\n\t\t\t\tseenMB[img] = struct{}{}\n\t\t\t}\n\t\t}\n\t}\n\treturn images, nil\n}\n\ntype parser struct {\n\tlinuxEntries map[string]*boot.LinuxImage\n\tmbEntries map[string]*boot.MultibootImage\n\n\tlabelOrder []string\n\tdefaultEntry string\n\n\tW io.Writer\n\n\t\/\/ parser internals.\n\tnumEntry int\n\n\t\/\/ curEntry is the current entry number as a string.\n\tcurEntry string\n\n\t\/\/ curLabel is the last parsed label from a \"menuentry\".\n\tcurLabel string\n\n\twd *url.URL\n\tschemes curl.Schemes\n}\n\n\/\/ newParser returns a new grub parser using working directory `wd`\n\/\/ and schemes `s`.\n\/\/\n\/\/ If a path encountered in a configuration file is relative instead of a full\n\/\/ URL, `wd` is used as the \"working directory\" of that relative path; the\n\/\/ resulting URL is roughly `wd.String()\/path`.\n\/\/\n\/\/ `s` is used to get files referred to by URLs.\nfunc newParser(wd *url.URL, s curl.Schemes) *parser {\n\treturn &parser{\n\t\tlinuxEntries: make(map[string]*boot.LinuxImage),\n\t\tmbEntries: make(map[string]*boot.MultibootImage),\n\t\twd: wd,\n\t\tschemes: s,\n\t}\n}\n\nfunc parseURL(surl string, wd *url.URL) (*url.URL, error) {\n\tu, err := url.Parse(surl)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not parse URL %q: %v\", surl, err)\n\t}\n\n\tif len(u.Scheme) == 0 {\n\t\tu.Scheme = wd.Scheme\n\n\t\tif len(u.Host) == 0 {\n\t\t\t\/\/ If this is not there, it was likely just a path.\n\t\t\tu.Host = wd.Host\n\t\t\tu.Path = filepath.Join(wd.Path, filepath.Clean(u.Path))\n\t\t}\n\t}\n\treturn u, nil\n}\n\n\/\/ getFile parses `url` relative to the config's working directory and returns\n\/\/ an io.Reader for the requested url.\n\/\/\n\/\/ If url is just a relative path and not a full URL, c.wd is used as the\n\/\/ \"working directory\" of that relative path; the resulting URL is roughly\n\/\/ path.Join(wd.String(), url).\nfunc (c *parser) getFile(url string) (io.ReaderAt, error) {\n\tu, err := parseURL(url, c.wd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c.schemes.LazyFetch(u)\n}\n\n\/\/ appendFile parses the config file downloaded from `url` and adds it to `c`.\nfunc (c *parser) appendFile(ctx context.Context, url string) error {\n\tu, err := parseURL(url, c.wd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"Fetching %s\", u)\n\n\tr, err := c.schemes.Fetch(ctx, u)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconfig, err := uio.ReadAll(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(config) > 500 {\n\t\t\/\/ Avoid flooding the console 
on real systems\n\t\t\/\/ TODO: do we want to pass a verbose flag or a logger?\n\t\tlog.Printf(\"Got config file %s\", r)\n\t} else {\n\t\tlog.Printf(\"Got config file %s:\\n%s\\n\", r, string(config))\n\t}\n\treturn c.append(ctx, string(config))\n}\n\n\/\/ CmdlineQuote quotes the command line as grub-core\/lib\/cmdline.c does\nfunc cmdlineQuote(args []string) string {\n\tq := make([]string, len(args))\n\tfor i, s := range args {\n\t\ts = strings.Replace(s, `\\`, `\\\\`, -1)\n\t\ts = strings.Replace(s, `'`, `\\'`, -1)\n\t\ts = strings.Replace(s, `\"`, `\\\"`, -1)\n\t\tif strings.ContainsRune(s, ' ') {\n\t\t\ts = `\"` + s + `\"`\n\t\t}\n\t\tq[i] = s\n\t}\n\treturn strings.Join(q, \" \")\n}\n\n\/\/ append parses `config` and adds the respective configuration to `c`.\n\/\/\n\/\/ NOTE: This parser has outlived its usefulness already, given that it doesn't\n\/\/ even understand the {} scoping in GRUB. But let's get the tests to pass, and\n\/\/ then we can do a rewrite.\nfunc (c *parser) append(ctx context.Context, config string) error {\n\t\/\/ Here's a shitty parser.\n\tfor _, line := range strings.Split(config, \"\\n\") {\n\t\tkv := shlex.Argv(line)\n\t\tif len(kv) < 1 {\n\t\t\tcontinue\n\t\t}\n\t\tdirective := strings.ToLower(kv[0])\n\t\t\/\/ Used by tests (allow no parameters here)\n\t\tif c.W != nil && directive == \"echo\" {\n\t\t\tfmt.Fprintf(c.W, \"echo:%#v\\n\", kv[1:])\n\t\t}\n\n\t\tif len(kv) <= 1 {\n\t\t\tcontinue\n\t\t}\n\t\targ := kv[1]\n\n\t\tswitch directive {\n\t\tcase \"set\":\n\t\t\tvals := strings.SplitN(arg, \"=\", 2)\n\t\t\tif len(vals) == 2 {\n\t\t\t\t\/\/TODO handle vars? bootVars[vals[0]] = vals[1]\n\t\t\t\t\/\/log.Printf(\"grubvar: %s=%s\", vals[0], vals[1])\n\t\t\t\tif vals[0] == \"default\" {\n\t\t\t\t\tc.defaultEntry = vals[1]\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"configfile\":\n\t\t\t\/\/ TODO test that\n\t\t\tif err := c.appendFile(ctx, arg); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase \"menuentry\":\n\t\t\tc.curEntry = strconv.Itoa(c.numEntry)\n\t\t\tc.curLabel = arg\n\t\t\tc.numEntry++\n\t\t\tc.labelOrder = append(c.labelOrder, c.curEntry, c.curLabel)\n\n\t\tcase \"linux\", \"linux16\", \"linuxefi\":\n\t\t\tk, err := c.getFile(arg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t\/\/ from grub manual: \"Any initrd must be reloaded after using this command\" so we can replace the entry\n\t\t\tentry := &boot.LinuxImage{\n\t\t\t\tName: c.curLabel,\n\t\t\t\tKernel: k,\n\t\t\t\tCmdline: cmdlineQuote(kv[2:]),\n\t\t\t}\n\t\t\tc.linuxEntries[c.curEntry] = entry\n\t\t\tc.linuxEntries[c.curLabel] = entry\n\n\t\tcase \"initrd\", \"initrd16\", \"initrdefi\":\n\t\t\tif e, ok := c.linuxEntries[c.curEntry]; ok {\n\t\t\t\ti, err := c.getFile(arg)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\te.Initrd = i\n\t\t\t}\n\n\t\tcase \"multiboot\":\n\t\t\t\/\/ TODO handle --quirk-* arguments ? (change parsing)\n\t\t\tk, err := c.getFile(arg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t\/\/ from grub manual: \"Any initrd must be reloaded after using this command\" so we can replace the entry\n\t\t\tentry := &boot.MultibootImage{\n\t\t\t\tName: c.curLabel,\n\t\t\t\tKernel: k,\n\t\t\t\tCmdline: cmdlineQuote(kv[2:]),\n\t\t\t}\n\t\t\tc.mbEntries[c.curEntry] = entry\n\t\t\tc.mbEntries[c.curLabel] = entry\n\n\t\tcase \"module\":\n\t\t\t\/\/ TODO handle --nounzip arguments ? 
(change parsing)\n\t\t\tif e, ok := c.mbEntries[c.curEntry]; ok {\n\t\t\t\tm, err := c.getFile(arg)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t\/\/ TODO: Lazy tryGzipFilter(m)\n\t\t\t\tmod := multiboot.Module{\n\t\t\t\t\tModule: m,\n\t\t\t\t\tName: arg,\n\t\t\t\t\tCmdLine: cmdlineQuote(kv[2:]),\n\t\t\t\t}\n\t\t\t\te.Modules = append(e.Modules, mod)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package kit\n\n\/\/ InstrumentingTemplate\nvar InstrumentingTemplate = `\n{{$schema := .Schema}}\n{{$title := ToUpperFirst .Schema.Title}}\n\n\npackage {{ToLower $title}}\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/go-kit\/kit\/endpoint\"\n\t\"github.com\/go-kit\/kit\/log\"\n\t\"github.com\/go-kit\/kit\/metrics\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc DefaultMiddlewares(method string, requestCount metrics.Counter, requestLatency metrics.TimeHistogram, logger log.Logger) endpoint.Middleware {\n\treturn endpoint.Chain(\n\t\tRequestLatencyMiddleware(method, requestLatency),\n\t\tRequestCountMiddleware(method, requestCount),\n\t\tRequestLoggingMiddleware(method, logger),\n\t)\n}\n\nfunc RequestCountMiddleware(method string, requestCount metrics.Counter) endpoint.Middleware {\n\treturn func(next endpoint.Endpoint) endpoint.Endpoint {\n\t\treturn func(ctx context.Context, request 
:= .Schema}}\n{{$title := ToUpperFirst .Schema.Title}}\n\n\npackage {{ToLower $title}}\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/go-kit\/kit\/endpoint\"\n\thttptransport \"github.com\/go-kit\/kit\/transport\/http\"\n)\n\n\/\/ Handler functions\n\n{{range $funcKey, $funcValue := $schema.Functions}}\nfunc New{{$funcKey}}Handler(ctx context.Context, svc {{$title}}Service, middleware endpoint.Middleware, options ...httptransport.ServerOption) *httptransport.Server {\n\treturn httptransport.NewServer(\n\t\tctx,\n\t\tmiddleware(make{{$funcKey}}Endpoint(svc)),\n\t\tdecode{{$funcKey}}Request,\n\t\tencodeResponse,\n\t\toptions...,\n\t)\n}\n{{end}}\n\n\/\/ Endpoint functions\n\n{{range $funcKey, $funcValue := $schema.Functions}}\nfunc make{{$funcKey}}Endpoint(svc {{$title}}Service) endpoint.Endpoint {\n\treturn func(ctx context.Context, request interface{}) (interface{}, error) {\n\t\treq := request.(*{{Argumentize $funcValue.Properties.incoming}})\n\t\treturn svc.{{$funcKey}}(ctx, req)\n\t}\n}\n{{end}}\n`\n\n\/\/ TransportHTTPClientTemplate\nvar TransportHTTPClientTemplate = `\n{{$schema := .Schema}}\n{{$title := ToUpperFirst .Schema.Title}}\n\n\npackage {{ToLower $title}}\n\nimport (\n\tjujuratelimit \"github.com\/juju\/ratelimit\"\n\t\"github.com\/sony\/gobreaker\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/go-kit\/kit\/circuitbreaker\"\n\t\"github.com\/go-kit\/kit\/endpoint\"\n\t\"github.com\/go-kit\/kit\/loadbalancer\"\n\t\"github.com\/go-kit\/kit\/loadbalancer\/static\"\n\t\"github.com\/go-kit\/kit\/log\"\n\tkitratelimit \"github.com\/go-kit\/kit\/ratelimit\"\n\thttptransport \"github.com\/go-kit\/kit\/transport\/http\"\n)\n\n\n\/\/ client\ntype {{$title}}Client struct {\n{{range $funcKey, $funcValue := $schema.Functions}}\n\t{{$funcKey}}Endpoint endpoint.Endpoint\n{{end}}}\n\n\/\/ constructor\nfunc New{{$title}}Client(proxies []string, ctx context.Context, maxAttempt int, maxTime time.Duration, qps int, logger log.Logger) *{{$title}}Client {\nreturn &{{$title}}Client{\n{{range $funcKey, $funcValue := $schema.Functions}}\n{{$funcKey}}Endpoint : new{{$funcKey}}ClientEndpoint(proxies, ctx, maxAttempt, maxTime, qps, logger),{{end}}\n}\n}\n\n{{range $funcKey, $funcValue := $schema.Functions}}\nfunc ({{Pointerize $title}} *{{$title}}Client) {{$funcKey}}(ctx context.Context, req *{{Argumentize $funcValue.Properties.incoming}}) (*{{Argumentize $funcValue.Properties.outgoing}}, error) {\n\tres, err := {{Pointerize $title}}.{{$funcKey}}Endpoint(ctx, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.(*{{Argumentize $funcValue.Properties.outgoing}}), nil\n}\n{{end}}\n\n\n\/\/ Client Endpoint functions\n{{range $funcKey, $funcValue := $schema.Functions}}\nfunc new{{$funcKey}}ClientEndpoint(proxies []string, ctx context.Context, maxAttempt int, maxTime time.Duration, qps int, logger log.Logger) endpoint.Endpoint {\n\tfactory := createFactory(ctx, qps, make{{$funcKey}}Proxy)\n\treturn defaultClientEndpointCreator(proxies, maxAttempt, maxTime, logger, factory)\n}\n{{end}}\n\n\n{{range $funcKey, $funcValue := $schema.Functions}}\nfunc make{{$funcKey}}Proxy(ctx context.Context, instance string) endpoint.Endpoint {\n\treturn httptransport.NewClient(\n\t\t\"POST\",\n\t\tcreateProxyURL(instance, \"{{ToLower $funcKey}}\"),\n\t\tencodeRequest,\n\t\tdecode{{$funcKey}}Response,\n\t).Endpoint()\n}\n{{end}}\n\n\/\/ Proxy functions\n\nfunc createProxyURL(instance, endpoint string) *url.URL {\n\tif !strings.HasPrefix(instance, 
\"http\") {\n\t\tinstance = \"http:\/\/\" + instance\n\t}\n\tu, err := url.Parse(instance)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif u.Path == \"\" {\n\t\tu.Path = endpoint\n\t}\n\n\treturn u\n}\n\ntype proxyFunc func(context.Context, string) endpoint.Endpoint\n\nfunc createFactory(ctx context.Context, qps int, pf proxyFunc) loadbalancer.Factory {\n\treturn func(instance string) (endpoint.Endpoint, io.Closer, error) {\n\t\tvar e endpoint.Endpoint\n\t\te = pf(ctx, instance)\n\t\te = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{}))(e)\n\t\te = kitratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(float64(qps), int64(qps)))(e)\n\t\treturn e, nil, nil\n\t}\n}\n\nfunc defaultClientEndpointCreator(\n\tproxies []string,\n\tmaxAttempts int,\n\tmaxTime time.Duration,\n\tlogger log.Logger,\n\tfactory loadbalancer.Factory,\n) endpoint.Endpoint {\n\n\tpublisher := static.NewPublisher(\n\t\tproxies,\n\t\tfactory,\n\t\tlogger,\n\t)\n\n\tlb := loadbalancer.NewRoundRobin(publisher)\n\treturn loadbalancer.Retry(maxAttempts, maxTime, lb)\n}\n\n`\n\n\/\/ TransportHTTPSemioticsTemplate\nvar TransportHTTPSemioticsTemplate = `\n{{$schema := .Schema}}\n{{$title := ToUpperFirst .Schema.Title}}\n\n\npackage {{ToLower $title}}\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/go-kit\/kit\/endpoint\"\n\thttptransport \"github.com\/go-kit\/kit\/transport\/http\"\n)\n\n\/\/ Decode Request functions\n\n{{range $funcKey, $funcValue := $schema.Functions}}\nfunc decode{{$funcKey}}Request(r *http.Request) (interface{}, error) {\n\tvar req {{Argumentize $funcValue.Properties.incoming}}\n\tif err := json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &req, nil\n}\n{{end}}\n\n\/\/ Decode Response functions\n\n{{range $funcKey, $funcValue := $schema.Functions}}\nfunc decode{{$funcKey}}Response(r *http.Response) (interface{}, error) {\n\tvar res {{Argumentize $funcValue.Properties.incoming}}\n\tif err := json.NewDecoder(r.Body).Decode(&res); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &res, nil\n}\n{{end}}\n\n\/\/ Encode request function\n\nfunc encodeRequest(r *http.Request, request interface{}) error {\n\tvar buf bytes.Buffer\n\tif err := json.NewEncoder(&buf).Encode(request); err != nil {\n\t\treturn err\n\t}\n\tr.Body = ioutil.NopCloser(&buf)\n\treturn nil\n}\n\n\/\/ Encode response function\n\nfunc encodeResponse(rw http.ResponseWriter, response interface{}) error {\n\treturn json.NewEncoder(rw).Encode(response)\n}\n`\n<commit_msg>Kit: better go formatting<commit_after>package kit\n\n\/\/ InstrumentingTemplate\nvar InstrumentingTemplate = `\n{{$schema := .Schema}}\n{{$title := ToUpperFirst .Schema.Title}}\n\n\npackage {{ToLower $title}}\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/go-kit\/kit\/endpoint\"\n\t\"github.com\/go-kit\/kit\/log\"\n\t\"github.com\/go-kit\/kit\/metrics\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc DefaultMiddlewares(method string, requestCount metrics.Counter, requestLatency metrics.TimeHistogram, logger log.Logger) endpoint.Middleware {\n\treturn endpoint.Chain(\n\t\tRequestLatencyMiddleware(method, requestLatency),\n\t\tRequestCountMiddleware(method, requestCount),\n\t\tRequestLoggingMiddleware(method, logger),\n\t)\n}\n\nfunc RequestCountMiddleware(method string, requestCount metrics.Counter) endpoint.Middleware {\n\treturn func(next endpoint.Endpoint) endpoint.Endpoint {\n\t\treturn func(ctx context.Context, request 
interface{}) (response interface{}, err error) {\n\t\t\tdefer func() {\n\t\t\t\tmethodField := metrics.Field{Key: \"method\", Value: method}\n\t\t\t\terrorField := metrics.Field{Key: \"error\", Value: fmt.Sprintf(\"%v\", err)}\n\t\t\t\trequestCount.With(methodField).With(errorField).Add(1)\n\t\t\t}()\n\n\t\t\tresponse, err = next(ctx, request)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc RequestLatencyMiddleware(method string, requestLatency metrics.TimeHistogram) endpoint.Middleware {\n\treturn func(next endpoint.Endpoint) endpoint.Endpoint {\n\t\treturn func(ctx context.Context, request interface{}) (response interface{}, err error) {\n\t\t\tdefer func(begin time.Time) {\n\t\t\t\tmethodField := metrics.Field{Key: \"method\", Value: method}\n\t\t\t\terrorField := metrics.Field{Key: \"error\", Value: fmt.Sprintf(\"%v\", err)}\n\t\t\t\trequestLatency.With(methodField).With(errorField).Observe(time.Since(begin))\n\t\t\t}(time.Now())\n\n\t\t\tresponse, err = next(ctx, request)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc RequestLoggingMiddleware(method string, logger log.Logger) endpoint.Middleware {\n\treturn func(next endpoint.Endpoint) endpoint.Endpoint {\n\t\treturn func(ctx context.Context, request interface{}) (response interface{}, err error) {\n\t\t\tdefer func(begin time.Time) {\n\t\t\t\tinput, _ := json.Marshal(request)\n\t\t\t\toutput, _ := json.Marshal(response)\n\t\t\t\t_ = logger.Log(\n\t\t\t\t\t\"method\", method,\n\t\t\t\t\t\"input\", string(input),\n\t\t\t\t\t\"output\", string(output),\n\t\t\t\t\t\"err\", err,\n\t\t\t\t\t\"took\", time.Since(begin),\n\t\t\t\t)\n\t\t\t}(time.Now())\n\t\t\tresponse, err = next(ctx, request)\n\t\t\treturn\n\t\t}\n\t}\n}\n`\n\n\/\/ InterfaceTemplate\nvar InterfaceTemplate = `\n{{$schema := .Schema}}\n{{$title := ToUpperFirst .Schema.Title}}\n\npackage {{ToLower $title}}\n\ntype {{$title}}Service interface {\n{{range $funcKey, $funcValue := $schema.Functions}}\n{{$funcKey}}(ctx context.Context, req *{{Argumentize $funcValue.Properties.incoming}}) (res *{{Argumentize $funcValue.Properties.outgoing}}, err error){{end}}\n}\n`\n\n\/\/ TransportHTTPServerTemplate\nvar TransportHTTPServerTemplate = `\n{{$schema := .Schema}}\n{{$title := ToUpperFirst .Schema.Title}}\n\n\npackage {{ToLower $title}}\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/go-kit\/kit\/endpoint\"\n\thttptransport \"github.com\/go-kit\/kit\/transport\/http\"\n)\n\n\/\/ Handler functions\n\n{{range $funcKey, $funcValue := $schema.Functions}}\nfunc New{{$funcKey}}Handler(ctx context.Context, svc {{$title}}Service, middleware endpoint.Middleware, options ...httptransport.ServerOption) *httptransport.Server {\n\treturn httptransport.NewServer(\n\t\tctx,\n\t\tmiddleware(make{{$funcKey}}Endpoint(svc)),\n\t\tdecode{{$funcKey}}Request,\n\t\tencodeResponse,\n\t\toptions...,\n\t)\n}\n{{end}}\n\n\/\/ Endpoint functions\n\n{{range $funcKey, $funcValue := $schema.Functions}}\nfunc make{{$funcKey}}Endpoint(svc {{$title}}Service) endpoint.Endpoint {\n\treturn func(ctx context.Context, request interface{}) (interface{}, error) {\n\t\treq := request.(*{{Argumentize $funcValue.Properties.incoming}})\n\t\treturn svc.{{$funcKey}}(ctx, req)\n\t}\n}\n{{end}}\n`\n\n\/\/ TransportHTTPClientTemplate\nvar TransportHTTPClientTemplate = `\n{{$schema := .Schema}}\n{{$title := ToUpperFirst .Schema.Title}}\n\n\npackage {{ToLower $title}}\n\nimport (\n\tjujuratelimit 
\"github.com\/juju\/ratelimit\"\n\t\"github.com\/sony\/gobreaker\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/go-kit\/kit\/circuitbreaker\"\n\t\"github.com\/go-kit\/kit\/endpoint\"\n\t\"github.com\/go-kit\/kit\/loadbalancer\"\n\t\"github.com\/go-kit\/kit\/loadbalancer\/static\"\n\t\"github.com\/go-kit\/kit\/log\"\n\tkitratelimit \"github.com\/go-kit\/kit\/ratelimit\"\n\thttptransport \"github.com\/go-kit\/kit\/transport\/http\"\n)\n\n\n\/\/ {{$title}}Client holds remote endpoint functions\n\/\/ Satisfies {{$title}}Service interface\ntype {{$title}}Client struct {\n{{range $funcKey, $funcValue := $schema.Functions}}\/\/ {{$funcKey}}Endpoint provides remote call to {{ToLower $funcKey}} endpoint\n\t{{$funcKey}}Endpoint endpoint.Endpoint\n\n{{end}}}\n\n\/\/ New{{$title}}Client creates a new client for {{$title}}Service\nfunc New{{$title}}Client(proxies []string, ctx context.Context, maxAttempt int, maxTime time.Duration, qps int, logger log.Logger) *{{$title}}Client {\nreturn &{{$title}}Client{\n{{range $funcKey, $funcValue := $schema.Functions}}\n{{$funcKey}}Endpoint : new{{$funcKey}}ClientEndpoint(proxies, ctx, maxAttempt, maxTime, qps, logger),{{end}}\n}\n}\n\n{{range $funcKey, $funcValue := $schema.Functions}}\n{{AsComment $funcValue.Description}}func ({{Pointerize $title}} *{{$title}}Client) {{$funcKey}}(ctx context.Context, req *{{Argumentize $funcValue.Properties.incoming}}) (*{{Argumentize $funcValue.Properties.outgoing}}, error) {\n\tres, err := {{Pointerize $title}}.{{$funcKey}}Endpoint(ctx, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.(*{{Argumentize $funcValue.Properties.outgoing}}), nil\n}\n{{end}}\n\n\n\/\/ Client Endpoint functions\n{{range $funcKey, $funcValue := $schema.Functions}}\nfunc new{{$funcKey}}ClientEndpoint(proxies []string, ctx context.Context, maxAttempt int, maxTime time.Duration, qps int, logger log.Logger) endpoint.Endpoint {\n\tfactory := createFactory(ctx, qps, make{{$funcKey}}Proxy)\n\treturn defaultClientEndpointCreator(proxies, maxAttempt, maxTime, logger, factory)\n}\n{{end}}\n\n\n{{range $funcKey, $funcValue := $schema.Functions}}\nfunc make{{$funcKey}}Proxy(ctx context.Context, instance string) endpoint.Endpoint {\n\treturn httptransport.NewClient(\n\t\t\"POST\",\n\t\tcreateProxyURL(instance, \"{{ToLower $funcKey}}\"),\n\t\tencodeRequest,\n\t\tdecode{{$funcKey}}Response,\n\t).Endpoint()\n}\n{{end}}\n\n\/\/ Proxy functions\n\nfunc createProxyURL(instance, endpoint string) *url.URL {\n\tif !strings.HasPrefix(instance, \"http\") {\n\t\tinstance = \"http:\/\/\" + instance\n\t}\n\tu, err := url.Parse(instance)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif u.Path == \"\" {\n\t\tu.Path = endpoint\n\t}\n\n\treturn u\n}\n\ntype proxyFunc func(context.Context, string) endpoint.Endpoint\n\nfunc createFactory(ctx context.Context, qps int, pf proxyFunc) loadbalancer.Factory {\n\treturn func(instance string) (endpoint.Endpoint, io.Closer, error) {\n\t\tvar e endpoint.Endpoint\n\t\te = pf(ctx, instance)\n\t\te = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{}))(e)\n\t\te = kitratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(float64(qps), int64(qps)))(e)\n\t\treturn e, nil, nil\n\t}\n}\n\nfunc defaultClientEndpointCreator(\n\tproxies []string,\n\tmaxAttempts int,\n\tmaxTime time.Duration,\n\tlogger log.Logger,\n\tfactory loadbalancer.Factory,\n) endpoint.Endpoint {\n\n\tpublisher := static.NewPublisher(\n\t\tproxies,\n\t\tfactory,\n\t\tlogger,\n\t)\n\n\tlb := 
loadbalancer.NewRoundRobin(publisher)\n\treturn loadbalancer.Retry(maxAttempts, maxTime, lb)\n}\n\n`\n\n\/\/ TransportHTTPSemioticsTemplate\nvar TransportHTTPSemioticsTemplate = `\n{{$schema := .Schema}}\n{{$title := ToUpperFirst .Schema.Title}}\n\n\npackage {{ToLower $title}}\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/go-kit\/kit\/endpoint\"\n\thttptransport \"github.com\/go-kit\/kit\/transport\/http\"\n)\n\n\/\/ Decode Request functions\n\n{{range $funcKey, $funcValue := $schema.Functions}}\nfunc decode{{$funcKey}}Request(r *http.Request) (interface{}, error) {\n\tvar req {{Argumentize $funcValue.Properties.incoming}}\n\tif err := json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &req, nil\n}\n{{end}}\n\n\/\/ Decode Response functions\n\n{{range $funcKey, $funcValue := $schema.Functions}}\nfunc decode{{$funcKey}}Response(r *http.Response) (interface{}, error) {\n\tvar res {{Argumentize $funcValue.Properties.incoming}}\n\tif err := json.NewDecoder(r.Body).Decode(&res); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &res, nil\n}\n{{end}}\n\n\/\/ Encode request function\n\nfunc encodeRequest(r *http.Request, request interface{}) error {\n\tvar buf bytes.Buffer\n\tif err := json.NewEncoder(&buf).Encode(request); err != nil {\n\t\treturn err\n\t}\n\tr.Body = ioutil.NopCloser(&buf)\n\treturn nil\n}\n\n\/\/ Encode response function\n\nfunc encodeResponse(rw http.ResponseWriter, response interface{}) error {\n\treturn json.NewEncoder(rw).Encode(response)\n}\n`\n<|endoftext|>"} {"text":"<commit_before>package config\n\ntype RawGit struct {\n\t*RawGitExport `yaml:\",inline\"`\n\tAs string `yaml:\"as,omitempty\"`\n\tUrl string `yaml:\"url,omitempty\"`\n\tBranch string `yaml:\"branch,omitempty\"`\n\tCommit string `yaml:\"commit,omitempty\"`\n\tRawStageDependencies *RawStageDependencies `yaml:\"stageDependencies,omitempty\"`\n\n\tRawDimg *RawDimg `yaml:\"-\"` \/\/ parent\n\n\tUnsupportedAttributes map[string]interface{} `yaml:\",inline\"`\n}\n\nfunc (c *RawGit) Type() string {\n\tif c.Url != \"\" {\n\t\treturn \"remote\"\n\t}\n\treturn \"local\"\n}\n\nfunc (c *RawGit) UnmarshalYAML(unmarshal func(interface{}) error) error {\n\tif parent, ok := ParentStack.Peek().(*RawDimg); ok {\n\t\tc.RawDimg = parent\n\t}\n\n\tParentStack.Push(c)\n\ttype plain RawGit\n\terr := unmarshal((*plain)(c))\n\tParentStack.Pop()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := CheckOverflow(c.UnsupportedAttributes, c); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *RawGit) ToGitLocalDirective() (gitLocal *GitLocal, err error) {\n\tgitLocal = &GitLocal{}\n\n\tif gitExport, err := c.RawGitExport.ToDirective(); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tgitLocal.GitExport = gitExport\n\t}\n\n\tif c.RawStageDependencies != nil {\n\t\tif stageDependencies, err := c.RawStageDependencies.ToDirective(); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tgitLocal.StageDependencies = stageDependencies\n\t\t}\n\t}\n\n\tgitLocal.As = c.As\n\n\tgitLocal.Raw = c\n\n\tif err := c.ValidateGitLocalDirective(gitLocal); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn gitLocal, nil\n}\n\nfunc (c *RawGit) ValidateGitLocalDirective(gitLocal *GitLocal) (err error) {\n\tif err := gitLocal.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *RawGit) ToGitRemoteDirective() (gitRemote *GitRemote, err error) {\n\tgitRemote = &GitRemote{}\n\n\tif gitLocal, err := c.ToGitLocalDirective(); 
err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tgitRemote.GitLocal = gitLocal\n\t}\n\n\tgitRemote.Branch = c.Branch\n\tgitRemote.Commit = c.Commit\n\tgitRemote.Url = c.Url\n\t\/\/ TODO: gitRemote.Name = extract the name from c.Url\n\n\tgitRemote.Raw = c\n\n\tif err := c.ValidateGitRemoteDirective(gitRemote); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn gitRemote, nil\n}\n\nfunc (c *RawGit) ValidateGitRemoteDirective(gitRemote *GitRemote) (err error) {\n\tif err := gitRemote.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>fix panic \"Option ,inline needs a struct value field\"<commit_after>package config\n\ntype RawGit struct {\n\tRawGitExport `yaml:\",inline\"`\n\tAs string `yaml:\"as,omitempty\"`\n\tUrl string `yaml:\"url,omitempty\"`\n\tBranch string `yaml:\"branch,omitempty\"`\n\tCommit string `yaml:\"commit,omitempty\"`\n\tRawStageDependencies *RawStageDependencies `yaml:\"stageDependencies,omitempty\"`\n\n\tRawDimg *RawDimg `yaml:\"-\"` \/\/ parent\n\n\tUnsupportedAttributes map[string]interface{} `yaml:\",inline\"`\n}\n\nfunc (c *RawGit) Type() string {\n\tif c.Url != \"\" {\n\t\treturn \"remote\"\n\t}\n\treturn \"local\"\n}\n\nfunc (c *RawGit) UnmarshalYAML(unmarshal func(interface{}) error) error {\n\tif parent, ok := ParentStack.Peek().(*RawDimg); ok {\n\t\tc.RawDimg = parent\n\t}\n\n\tParentStack.Push(c)\n\ttype plain RawGit\n\terr := unmarshal((*plain)(c))\n\tParentStack.Pop()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := CheckOverflow(c.UnsupportedAttributes, c); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *RawGit) ToGitLocalDirective() (gitLocal *GitLocal, err error) {\n\tgitLocal = &GitLocal{}\n\n\tif gitExport, err := c.RawGitExport.ToDirective(); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tgitLocal.GitExport = gitExport\n\t}\n\n\tif c.RawStageDependencies != nil {\n\t\tif stageDependencies, err := c.RawStageDependencies.ToDirective(); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tgitLocal.StageDependencies = stageDependencies\n\t\t}\n\t}\n\n\tgitLocal.As = c.As\n\n\tgitLocal.Raw = c\n\n\tif err := c.ValidateGitLocalDirective(gitLocal); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn gitLocal, nil\n}\n\nfunc (c *RawGit) ValidateGitLocalDirective(gitLocal *GitLocal) (err error) {\n\tif err := gitLocal.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *RawGit) ToGitRemoteDirective() (gitRemote *GitRemote, err error) {\n\tgitRemote = &GitRemote{}\n\n\tif gitLocal, err := c.ToGitLocalDirective(); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tgitRemote.GitLocal = gitLocal\n\t}\n\n\tgitRemote.Branch = c.Branch\n\tgitRemote.Commit = c.Commit\n\tgitRemote.Url = c.Url\n\t\/\/ TODO: gitRemote.Name = extract the name from c.Url\n\n\tgitRemote.Raw = c\n\n\tif err := c.ValidateGitRemoteDirective(gitRemote); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn gitRemote, nil\n}\n\nfunc (c *RawGit) ValidateGitRemoteDirective(gitRemote *GitRemote) (err error) {\n\tif err := gitRemote.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package data\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/feedhenry\/mcp-standalone\/pkg\/mobile\"\n\t\"github.com\/pkg\/errors\"\n\tmeta_v1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\tcorev1 \"k8s.io\/client-go\/kubernetes\/typed\/core\/v1\"\n\tv1 \"k8s.io\/client-go\/pkg\/api\/v1\"\n)\n\nconst apiKeyMapName = \"mcp-mobile-keys\"\n\n\/\/ MobileAppValidator defines what a validator 
should do\ntype MobileAppValidator interface {\n\tPreCreate(a *mobile.App) error\n\tPreUpdate(old *mobile.App, new *mobile.App) error\n}\n\n\/\/ MobileAppRepo interacts with the data store that backs the mobile objects\ntype MobileAppRepo struct {\n\tclient corev1.ConfigMapInterface\n\tvalidator MobileAppValidator\n}\n\n\/\/ NewMobileAppRepo instantiates a new MobileAppRepo\nfunc NewMobileAppRepo(c corev1.ConfigMapInterface, v MobileAppValidator) *MobileAppRepo {\n\trep := &MobileAppRepo{\n\t\tclient: c,\n\t\tvalidator: v,\n\t}\n\tif rep.validator == nil {\n\t\trep.validator = &DefaultMobileAppValidator{}\n\t}\n\treturn rep\n}\n\n\/\/ UpdateAppAPIKeys adds new app api key to apiKey mapping\nfunc (mar *MobileAppRepo) UpdateAppAPIKeys(app *mobile.App) error {\n\tcm, err := mar.client.Get(apiKeyMapName, meta_v1.GetOptions{})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"updating api key config map, could not read\")\n\t}\n\tif cm.Data == nil {\n\t\tcm.Data = map[string]string{}\n\t}\n\tcm.Data[app.ID] = app.APIKey\n\tif _, err := mar.client.Update(cm); err != nil {\n\t\treturn errors.Wrap(err, \"updating api key map, could not save map\")\n\t}\n\treturn nil\n}\n\n\/\/ DeleteAppAPIKey remove api key from apiKey mapping\nfunc (mar *MobileAppRepo) RemoveAppAPIKeyByID(appID string) error {\n\tcm, err := mar.client.Get(apiKeyMapName, meta_v1.GetOptions{})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"deleting api key map, could not read\")\n\t}\n\tif cm.Data == nil {\n\t\tcm.Data = map[string]string{}\n\t}\n\tdelete(cm.Data, appID)\n\tif _, err := mar.client.Update(cm); err != nil {\n\t\treturn errors.Wrap(err, \"deleting api key map, could not save map\")\n\t}\n\treturn nil\n}\n\n\/\/ CreateAppAPIKeyMap ensures that the API Key map is created\nfunc (mar *MobileAppRepo) CreateAppAPIKeyMap() error {\n\t_, err := mar.client.Get(apiKeyMapName, meta_v1.GetOptions{})\n\tif err != nil {\n\t\t\/\/ apiKey map may not exist, create it\n\t\t_, err := mar.client.Create(&v1.ConfigMap{\n\t\t\tObjectMeta: meta_v1.ObjectMeta{\n\t\t\t\tName: apiKeyMapName,\n\t\t\t},\n\t\t\tData: map[string]string{},\n\t\t})\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ ReadByName attempts to read a mobile app by its unique name\nfunc (mar *MobileAppRepo) ReadByName(name string) (*mobile.App, error) {\n\t_, cm, err := mar.readMobileAppAndConfigMap(name)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to retrieve mobile app \")\n\t}\n\treturn convertConfigMapToMobileApp(cm), nil\n}\n\n\/\/ Create creates a mobile app object. 
Fails on duplicates\nfunc (mar *MobileAppRepo) Create(app *mobile.App) error {\n\tif err := mar.validator.PreCreate(app); err != nil {\n\t\treturn errors.Wrap(err, \"validation failed during create\")\n\t}\n\tapp.ID = app.Name + \"-\" + fmt.Sprintf(\"%v\", time.Now().Unix())\n\tapp.MetaData[\"created\"] = time.Now().Format(\"2006-01-02 15:04:05\")\n\tcm := convertMobileAppToConfigMap(app)\n\tif _, err := mar.client.Create(cm); err != nil {\n\t\treturn errors.Wrap(err, \"failed to create underlying configmap for mobile app\")\n\t}\n\treturn nil\n}\n\n\/\/DeleteByName will delete the underlying configmap\nfunc (mar *MobileAppRepo) DeleteByName(name string) error {\n\treturn mar.client.Delete(name, &meta_v1.DeleteOptions{})\n}\n\n\/\/List will list the configmaps and convert them to mobileapps\nfunc (mar *MobileAppRepo) List() ([]*mobile.App, error) {\n\tlist, err := mar.client.List(meta_v1.ListOptions{LabelSelector: \"group=mobileapp\"})\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to list mobileapp configmaps\")\n\t}\n\n\tvar apps = []*mobile.App{}\n\tfor _, a := range list.Items {\n\t\tapps = append(apps, convertConfigMapToMobileApp(&a))\n\t}\n\treturn apps, nil\n}\n\n\/\/ Update will update the underlying configmap with the new details\nfunc (mar *MobileAppRepo) Update(app *mobile.App) (*mobile.App, error) {\n\told, cm, err := mar.readMobileAppAndConfigMap(app.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := mar.validator.PreUpdate(old, app); err != nil {\n\t\treturn nil, errors.Wrap(err, \"validation failed before update\")\n\t}\n\tcm.Data[\"name\"] = app.Name\n\tcm.Data[\"clientType\"] = app.ClientType\n\tvar cmap *v1.ConfigMap\n\tcmap, err = mar.client.Update(cm)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to update mobile app configmap\")\n\t}\n\treturn convertConfigMapToMobileApp(cmap), nil\n}\n\nfunc convertConfigMapToMobileApp(m *v1.ConfigMap) *mobile.App {\n\treturn &mobile.App{\n\t\tID: m.Name,\n\t\tName: m.Data[\"name\"],\n\t\tClientType: m.Data[\"clientType\"],\n\t\tAPIKey: m.Data[\"apiKey\"],\n\t\tLabels: m.Labels,\n\t\tDescription: m.Data[\"description\"],\n\t\tMetaData: map[string]string{\n\t\t\t\"icon\": m.Annotations[\"icon\"],\n\t\t\t\"created\": m.Annotations[\"created\"],\n\t\t},\n\t}\n}\n\nfunc convertMobileAppToConfigMap(app *mobile.App) *v1.ConfigMap {\n\treturn &v1.ConfigMap{\n\t\tObjectMeta: meta_v1.ObjectMeta{\n\t\t\tName: app.ID,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"group\": \"mobileapp\",\n\t\t\t\t\"name\": app.Name,\n\t\t\t},\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"icon\": app.MetaData[\"icon\"],\n\t\t\t\t\"created\": app.MetaData[\"created\"],\n\t\t\t},\n\t\t},\n\t\tData: map[string]string{\n\t\t\t\"name\": app.Name,\n\t\t\t\"clientType\": app.ClientType,\n\t\t\t\"apiKey\": app.APIKey,\n\t\t\t\"description\": app.Description,\n\t\t},\n\t}\n}\n\nfunc (mar *MobileAppRepo) readMobileAppAndConfigMap(name string) (*mobile.App, *v1.ConfigMap, error) {\n\tcm, err := mar.readUnderlyingConfigMap(&mobile.App{Name: name})\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tapp := convertConfigMapToMobileApp(cm)\n\treturn app, cm, err\n}\n\nfunc (mar *MobileAppRepo) readUnderlyingConfigMap(a *mobile.App) (*v1.ConfigMap, error) {\n\tcm, err := mar.client.Get(a.Name, meta_v1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to read underlying configmap for app \"+a.Name)\n\t}\n\treturn cm, nil\n}\n\n\/\/NewMobileAppRepoBuilder creates a new instance of a MobileAppRepoBuilder\nfunc 
NewMobileAppRepoBuilder() mobile.AppRepoBuilder {\n\treturn &MobileAppRepoBuilder{}\n}\n\n\/\/ MobileAppRepoBuilder builds a MobileAppRepo\ntype MobileAppRepoBuilder struct {\n\tclient corev1.ConfigMapInterface\n}\n\n\/\/ WithClient sets the client to use\nfunc (marb *MobileAppRepoBuilder) WithClient(c corev1.ConfigMapInterface) mobile.AppRepoBuilder {\n\treturn &MobileAppRepoBuilder{\n\t\tclient: c,\n\t}\n}\n\n\/\/ Build builds the final repo\nfunc (marb *MobileAppRepoBuilder) Build() mobile.AppCruder {\n\treturn NewMobileAppRepo(marb.client, DefaultMobileAppValidator{})\n}\n<commit_msg>FH-4070 Match comment with function name<commit_after>package data\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/feedhenry\/mcp-standalone\/pkg\/mobile\"\n\t\"github.com\/pkg\/errors\"\n\tmeta_v1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\tcorev1 \"k8s.io\/client-go\/kubernetes\/typed\/core\/v1\"\n\tv1 \"k8s.io\/client-go\/pkg\/api\/v1\"\n)\n\nconst apiKeyMapName = \"mcp-mobile-keys\"\n\n\/\/ MobileAppValidator defines what a validator should do\ntype MobileAppValidator interface {\n\tPreCreate(a *mobile.App) error\n\tPreUpdate(old *mobile.App, new *mobile.App) error\n}\n\n\/\/ MobileAppRepo interacts with the data store that backs the mobile objects\ntype MobileAppRepo struct {\n\tclient corev1.ConfigMapInterface\n\tvalidator MobileAppValidator\n}\n\n\/\/ NewMobileAppRepo instantiates a new MobileAppRepo\nfunc NewMobileAppRepo(c corev1.ConfigMapInterface, v MobileAppValidator) *MobileAppRepo {\n\trep := &MobileAppRepo{\n\t\tclient: c,\n\t\tvalidator: v,\n\t}\n\tif rep.validator == nil {\n\t\trep.validator = &DefaultMobileAppValidator{}\n\t}\n\treturn rep\n}\n\n\/\/ UpdateAppAPIKeys adds new app api key to apiKey mapping\nfunc (mar *MobileAppRepo) UpdateAppAPIKeys(app *mobile.App) error {\n\tcm, err := mar.client.Get(apiKeyMapName, meta_v1.GetOptions{})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"updating api key config map, could not read\")\n\t}\n\tif cm.Data == nil {\n\t\tcm.Data = map[string]string{}\n\t}\n\tcm.Data[app.ID] = app.APIKey\n\tif _, err := mar.client.Update(cm); err != nil {\n\t\treturn errors.Wrap(err, \"updating api key map, could not save map\")\n\t}\n\treturn nil\n}\n\n\/\/ RemoveAppAPIKeyByID remove api key from apiKey mapping\nfunc (mar *MobileAppRepo) RemoveAppAPIKeyByID(appID string) error {\n\tcm, err := mar.client.Get(apiKeyMapName, meta_v1.GetOptions{})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"deleting api key map, could not read\")\n\t}\n\tif cm.Data == nil {\n\t\tcm.Data = map[string]string{}\n\t}\n\tdelete(cm.Data, appID)\n\tif _, err := mar.client.Update(cm); err != nil {\n\t\treturn errors.Wrap(err, \"deleting api key map, could not save map\")\n\t}\n\treturn nil\n}\n\n\/\/ CreateAppAPIKeyMap ensures that the API Key map is created\nfunc (mar *MobileAppRepo) CreateAppAPIKeyMap() error {\n\t_, err := mar.client.Get(apiKeyMapName, meta_v1.GetOptions{})\n\tif err != nil {\n\t\t\/\/ apiKey map may not exist, create it\n\t\t_, err := mar.client.Create(&v1.ConfigMap{\n\t\t\tObjectMeta: meta_v1.ObjectMeta{\n\t\t\t\tName: apiKeyMapName,\n\t\t\t},\n\t\t\tData: map[string]string{},\n\t\t})\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ ReadByName attempts to read a mobile app by its unique name\nfunc (mar *MobileAppRepo) ReadByName(name string) (*mobile.App, error) {\n\t_, cm, err := mar.readMobileAppAndConfigMap(name)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to retrieve mobile app \")\n\t}\n\treturn convertConfigMapToMobileApp(cm), 
nil\n}\n\n\/\/ Create creates a mobile app object. Fails on duplicates\nfunc (mar *MobileAppRepo) Create(app *mobile.App) error {\n\tif err := mar.validator.PreCreate(app); err != nil {\n\t\treturn errors.Wrap(err, \"validation failed during create\")\n\t}\n\tapp.ID = app.Name + \"-\" + fmt.Sprintf(\"%v\", time.Now().Unix())\n\tapp.MetaData[\"created\"] = time.Now().Format(\"2006-01-02 15:04:05\")\n\tcm := convertMobileAppToConfigMap(app)\n\tif _, err := mar.client.Create(cm); err != nil {\n\t\treturn errors.Wrap(err, \"failed to create underlying configmap for mobile app\")\n\t}\n\treturn nil\n}\n\n\/\/DeleteByName will delete the underlying configmap\nfunc (mar *MobileAppRepo) DeleteByName(name string) error {\n\treturn mar.client.Delete(name, &meta_v1.DeleteOptions{})\n}\n\n\/\/List will list the configmaps and convert them to mobileapps\nfunc (mar *MobileAppRepo) List() ([]*mobile.App, error) {\n\tlist, err := mar.client.List(meta_v1.ListOptions{LabelSelector: \"group=mobileapp\"})\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to list mobileapp configmaps\")\n\t}\n\n\tvar apps = []*mobile.App{}\n\tfor _, a := range list.Items {\n\t\tapps = append(apps, convertConfigMapToMobileApp(&a))\n\t}\n\treturn apps, nil\n}\n\n\/\/ Update will update the underlying configmap with the new details\nfunc (mar *MobileAppRepo) Update(app *mobile.App) (*mobile.App, error) {\n\told, cm, err := mar.readMobileAppAndConfigMap(app.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := mar.validator.PreUpdate(old, app); err != nil {\n\t\treturn nil, errors.Wrap(err, \"validation failed before update\")\n\t}\n\tcm.Data[\"name\"] = app.Name\n\tcm.Data[\"clientType\"] = app.ClientType\n\tvar cmap *v1.ConfigMap\n\tcmap, err = mar.client.Update(cm)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to update mobile app configmap\")\n\t}\n\treturn convertConfigMapToMobileApp(cmap), nil\n}\n\nfunc convertConfigMapToMobileApp(m *v1.ConfigMap) *mobile.App {\n\treturn &mobile.App{\n\t\tID: m.Name,\n\t\tName: m.Data[\"name\"],\n\t\tClientType: m.Data[\"clientType\"],\n\t\tAPIKey: m.Data[\"apiKey\"],\n\t\tLabels: m.Labels,\n\t\tDescription: m.Data[\"description\"],\n\t\tMetaData: map[string]string{\n\t\t\t\"icon\": m.Annotations[\"icon\"],\n\t\t\t\"created\": m.Annotations[\"created\"],\n\t\t},\n\t}\n}\n\nfunc convertMobileAppToConfigMap(app *mobile.App) *v1.ConfigMap {\n\treturn &v1.ConfigMap{\n\t\tObjectMeta: meta_v1.ObjectMeta{\n\t\t\tName: app.ID,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"group\": \"mobileapp\",\n\t\t\t\t\"name\": app.Name,\n\t\t\t},\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"icon\": app.MetaData[\"icon\"],\n\t\t\t\t\"created\": app.MetaData[\"created\"],\n\t\t\t},\n\t\t},\n\t\tData: map[string]string{\n\t\t\t\"name\": app.Name,\n\t\t\t\"clientType\": app.ClientType,\n\t\t\t\"apiKey\": app.APIKey,\n\t\t\t\"description\": app.Description,\n\t\t},\n\t}\n}\n\nfunc (mar *MobileAppRepo) readMobileAppAndConfigMap(name string) (*mobile.App, *v1.ConfigMap, error) {\n\tcm, err := mar.readUnderlyingConfigMap(&mobile.App{Name: name})\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tapp := convertConfigMapToMobileApp(cm)\n\treturn app, cm, err\n}\n\nfunc (mar *MobileAppRepo) readUnderlyingConfigMap(a *mobile.App) (*v1.ConfigMap, error) {\n\tcm, err := mar.client.Get(a.Name, meta_v1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to read underlying configmap for app \"+a.Name)\n\t}\n\treturn cm, nil\n}\n\n\/\/NewMobileAppRepoBuilder 
creates a new instance of a MobileAppRepoBuilder\nfunc NewMobileAppRepoBuilder() mobile.AppRepoBuilder {\n\treturn &MobileAppRepoBuilder{}\n}\n\n\/\/ MobileAppRepoBuilder builds a MobileAppRepo\ntype MobileAppRepoBuilder struct {\n\tclient corev1.ConfigMapInterface\n}\n\n\/\/ WithClient sets the client to use\nfunc (marb *MobileAppRepoBuilder) WithClient(c corev1.ConfigMapInterface) mobile.AppRepoBuilder {\n\treturn &MobileAppRepoBuilder{\n\t\tclient: c,\n\t}\n}\n\n\/\/ Build builds the final repo\nfunc (marb *MobileAppRepoBuilder) Build() mobile.AppCruder {\n\treturn NewMobileAppRepo(marb.client, DefaultMobileAppValidator{})\n}\n<|endoftext|>"} {"text":"<commit_before>package handler\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/drone\/drone\/pkg\/build\/script\"\n\t\"github.com\/drone\/drone\/pkg\/database\"\n\t. \"github.com\/drone\/drone\/pkg\/model\"\n\t\"github.com\/drone\/drone\/pkg\/queue\"\n\t\"github.com\/plouc\/go-gitlab-client\"\n)\n\ntype GitlabHandler struct {\n\tqueue *queue.Queue\n\tapiPath string\n}\n\nfunc NewGitlabHandler(queue *queue.Queue) *GitlabHandler {\n\treturn &GitlabHandler{\n\t\tqueue: queue,\n\t\tapiPath: \"\/api\/v3\",\n\t}\n}\n\nfunc (g *GitlabHandler) Add(w http.ResponseWriter, r *http.Request, u *User) error {\n\tsettings := database.SettingsMust()\n\tteams, err := database.ListTeams(u.ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdata := struct {\n\t\tUser *User\n\t\tTeams []*Team\n\t\tSettings *Settings\n\t}{u, teams, settings}\n\t\/\/ if the user hasn't linked their GitLab account\n\t\/\/ render a different template\n\tif len(u.GitlabToken) == 0 {\n\t\treturn RenderTemplate(w, \"gitlab_link.html\", &data)\n\t}\n\t\/\/ otherwise display the template for adding\n\t\/\/ a new GitLab repository.\n\treturn RenderTemplate(w, \"gitlab_add.html\", &data)\n}\n\nfunc (g *GitlabHandler) Link(w http.ResponseWriter, r *http.Request, u *User) error {\n\ttoken := strings.TrimSpace(r.FormValue(\"token\"))\n\n\tif len(u.GitlabToken) == 0 || token != u.GitlabToken && len(token) > 0 {\n\t\tu.GitlabToken = token\n\t\tsettings := database.SettingsMust()\n\t\tgl := gogitlab.NewGitlab(settings.GitlabApiUrl, g.apiPath, u.GitlabToken)\n\t\t_, err := gl.CurrentUser()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Private Token is not valid: %q\", err)\n\t\t}\n\t\tif err := database.SaveUser(u); err != nil {\n\t\t\treturn RenderError(w, err, http.StatusBadRequest)\n\t\t}\n\t}\n\n\thttp.Redirect(w, r, \"\/new\/gitlab\", http.StatusSeeOther)\n\treturn nil\n}\n\nfunc (g *GitlabHandler) ReLink(w http.ResponseWriter, r *http.Request, u *User) error {\n\tdata := struct {\n\t\tUser *User\n\t}{u}\n\treturn RenderTemplate(w, \"gitlab_link.html\", &data)\n}\n\nfunc (g *GitlabHandler) Create(w http.ResponseWriter, r *http.Request, u *User) error {\n\tteamName := r.FormValue(\"team\")\n\towner := r.FormValue(\"owner\")\n\tname := r.FormValue(\"name\")\n\n\trepo, err := g.newGitlabRepo(u, owner, name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(teamName) > 0 {\n\t\tteam, err := database.GetTeamSlug(teamName)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to find Team %s.\", teamName)\n\t\t}\n\n\t\t\/\/ user must be an admin member of the team\n\t\tif ok, _ := database.IsMemberAdmin(u.ID, team.ID); !ok {\n\t\t\treturn fmt.Errorf(\"Invalid permission to access Team %s.\", teamName)\n\t\t}\n\t\trepo.TeamID = team.ID\n\t}\n\n\t\/\/ Save to the database\n\tif err := 
database.SaveRepo(repo); err != nil {\n\t\treturn fmt.Errorf(\"Error saving repository to the database. %s\", err)\n\t}\n\n\treturn RenderText(w, http.StatusText(http.StatusOK), http.StatusOK)\n}\n\nfunc (g *GitlabHandler) newGitlabRepo(u *User, owner, name string) (*Repo, error) {\n\tsettings := database.SettingsMust()\n\tgl := gogitlab.NewGitlab(settings.GitlabApiUrl, g.apiPath, u.GitlabToken)\n\n\tproject, err := gl.Project(ns(owner, name))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar cloneUrl string\n\tif project.Public {\n\t\tcloneUrl = project.HttpRepoUrl\n\t} else {\n\t\tcloneUrl = project.SshRepoUrl\n\t}\n\n\trepo, err := NewRepo(settings.GitlabDomain, owner, name, ScmGit, cloneUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trepo.UserID = u.ID\n\trepo.Private = !project.Public\n\tif repo.Private {\n\t\t\/\/ name the key\n\t\tkeyName := fmt.Sprintf(\"%s@%s\", repo.Owner, settings.Domain)\n\n\t\t\/\/ TODO: (fudanchii) check if we already opted to use UserKey\n\n\t\t\/\/ create the github key, or update if one already exists\n\t\tif err := gl.AddProjectDeployKey(ns(owner, name), keyName, repo.PublicKey); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Unable to add Public Key to your GitLab repository.\")\n\t\t}\n\t}\n\n\tlink := fmt.Sprintf(\"%s:\/\/%s\/hook\/gitlab?id=%s\", settings.Scheme, settings.Domain, repo.Slug)\n\tif err := gl.AddProjectHook(ns(owner, name), link, true, false, true); err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to add Hook to your GitLab repository.\")\n\t}\n\n\treturn repo, err\n}\n\nfunc (g *GitlabHandler) Hook(w http.ResponseWriter, r *http.Request) error {\n\trID := r.FormValue(\"id\")\n\trepo, err := database.GetRepoSlug(rID)\n\tif err != nil {\n\t\treturn RenderText(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t}\n\n\tuser, err := database.GetUser(repo.UserID)\n\tif err != nil {\n\t\treturn RenderText(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t}\n\n\tpayload, _ := ioutil.ReadAll(r.Body)\n\tparsed, err := gogitlab.ParseHook(payload)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif parsed.ObjectKind == \"merge_request\" {\n\t\tfmt.Println(string(payload))\n\t\tif err := g.PullRequestHook(parsed, repo, user); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn RenderText(w, http.StatusText(http.StatusOK), http.StatusOK)\n\t}\n\n\tif len(parsed.After) == 0 {\n\t\treturn RenderText(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t}\n\n\t_, err = database.GetCommitHash(parsed.After, repo.ID)\n\tif err != nil && err != sql.ErrNoRows {\n\t\tprintln(\"commit already exists\")\n\t\treturn RenderText(w, http.StatusText(http.StatusBadGateway), http.StatusBadGateway)\n\t}\n\n\tcommit := &Commit{}\n\tcommit.RepoID = repo.ID\n\tcommit.Branch = parsed.Branch()\n\tcommit.Hash = parsed.After\n\tcommit.Status = \"Pending\"\n\tcommit.Created = time.Now().UTC()\n\n\thead := parsed.Head()\n\tcommit.Message = head.Message\n\tcommit.Timestamp = head.Timestamp\n\tif head.Author != nil {\n\t\tcommit.SetAuthor(head.Author.Email)\n\t} else {\n\t\tcommit.SetAuthor(parsed.UserName)\n\t}\n\n\t\/\/ get the github settings from the database\n\tsettings := database.SettingsMust()\n\n\t\/\/ get the drone.yml file from GitHub\n\tclient := gogitlab.NewGitlab(settings.GitlabApiUrl, g.apiPath, user.GitlabToken)\n\n\tcontent, err := client.RepoRawFile(ns(repo.Owner, repo.Name), commit.Hash, \".drone.yml\")\n\tif err != nil {\n\t\tmsg := \"No .drone.yml was found in this repository. 
You need to add one.\\n\"\n\t\tif err := saveFailedBuild(commit, msg); err != nil {\n\t\t\treturn RenderText(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t}\n\t\treturn RenderText(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t}\n\n\t\/\/ parse the build script\n\tbuildscript, err := script.ParseBuild(content, repo.Params)\n\tif err != nil {\n\t\tmsg := \"Could not parse your .drone.yml file. It needs to be a valid drone yaml file.\\n\\n\" + err.Error() + \"\\n\"\n\t\tif err := saveFailedBuild(commit, msg); err != nil {\n\t\t\treturn RenderText(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t}\n\t\treturn RenderText(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t}\n\n\t\/\/ save the commit to the database\n\tif err := database.SaveCommit(commit); err != nil {\n\t\treturn RenderText(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t}\n\n\t\/\/ save the build to the database\n\tbuild := &Build{}\n\tbuild.Slug = \"1\" \/\/ TODO\n\tbuild.CommitID = commit.ID\n\tbuild.Created = time.Now().UTC()\n\tbuild.Status = \"Pending\"\n\tif err := database.SaveBuild(build); err != nil {\n\t\treturn RenderText(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t}\n\n\tg.queue.Add(&queue.BuildTask{Repo: repo, Commit: commit, Build: build, Script: buildscript})\n\n\t\/\/ OK!\n\treturn RenderText(w, http.StatusText(http.StatusOK), http.StatusOK)\n\n}\n\nfunc (g *GitlabHandler) PullRequestHook(p *gogitlab.HookPayload, repo *Repo, user *User) error {\n\tobj := p.ObjectAttributes\n\n\t\/\/ Gitlab may trigger multiple hooks upon updating merge requests status\n\t\/\/ only build when it was just opened and the merge hasn't been checked yet.\n\tif !(obj.State == \"opened\" && obj.MergeStatus == \"unchecked\") {\n\t\tprintln(\"Ignore GitLab Merge Requests\")\n\t\treturn nil\n\t}\n\n\tsettings := database.SettingsMust()\n\n\tclient := gogitlab.NewGitlab(settings.GitlabApiUrl, g.apiPath, user.GitlabToken)\n\n\t\/\/ GitLab merge-requests hook doesn't include repository data.\n\t\/\/ Have to fetch it manually\n\tsrc, err := client.RepoBranch(strconv.Itoa(obj.SourceProjectId), obj.SourceBranch)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = database.GetCommitHash(src.Commit.Id, repo.ID)\n\tif err != nil && err != sql.ErrNoRows {\n\t\tprintln(\"commit already exists\")\n\t\treturn err\n\t}\n\n\tcommit := &Commit{}\n\tcommit.RepoID = repo.ID\n\tcommit.Branch = src.Name\n\tcommit.Hash = src.Commit.Id\n\tcommit.Status = \"Pending\"\n\tcommit.Created = time.Now().UTC()\n\n\tcommit.Message = src.Commit.Message\n\tcommit.Timestamp = src.Commit.AuthoredDateRaw\n\tcommit.SetAuthor(src.Commit.Author.Email)\n\n\tcontent, err := client.RepoRawFile(strconv.Itoa(obj.SourceProjectId), commit.Hash, \".drone.yml\")\n\tif err != nil {\n\t\tmsg := \"No .drone.yml was found in this repository. You need to add one.\\n\"\n\t\tif err := saveFailedBuild(commit, msg); err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to save build: %q\", err)\n\t\t}\n\t\treturn fmt.Errorf(\"Failed to fetch build script: %q\", err)\n\t}\n\n\t\/\/ parse the build script\n\tbuildscript, err := script.ParseBuild(content, repo.Params)\n\tif err != nil {\n\t\tmsg := \"Could not parse your .drone.yml file. 
It needs to be a valid drone yaml file.\\n\\n\" + err.Error() + \"\\n\"\n\t\tif err := saveFailedBuild(commit, msg); err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to save build: %q\", err)\n\t\t}\n\t\treturn fmt.Errorf(\"Failed to parse build script: %q\", err)\n\t}\n\n\t\/\/ save the commit to the database\n\tif err := database.SaveCommit(commit); err != nil {\n\t\treturn fmt.Errorf(\"Failed to save commit: %q\", err)\n\t}\n\n\t\/\/ save the build to the database\n\tbuild := &Build{}\n\tbuild.Slug = \"1\" \/\/ TODO\n\tbuild.CommitID = commit.ID\n\tbuild.Created = time.Now().UTC()\n\tbuild.Status = \"Pending\"\n\tif err := database.SaveBuild(build); err != nil {\n\t\treturn fmt.Errorf(\"Failed to save build: %q\", err)\n\t}\n\n\tg.queue.Add(&queue.BuildTask{Repo: repo, Commit: commit, Build: build, Script: buildscript})\n\n\treturn nil\n}\n\n\/\/ ns namespaces user and repo.\n\/\/ Returns user%2Frepo\nfunc ns(user, repo string) string {\n\treturn fmt.Sprintf(\"%s%%2F%s\", user, repo)\n}\n<commit_msg>Set commit.PullRequest for PullRequestHook<commit_after>package handler\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/drone\/drone\/pkg\/build\/script\"\n\t\"github.com\/drone\/drone\/pkg\/database\"\n\t. \"github.com\/drone\/drone\/pkg\/model\"\n\t\"github.com\/drone\/drone\/pkg\/queue\"\n\t\"github.com\/plouc\/go-gitlab-client\"\n)\n\ntype GitlabHandler struct {\n\tqueue *queue.Queue\n\tapiPath string\n}\n\nfunc NewGitlabHandler(queue *queue.Queue) *GitlabHandler {\n\treturn &GitlabHandler{\n\t\tqueue: queue,\n\t\tapiPath: \"\/api\/v3\",\n\t}\n}\n\nfunc (g *GitlabHandler) Add(w http.ResponseWriter, r *http.Request, u *User) error {\n\tsettings := database.SettingsMust()\n\tteams, err := database.ListTeams(u.ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdata := struct {\n\t\tUser *User\n\t\tTeams []*Team\n\t\tSettings *Settings\n\t}{u, teams, settings}\n\t\/\/ if the user hasn't linked their GitLab account\n\t\/\/ render a different template\n\tif len(u.GitlabToken) == 0 {\n\t\treturn RenderTemplate(w, \"gitlab_link.html\", &data)\n\t}\n\t\/\/ otherwise display the template for adding\n\t\/\/ a new GitLab repository.\n\treturn RenderTemplate(w, \"gitlab_add.html\", &data)\n}\n\nfunc (g *GitlabHandler) Link(w http.ResponseWriter, r *http.Request, u *User) error {\n\ttoken := strings.TrimSpace(r.FormValue(\"token\"))\n\n\tif len(u.GitlabToken) == 0 || token != u.GitlabToken && len(token) > 0 {\n\t\tu.GitlabToken = token\n\t\tsettings := database.SettingsMust()\n\t\tgl := gogitlab.NewGitlab(settings.GitlabApiUrl, g.apiPath, u.GitlabToken)\n\t\t_, err := gl.CurrentUser()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Private Token is not valid: %q\", err)\n\t\t}\n\t\tif err := database.SaveUser(u); err != nil {\n\t\t\treturn RenderError(w, err, http.StatusBadRequest)\n\t\t}\n\t}\n\n\thttp.Redirect(w, r, \"\/new\/gitlab\", http.StatusSeeOther)\n\treturn nil\n}\n\nfunc (g *GitlabHandler) ReLink(w http.ResponseWriter, r *http.Request, u *User) error {\n\tdata := struct {\n\t\tUser *User\n\t}{u}\n\treturn RenderTemplate(w, \"gitlab_link.html\", &data)\n}\n\nfunc (g *GitlabHandler) Create(w http.ResponseWriter, r *http.Request, u *User) error {\n\tteamName := r.FormValue(\"team\")\n\towner := r.FormValue(\"owner\")\n\tname := r.FormValue(\"name\")\n\n\trepo, err := g.newGitlabRepo(u, owner, name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(teamName) > 0 {\n\t\tteam, err := 
database.GetTeamSlug(teamName)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to find Team %s.\", teamName)\n\t\t}\n\n\t\t\/\/ user must be an admin member of the team\n\t\tif ok, _ := database.IsMemberAdmin(u.ID, team.ID); !ok {\n\t\t\treturn fmt.Errorf(\"Invalid permission to access Team %s.\", teamName)\n\t\t}\n\t\trepo.TeamID = team.ID\n\t}\n\n\t\/\/ Save to the database\n\tif err := database.SaveRepo(repo); err != nil {\n\t\treturn fmt.Errorf(\"Error saving repository to the database. %s\", err)\n\t}\n\n\treturn RenderText(w, http.StatusText(http.StatusOK), http.StatusOK)\n}\n\nfunc (g *GitlabHandler) newGitlabRepo(u *User, owner, name string) (*Repo, error) {\n\tsettings := database.SettingsMust()\n\tgl := gogitlab.NewGitlab(settings.GitlabApiUrl, g.apiPath, u.GitlabToken)\n\n\tproject, err := gl.Project(ns(owner, name))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar cloneUrl string\n\tif project.Public {\n\t\tcloneUrl = project.HttpRepoUrl\n\t} else {\n\t\tcloneUrl = project.SshRepoUrl\n\t}\n\n\trepo, err := NewRepo(settings.GitlabDomain, owner, name, ScmGit, cloneUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trepo.UserID = u.ID\n\trepo.Private = !project.Public\n\tif repo.Private {\n\t\t\/\/ name the key\n\t\tkeyName := fmt.Sprintf(\"%s@%s\", repo.Owner, settings.Domain)\n\n\t\t\/\/ TODO: (fudanchii) check if we already opted to use UserKey\n\n\t\t\/\/ create the github key, or update if one already exists\n\t\tif err := gl.AddProjectDeployKey(ns(owner, name), keyName, repo.PublicKey); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Unable to add Public Key to your GitLab repository.\")\n\t\t}\n\t}\n\n\tlink := fmt.Sprintf(\"%s:\/\/%s\/hook\/gitlab?id=%s\", settings.Scheme, settings.Domain, repo.Slug)\n\tif err := gl.AddProjectHook(ns(owner, name), link, true, false, true); err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to add Hook to your GitLab repository.\")\n\t}\n\n\treturn repo, err\n}\n\nfunc (g *GitlabHandler) Hook(w http.ResponseWriter, r *http.Request) error {\n\trID := r.FormValue(\"id\")\n\trepo, err := database.GetRepoSlug(rID)\n\tif err != nil {\n\t\treturn RenderText(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t}\n\n\tuser, err := database.GetUser(repo.UserID)\n\tif err != nil {\n\t\treturn RenderText(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t}\n\n\tpayload, _ := ioutil.ReadAll(r.Body)\n\tparsed, err := gogitlab.ParseHook(payload)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif parsed.ObjectKind == \"merge_request\" {\n\t\tfmt.Println(string(payload))\n\t\tif err := g.PullRequestHook(parsed, repo, user); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn RenderText(w, http.StatusText(http.StatusOK), http.StatusOK)\n\t}\n\n\tif len(parsed.After) == 0 {\n\t\treturn RenderText(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t}\n\n\t_, err = database.GetCommitHash(parsed.After, repo.ID)\n\tif err != nil && err != sql.ErrNoRows {\n\t\tfmt.Println(\"commit already exists\")\n\t\treturn RenderText(w, http.StatusText(http.StatusBadGateway), http.StatusBadGateway)\n\t}\n\n\tcommit := &Commit{}\n\tcommit.RepoID = repo.ID\n\tcommit.Branch = parsed.Branch()\n\tcommit.Hash = parsed.After\n\tcommit.Status = \"Pending\"\n\tcommit.Created = time.Now().UTC()\n\n\thead := parsed.Head()\n\tcommit.Message = head.Message\n\tcommit.Timestamp = head.Timestamp\n\tif head.Author != nil {\n\t\tcommit.SetAuthor(head.Author.Email)\n\t} else {\n\t\tcommit.SetAuthor(parsed.UserName)\n\t}\n\n\t\/\/ get the github 
settings from the database\n\tsettings := database.SettingsMust()\n\n\t\/\/ get the drone.yml file from GitHub\n\tclient := gogitlab.NewGitlab(settings.GitlabApiUrl, g.apiPath, user.GitlabToken)\n\n\tcontent, err := client.RepoRawFile(ns(repo.Owner, repo.Name), commit.Hash, \".drone.yml\")\n\tif err != nil {\n\t\tmsg := \"No .drone.yml was found in this repository. You need to add one.\\n\"\n\t\tif err := saveFailedBuild(commit, msg); err != nil {\n\t\t\treturn RenderText(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t}\n\t\treturn RenderText(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t}\n\n\t\/\/ parse the build script\n\tbuildscript, err := script.ParseBuild(content, repo.Params)\n\tif err != nil {\n\t\tmsg := \"Could not parse your .drone.yml file. It needs to be a valid drone yaml file.\\n\\n\" + err.Error() + \"\\n\"\n\t\tif err := saveFailedBuild(commit, msg); err != nil {\n\t\t\treturn RenderText(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t}\n\t\treturn RenderText(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t}\n\n\t\/\/ save the commit to the database\n\tif err := database.SaveCommit(commit); err != nil {\n\t\treturn RenderText(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t}\n\n\t\/\/ save the build to the database\n\tbuild := &Build{}\n\tbuild.Slug = \"1\" \/\/ TODO\n\tbuild.CommitID = commit.ID\n\tbuild.Created = time.Now().UTC()\n\tbuild.Status = \"Pending\"\n\tif err := database.SaveBuild(build); err != nil {\n\t\treturn RenderText(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t}\n\n\tg.queue.Add(&queue.BuildTask{Repo: repo, Commit: commit, Build: build, Script: buildscript})\n\n\t\/\/ OK!\n\treturn RenderText(w, http.StatusText(http.StatusOK), http.StatusOK)\n\n}\n\nfunc (g *GitlabHandler) PullRequestHook(p *gogitlab.HookPayload, repo *Repo, user *User) error {\n\tobj := p.ObjectAttributes\n\n\t\/\/ Gitlab may trigger multiple hooks upon updating merge requests status\n\t\/\/ only build when it was just opened and the merge hasn't been checked yet.\n\tif !(obj.State == \"opened\" && obj.MergeStatus == \"unchecked\") {\n\t\tfmt.Println(\"Ignore GitLab Merge Requests\")\n\t\treturn nil\n\t}\n\n\tsettings := database.SettingsMust()\n\n\tclient := gogitlab.NewGitlab(settings.GitlabApiUrl, g.apiPath, user.GitlabToken)\n\n\t\/\/ GitLab merge-requests hook doesn't include repository data.\n\t\/\/ Have to fetch it manually\n\tsrc, err := client.RepoBranch(strconv.Itoa(obj.SourceProjectId), obj.SourceBranch)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = database.GetCommitHash(src.Commit.Id, repo.ID)\n\tif err != nil && err != sql.ErrNoRows {\n\t\tfmt.Println(\"commit already exists\")\n\t\treturn err\n\t}\n\n\tcommit := &Commit{}\n\tcommit.RepoID = repo.ID\n\tcommit.Branch = src.Name\n\tcommit.Hash = src.Commit.Id\n\tcommit.Status = \"Pending\"\n\tcommit.Created = time.Now().UTC()\n\tcommit.PullRequest = strconv.Itoa(obj.IId)\n\n\tcommit.Message = src.Commit.Message\n\tcommit.Timestamp = src.Commit.AuthoredDateRaw\n\tcommit.SetAuthor(src.Commit.Author.Email)\n\n\tcontent, err := client.RepoRawFile(strconv.Itoa(obj.SourceProjectId), commit.Hash, \".drone.yml\")\n\tif err != nil {\n\t\tmsg := \"No .drone.yml was found in this repository. 
You need to add one.\\n\"\n\t\tif err := saveFailedBuild(commit, msg); err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to save build: %q\", err)\n\t\t}\n\t\treturn fmt.Errorf(\"Failed to fetch build script: %q\", err)\n\t}\n\n\t\/\/ parse the build script\n\tbuildscript, err := script.ParseBuild(content, repo.Params)\n\tif err != nil {\n\t\tmsg := \"Could not parse your .drone.yml file. It needs to be a valid drone yaml file.\\n\\n\" + err.Error() + \"\\n\"\n\t\tif err := saveFailedBuild(commit, msg); err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to save build: %q\", err)\n\t\t}\n\t\treturn fmt.Errorf(\"Failed to parse build script: %q\", err)\n\t}\n\n\t\/\/ save the commit to the database\n\tif err := database.SaveCommit(commit); err != nil {\n\t\treturn fmt.Errorf(\"Failed to save commit: %q\", err)\n\t}\n\n\t\/\/ save the build to the database\n\tbuild := &Build{}\n\tbuild.Slug = \"1\" \/\/ TODO\n\tbuild.CommitID = commit.ID\n\tbuild.Created = time.Now().UTC()\n\tbuild.Status = \"Pending\"\n\tif err := database.SaveBuild(build); err != nil {\n\t\treturn fmt.Errorf(\"Failed to save build: %q\", err)\n\t}\n\n\tg.queue.Add(&queue.BuildTask{Repo: repo, Commit: commit, Build: build, Script: buildscript})\n\n\treturn nil\n}\n\n\/\/ ns namespaces user and repo.\n\/\/ Returns user%2Frepo\nfunc ns(user, repo string) string {\n\treturn fmt.Sprintf(\"%s%%2F%s\", user, repo)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 The nvim-go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage nvimutil\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/haya14busa\/errorformat\"\n\t\"github.com\/neovim\/go-client\/nvim\"\n\t\"go.opencensus.io\/trace\"\n\t\"go.uber.org\/zap\"\n\n\t\"github.com\/zchee\/nvim-go\/pkg\/buildctxt\"\n\t\"github.com\/zchee\/nvim-go\/pkg\/config\"\n\t\"github.com\/zchee\/nvim-go\/pkg\/fs\"\n\t\"github.com\/zchee\/nvim-go\/pkg\/logger\"\n)\n\n\/\/ regexp pattern: https:\/\/regex101.com\/r\/bUVZpH\/2\nvar errRe = regexp.MustCompile(`(?m)^(?:#\\s([[:graph:]]+))?(?:[\\s\\t]+)?([^\\s:]+):(\\d+)(?::(\\d+))?(?::)?\\s(.*)`)\n\n\/\/ ParseError parses a typical Go tools error messages.\nfunc ParseError(ctx context.Context, errmsg []byte, cwd string, bctxt *buildctxt.Build, ignoreDirs []string) ([]*nvim.QuickfixError, error) {\n\tdefer Profile(ctx, time.Now(), \"ParseError\")\n\tspan := trace.FromContext(ctx)\n\tspan.SetName(\"ParseError\")\n\tdefer span.End()\n\n\tif config.IsDebug() {\n\t\tlog := logger.FromContext(ctx)\n\t\tefm, err := errorformat.NewErrorformat([]string{`%f:%l:%c: %m`, `%-G%.%#`})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ts := efm.NewScanner(bytes.NewReader(errmsg))\n\t\tfor s.Scan() {\n\t\t\tlog.Debug(\"errorformat: `%%f:%l:%%c: %m`, `%%-G%.%%#`\", zap.Any(\"s.Entry\", s.Entry()))\n\t\t}\n\t}\n\n\tvar (\n\t\t\/\/ packagePath for the save the error files parent directory.\n\t\t\/\/ It will be re-assigned if \"# \" is in the error message.\n\t\tpackagePath string\n\t\terrlist []*nvim.QuickfixError\n\t)\n\n\t\/\/ m[1]: package path with \"# \" prefix\n\t\/\/ m[2]: error files relative path\n\t\/\/ m[3]: line number of error point\n\t\/\/ m[4]: column number of error point\n\t\/\/ m[5]: error description text\n\tfor _, m := range errRe.FindAllSubmatch(errmsg, -1) {\n\t\tif m[1] != nil {\n\t\t\t\/\/ Save the package path for the second subsequent 
errors\n\t\t\tpackagePath = string(m[1])\n\t\t}\n\t\tfilename := string(m[2])\n\n\t\t\/\/ Avoid the local package error. like \"package foo\" and edit \"cmd\/foo\/main.go\"\n\t\tif !filepath.IsAbs(filename) && packagePath != \"\" {\n\t\t\t\/\/ Joins the packagePath and error file\n\t\t\tfilename = filepath.Join(packagePath, filepath.Base(filename))\n\t\t}\n\n\t\t\/\/ Cleanup filename to relative path of current working directory\n\t\tswitch bctxt.Tool {\n\t\tcase \"go\":\n\t\t\tvar sep string\n\t\t\tswitch {\n\t\t\t\/\/ filename has no directory path\n\t\t\tcase filepath.Dir(filename) == \".\":\n\t\t\t\tfilename = filepath.Join(cwd, filename)\n\t\t\t\/\/ error does not contain '#' package title\n\t\t\tcase strings.HasPrefix(filename, cwd):\n\t\t\t\tsep = cwd\n\t\t\t\tfilename = strings.TrimPrefix(filename, sep+string(filepath.Separator))\n\t\t\t\/\/ filename is like \"github.com\/foo\/bar.go\"\n\t\t\tcase strings.HasPrefix(filename, fs.TrimGoPath(cwd)):\n\t\t\t\tsep = fs.TrimGoPath(cwd) + string(filepath.Separator)\n\t\t\t\tfilename = strings.TrimPrefix(filename, sep)\n\t\t\tdefault:\n\t\t\t\tfilename = fs.JoinGoPath(filename)\n\t\t\t}\n\t\tcase \"gb\":\n\t\t\t\/\/ gb compiler error messages are relative filename paths of the project root dir\n\t\t\tif !filepath.IsAbs(filename) {\n\t\t\t\tfilename = filepath.Join(bctxt.ProjectRoot, \"src\", filename)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"unknown compiler tool\")\n\t\t}\n\n\t\t\/\/ Finally, try to convert the relative path from cwd\n\t\tfilename = fs.Rel(cwd, filename)\n\t\tif ignoreDirs != nil {\n\t\t\tif contains(filename, ignoreDirs) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t\/\/ line is necessary for error messages\n\t\tline, err := strconv.Atoi(string(m[3]))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Ignore err because fail strconv.Atoi will assign 0 to col\n\t\tcol, _ := strconv.Atoi(string(m[4]))\n\n\t\terrlist = append(errlist, &nvim.QuickfixError{\n\t\t\tFileName: filename,\n\t\t\tLNum: line,\n\t\t\tCol: col,\n\t\t\tText: string(bytes.TrimSpace(m[5])),\n\t\t})\n\t}\n\n\treturn errlist, nil\n}\n\nfunc contains(s string, substr []string) bool {\n\tfor _, str := range substr {\n\t\tif strings.Contains(s, str) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<commit_msg>pkg\/nvimutil: fix errorformat import path<commit_after>\/\/ Copyright 2018 The nvim-go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage nvimutil\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/reviewdog\/errorformat\"\n\t\"github.com\/neovim\/go-client\/nvim\"\n\t\"go.opencensus.io\/trace\"\n\t\"go.uber.org\/zap\"\n\n\t\"github.com\/zchee\/nvim-go\/pkg\/buildctxt\"\n\t\"github.com\/zchee\/nvim-go\/pkg\/config\"\n\t\"github.com\/zchee\/nvim-go\/pkg\/fs\"\n\t\"github.com\/zchee\/nvim-go\/pkg\/logger\"\n)\n\n\/\/ regexp pattern: https:\/\/regex101.com\/r\/bUVZpH\/2\nvar errRe = regexp.MustCompile(`(?m)^(?:#\\s([[:graph:]]+))?(?:[\\s\\t]+)?([^\\s:]+):(\\d+)(?::(\\d+))?(?::)?\\s(.*)`)\n\n\/\/ ParseError parses a typical Go tools error messages.\nfunc ParseError(ctx context.Context, errmsg []byte, cwd string, bctxt *buildctxt.Build, ignoreDirs []string) ([]*nvim.QuickfixError, error) {\n\tdefer Profile(ctx, time.Now(), \"ParseError\")\n\tspan := trace.FromContext(ctx)\n\tspan.SetName(\"ParseError\")\n\tdefer span.End()\n\n\tif config.IsDebug() {\n\t\tlog := logger.FromContext(ctx)\n\t\tefm, err := errorformat.NewErrorformat([]string{`%f:%l:%c: %m`, `%-G%.%#`})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ts := efm.NewScanner(bytes.NewReader(errmsg))\n\t\tfor s.Scan() {\n\t\t\tlog.Debug(\"errorformat: `%%f:%l:%%c: %m`, `%%-G%.%%#`\", zap.Any(\"s.Entry\", s.Entry()))\n\t\t}\n\t}\n\n\tvar (\n\t\t\/\/ packagePath for the save the error files parent directory.\n\t\t\/\/ It will be re-assigned if \"# \" is in the error message.\n\t\tpackagePath string\n\t\terrlist []*nvim.QuickfixError\n\t)\n\n\t\/\/ m[1]: package path with \"# \" prefix\n\t\/\/ m[2]: error files relative path\n\t\/\/ m[3]: line number of error point\n\t\/\/ m[4]: column number of error point\n\t\/\/ m[5]: error description text\n\tfor _, m := range errRe.FindAllSubmatch(errmsg, -1) {\n\t\tif m[1] != nil {\n\t\t\t\/\/ Save the package path for the second subsequent errors\n\t\t\tpackagePath = string(m[1])\n\t\t}\n\t\tfilename := string(m[2])\n\n\t\t\/\/ Avoid the local package error. 
like \"package foo\" and edit \"cmd\/foo\/main.go\"\n\t\tif !filepath.IsAbs(filename) && packagePath != \"\" {\n\t\t\t\/\/ Joins the packagePath and error file\n\t\t\tfilename = filepath.Join(packagePath, filepath.Base(filename))\n\t\t}\n\n\t\t\/\/ Cleanup filename to relative path of current working directory\n\t\tswitch bctxt.Tool {\n\t\tcase \"go\":\n\t\t\tvar sep string\n\t\t\tswitch {\n\t\t\t\/\/ filename has not directory path\n\t\t\tcase filepath.Dir(filename) == \".\":\n\t\t\t\tfilename = filepath.Join(cwd, filename)\n\t\t\t\/\/ not contains '#' package title in errror\n\t\t\tcase strings.HasPrefix(filename, cwd):\n\t\t\t\tsep = cwd\n\t\t\t\tfilename = strings.TrimPrefix(filename, sep+string(filepath.Separator))\n\t\t\t\/\/ filename is like \"github.com\/foo\/bar.go\"\n\t\t\tcase strings.HasPrefix(filename, fs.TrimGoPath(cwd)):\n\t\t\t\tsep = fs.TrimGoPath(cwd) + string(filepath.Separator)\n\t\t\t\tfilename = strings.TrimPrefix(filename, sep)\n\t\t\tdefault:\n\t\t\t\tfilename = fs.JoinGoPath(filename)\n\t\t\t}\n\t\tcase \"gb\":\n\t\t\t\/\/ gb compiler error messages is relative filename path of project root dir\n\t\t\tif !filepath.IsAbs(filename) {\n\t\t\t\tfilename = filepath.Join(bctxt.ProjectRoot, \"src\", filename)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"unknown compiler tool\")\n\t\t}\n\n\t\t\/\/ Finally, try to convert the relative path from cwd\n\t\tfilename = fs.Rel(cwd, filename)\n\t\tif ignoreDirs != nil {\n\t\t\tif contains(filename, ignoreDirs) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t\/\/ line is necessary for error messages\n\t\tline, err := strconv.Atoi(string(m[3]))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Ignore err because fail strconv.Atoi will assign 0 to col\n\t\tcol, _ := strconv.Atoi(string(m[4]))\n\n\t\terrlist = append(errlist, &nvim.QuickfixError{\n\t\t\tFileName: filename,\n\t\t\tLNum: line,\n\t\t\tCol: col,\n\t\t\tText: string(bytes.TrimSpace(m[5])),\n\t\t})\n\t}\n\n\treturn errlist, nil\n}\n\nfunc contains(s string, substr []string) bool {\n\tfor _, str := range substr {\n\t\tif strings.Contains(s, str) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 the LinuxBoot Authors. 
All rights reserved\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage visitors\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/linuxboot\/fiano\/pkg\/uefi\"\n)\n\n\/\/ Table prints the GUIDS, types and sizes as a compact table.\ntype Table struct {\n\tW *tabwriter.Writer\n\tLayout bool\n\tTopLevel bool\n\tindent int\n\toffset uint64\n\tcurOffset uint64\n\tdepth int\n}\n\n\/\/ Run wraps Visit and performs some setup and teardown tasks.\nfunc (v *Table) Run(f uefi.Firmware) error {\n\treturn f.Apply(v)\n}\n\n\/\/ Visit applies the Table visitor to any Firmware type.\nfunc (v *Table) Visit(f uefi.Firmware) error {\n\tvar offset uint64\n\tswitch f := f.(type) {\n\tcase *uefi.FlashImage:\n\t\tv.depth = v.indent + 1\n\t\treturn v.printRow(f, \"Image\", \"\", \"\", 0, 0)\n\tcase *uefi.FirmwareVolume:\n\t\treturn v.printRow(f, \"FV\", f.FileSystemGUID.String(), \"\", v.offset+f.FVOffset, v.offset+f.FVOffset+f.DataOffset)\n\tcase *uefi.File:\n\t\t\/\/ TODO: make name part of the file node\n\t\treturn v.printRow(f, \"File\", f.Header.GUID.String(), f.Header.Type, v.curOffset, v.curOffset+f.DataOffset)\n\tcase *uefi.Section:\n\t\t\/\/ Reset offset to O for (compressed) section content\n\t\treturn v.printRow(f, \"Sec\", f.String(), f.Type, v.curOffset, 0)\n\tcase *uefi.FlashDescriptor:\n\t\tv.depth = v.indent + 1\n\t\treturn v.printRow(f, \"IFD\", \"\", \"\", 0, 0)\n\tcase *uefi.BIOSRegion:\n\t\tv.depth = v.indent + 1\n\t\tif f.FRegion != nil {\n\t\t\toffset = uint64(f.FRegion.BaseOffset())\n\t\t}\n\t\treturn v.printRow(f, \"BIOS\", \"\", \"\", offset, offset)\n\tcase *uefi.BIOSPadding:\n\t\treturn v.printRow(f, \"BIOS Pad\", \"\", \"\", v.offset+f.Offset, 0)\n\tcase *uefi.NVarStore:\n\t\treturn v.printRow(f, \"NVAR Store\", \"\", \"\", v.curOffset, v.curOffset)\n\tcase *uefi.NVar:\n\t\treturn v.printRow(f, \"NVAR\", f.GUID.String(), f, v.curOffset, v.curOffset+uint64(f.DataOffset))\n\tcase *uefi.RawRegion:\n\t\tv.depth = v.indent + 1\n\t\tif f.FRegion != nil {\n\t\t\toffset = uint64(f.FRegion.BaseOffset())\n\t\t}\n\t\treturn v.printRow(f, f.Type().String(), \"\", \"\", offset, offset)\n\tdefault:\n\t\treturn v.printRow(f, fmt.Sprintf(\"%T\", f), \"\", \"\", 0, 0)\n\t}\n}\n\nfunc indent(n int) string {\n\treturn strings.Repeat(\" \", n)\n}\n\nfunc (v *Table) printRow(f uefi.Firmware, node, name, typez interface{}, offset, dataOffset uint64) error {\n\tif v.W == nil {\n\t\tv.W = tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)\n\t\tdefer func() { v.W.Flush() }()\n\t\tif v.Layout {\n\t\t\tfmt.Fprintf(v.W, \"%sNode\\tGUID\/Name\\tOffset\\tSize\\n\", indent(v.indent))\n\t\t} else {\n\t\t\tfmt.Fprintf(v.W, \"%sNode\\tGUID\/Name\\tType\\tSize\\n\", indent(v.indent))\n\t\t}\n\t}\n\tlength := uint64(len(f.Buf()))\n\tif typez == \"\" {\n\t\tif uefi.IsErased(f.Buf(), uefi.Attributes.ErasePolarity) {\n\t\t\ttypez = \"(empty)\"\n\t\t}\n\t}\n\tif v.Layout {\n\t\tif name == \"\" {\n\t\t\tname = typez\n\t\t}\n\t\tfmt.Fprintf(v.W, \"%s%v\\t%v\\t%#08x\\t%#08x\\n\", indent(v.indent), node, name, offset, length)\n\t} else {\n\t\tfmt.Fprintf(v.W, \"%s%v\\t%v\\t%v\\t%#8x\\n\", indent(v.indent), node, name, typez, length)\n\t}\n\tv2 := *v\n\tv2.indent++\n\tv2.offset = dataOffset\n\tv2.curOffset = v2.offset\n\tif !v.TopLevel || v.indent < v.depth {\n\n\t\tif err := f.ApplyChildren(&v2); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tv.curOffset += length\n\tswitch f := f.(type) {\n\tcase 
*uefi.FirmwareVolume:\n\t\t\/\/ Print free space at the end of the volume\n\t\tif v.Layout {\n\t\t\tfmt.Fprintf(v.W, \"%s%v\\t%v\\t%#08x\\t%#08x\\n\", indent(v2.indent), \"Free\", \"\", offset+length-f.FreeSpace, f.FreeSpace)\n\t\t} else {\n\t\t\tfmt.Fprintf(v.W, \"%s%v\\t%v\\t%v\\t%#8x\\n\", indent(v2.indent), \"Free\", \"\", \"\", f.FreeSpace)\n\t\t}\n\tcase *uefi.NVarStore:\n\t\t\/\/ Print free space and GUID store\n\t\tif v.Layout {\n\t\t\tfmt.Fprintf(v.W, \"%s%v\\t%v\\t%#08x\\t%#08x\\n\", indent(v2.indent), \"Free\", \"\", offset+f.FreeSpaceOffset, f.GUIDStoreOffset-f.FreeSpaceOffset)\n\t\t\tfmt.Fprintf(v.W, \"%s%v\\t%v\\t%#08x\\t%#08x\\n\", indent(v2.indent), \"GUIDStore\", fmt.Sprintf(\"%d GUID\", len(f.GUIDStore)), offset+f.GUIDStoreOffset, f.Length-f.GUIDStoreOffset)\n\t\t} else {\n\t\t\tfmt.Fprintf(v.W, \"%s%v\\t%v\\t%v\\t%#8x\\n\", indent(v2.indent), \"Free\", \"\", \"\", f.GUIDStoreOffset-f.FreeSpaceOffset)\n\t\t\tfmt.Fprintf(v.W, \"%s%v\\t%v\\t%v\\t%#8x\\n\", indent(v2.indent), \"GUIDStore\", \"\", fmt.Sprintf(\"%d GUID\", len(f.GUIDStore)), f.Length-f.GUIDStoreOffset)\n\t\t}\n\tcase *uefi.File:\n\t\t\/\/ Align\n\t\t\/\/ TODO: do we need the complex align logic from assemble?\n\t\tv.curOffset = uefi.Align8(v.curOffset)\n\t}\n\treturn nil\n}\n\nfunc init() {\n\tRegisterCLI(\"table\", \"print out important information in a pretty table\", 0, func(args []string) (uefi.Visitor, error) {\n\t\treturn &Table{}, nil\n\t})\n\tRegisterCLI(\"layout-table\", \"print out offset and size information of top level firmware volumes in a pretty table\", 0, func(args []string) (uefi.Visitor, error) {\n\t\treturn &Table{Layout: true, TopLevel: true}, nil\n\t})\n\tRegisterCLI(\"layout-table-full\", \"print out offset and size information in a pretty table\", 0, func(args []string) (uefi.Visitor, error) {\n\t\treturn &Table{Layout: true}, nil\n\t})\n}\n<commit_msg>visitor\/table: Refactor printRow for layout view<commit_after>\/\/ Copyright 2018 the LinuxBoot Authors. 
All rights reserved\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage visitors\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/linuxboot\/fiano\/pkg\/uefi\"\n)\n\n\/\/ Table prints the GUIDS, types and sizes as a compact table.\ntype Table struct {\n\tW *tabwriter.Writer\n\tLayout bool\n\tTopLevel bool\n\tindent int\n\toffset uint64\n\tcurOffset uint64\n\tdepth int\n}\n
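\/\/\n\/\/ For orientation, the default (non-layout) mode renders one row per node with\n\/\/ columns Node, GUID\/Name, Type and Size; the sketch below is illustrative only\n\/\/ (hypothetical image, invented GUID and sizes, not output from a real run):\n\/\/\n\/\/   Node  GUID\/Name                             Type  Size\n\/\/   Image                                             0x800000\n\/\/    FV   aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee        0x400000\n\/\/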
\n\/\/ Run wraps Visit and performs some setup and teardown tasks.\nfunc (v *Table) Run(f uefi.Firmware) error {\n\treturn f.Apply(v)\n}\n\n\/\/ Visit applies the Table visitor to any Firmware type.\nfunc (v *Table) Visit(f uefi.Firmware) error {\n\tvar offset uint64\n\tswitch f := f.(type) {\n\tcase *uefi.FlashImage:\n\t\tv.depth = v.indent + 1\n\t\treturn v.printFirmware(f, \"Image\", \"\", \"\", 0, 0)\n\tcase *uefi.FirmwareVolume:\n\t\treturn v.printFirmware(f, \"FV\", f.FileSystemGUID.String(), \"\", v.offset+f.FVOffset, v.offset+f.FVOffset+f.DataOffset)\n\tcase *uefi.File:\n\t\t\/\/ TODO: make name part of the file node\n\t\treturn v.printFirmware(f, \"File\", f.Header.GUID.String(), f.Header.Type, v.curOffset, v.curOffset+f.DataOffset)\n\tcase *uefi.Section:\n\t\t\/\/ Reset offset to 0 for (compressed) section content\n\t\treturn v.printFirmware(f, \"Sec\", f.String(), f.Type, v.curOffset, 0)\n\tcase *uefi.FlashDescriptor:\n\t\tv.depth = v.indent + 1\n\t\treturn v.printFirmware(f, \"IFD\", \"\", \"\", 0, 0)\n\tcase *uefi.BIOSRegion:\n\t\tv.depth = v.indent + 1\n\t\tif f.FRegion != nil {\n\t\t\toffset = uint64(f.FRegion.BaseOffset())\n\t\t}\n\t\treturn v.printFirmware(f, \"BIOS\", \"\", \"\", offset, offset)\n\tcase *uefi.BIOSPadding:\n\t\treturn v.printFirmware(f, \"BIOS Pad\", \"\", \"\", v.offset+f.Offset, 0)\n\tcase *uefi.NVarStore:\n\t\treturn v.printFirmware(f, \"NVAR Store\", \"\", \"\", v.curOffset, v.curOffset)\n\tcase *uefi.NVar:\n\t\treturn v.printFirmware(f, \"NVAR\", f.GUID.String(), f, v.curOffset, v.curOffset+uint64(f.DataOffset))\n\tcase *uefi.RawRegion:\n\t\tv.depth = v.indent + 1\n\t\tif f.FRegion != nil {\n\t\t\toffset = uint64(f.FRegion.BaseOffset())\n\t\t}\n\t\treturn v.printFirmware(f, f.Type().String(), \"\", \"\", offset, offset)\n\tdefault:\n\t\treturn v.printFirmware(f, fmt.Sprintf(\"%T\", f), \"\", \"\", 0, 0)\n\t}\n}\n\nfunc indent(n int) string {\n\treturn strings.Repeat(\" \", n)\n}\n\nfunc (v *Table) printFirmware(f uefi.Firmware, node, name, typez interface{}, offset, dataOffset uint64) error {\n\tif v.W == nil {\n\t\tv.W = tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)\n\t\tdefer func() { v.W.Flush() }()\n\t\tif v.Layout {\n\t\t\tfmt.Fprintf(v.W, \"%sNode\\tGUID\/Name\\tOffset\\tSize\\n\", indent(v.indent))\n\t\t} else {\n\t\t\tfmt.Fprintf(v.W, \"%sNode\\tGUID\/Name\\tType\\tSize\\n\", indent(v.indent))\n\t\t}\n\t}\n\tlength := uint64(len(f.Buf()))\n\tif typez == \"\" {\n\t\tif uefi.IsErased(f.Buf(), uefi.Attributes.ErasePolarity) {\n\t\t\ttypez = \"(empty)\"\n\t\t}\n\t}\n\tv.printRow(node, name, typez, offset, length)\n\n\tv2 := *v\n\tv2.indent++\n\tv2.offset = dataOffset\n\tv2.curOffset = v2.offset\n\tif !v.TopLevel || v.indent < v.depth {\n\n\t\tif err := f.ApplyChildren(&v2); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tv.curOffset += length\n\tswitch f := f.(type) {\n\tcase *uefi.FirmwareVolume:\n\t\t\/\/ Print free space at the end of the volume\n\t\tv2.printRow(\"Free\", \"\", \"\", offset+length-f.FreeSpace, f.FreeSpace)\n\tcase *uefi.NVarStore:\n\t\t\/\/ Print free space and GUID store\n\t\tv2.printRow(\"Free\", \"\", \"\", offset+f.FreeSpaceOffset, f.GUIDStoreOffset-f.FreeSpaceOffset)\n\t\tv2.printRow(\"GUIDStore\", \"\", fmt.Sprintf(\"%d GUID\", len(f.GUIDStore)), offset+f.GUIDStoreOffset, f.Length-f.GUIDStoreOffset)\n\tcase *uefi.File:\n\t\t\/\/ Align\n\t\t\/\/ TODO: do we need the complex align logic from assemble?\n\t\tv.curOffset = uefi.Align8(v.curOffset)\n\t}\n\treturn nil\n}\n\nfunc (v *Table) printRow(node, name, typez interface{}, offset, length uint64) {\n\tif v.Layout {\n\t\tif name == \"\" {\n\t\t\tname = typez\n\t\t}\n\t\tfmt.Fprintf(v.W, \"%s%v\\t%v\\t%#08x\\t%#08x\\n\", indent(v.indent), node, name, offset, length)\n\t} else {\n\t\tfmt.Fprintf(v.W, \"%s%v\\t%v\\t%v\\t%#8x\\n\", indent(v.indent), node, name, typez, length)\n\t}\n}\n\nfunc init() {\n\tRegisterCLI(\"table\", \"print out important information in a pretty table\", 0, func(args []string) (uefi.Visitor, error) {\n\t\treturn &Table{}, nil\n\t})\n\tRegisterCLI(\"layout-table\", \"print out offset and size information of top level firmware volumes in a pretty table\", 0, func(args []string) (uefi.Visitor, error) {\n\t\treturn &Table{Layout: true, TopLevel: true}, nil\n\t})\n\tRegisterCLI(\"layout-table-full\", \"print out offset and size information in a pretty table\", 0, func(args []string) (uefi.Visitor, error) {\n\t\treturn &Table{Layout: true}, nil\n\t})\n}\n<|endoftext|>"}
{"text":"<commit_before>package plugin\n\nimport (\n\t\"bufio\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"os\"\n)\n\n\/\/\n\/\/ XDrive and plugin communicate via a so-called delim stream. Each message starts with\n\/\/ an unsigned uvarint32 for message length (uvarint32 encoding as defined in protobuf,\n\/\/ notice, it is unsigned). Then, a protobuf message.\n\/\/\n\/\/ NOTE that the message size can be 0 -- in fact, the trivial message of the last\n\/\/ read op is size 0. Must handle correctly.\n\/\/
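\/\/ Illustrative framing example (bytes invented for this note, not taken from a\n\/\/ real exchange): a 3-byte payload {0x08, 0x96, 0x01} goes on the wire as\n\/\/ {0x03, 0x08, 0x96, 0x01} -- the leading 0x03 is the uvarint encoding of length 3.\n\/\/ Lengths of 128 or more take multiple length bytes (low 7 bits first, continuation\n\/\/ bit set on all but the last), e.g. 300 encodes as {0xAC, 0x02}. A zero-length\n\/\/ message is just the single length byte 0x00 with no payload after it.\n\/\/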
\", msgsz)\n\n\tszbuf := make([]byte, 20)\n\tszsz := binary.PutUvarint(szbuf, uint64(msgsz))\n\tDbgLog(\"Delim write %d bytes, szsz %d.\", msgsz, szsz)\n\twsz, err := os.Stdout.Write(szbuf[:szsz])\n\tif wsz != szsz {\n\t\tDbgLog(\"Delim write msg sz %d short write (%d)\", szsz, wsz)\n\t\treturn fmt.Errorf(\"delim write short write msg sz\")\n\t}\n\n\tif msgsz > 0 {\n\t\twsz, err = os.Stdout.Write(msg)\n\t\tif wsz != msgsz {\n\t\t\tDbgLog(\"Delim write msg %d bytes short write (%d)\", msgsz, wsz)\n\t\t\treturn fmt.Errorf(\"delim write short write msg\")\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>Handle short read.<commit_after>package plugin\n\nimport (\n\t\"bufio\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"os\"\n)\n\n\/\/\n\/\/ XDrive and plugin commincate via so called delim stream. Each message starts with\n\/\/ an unsigned uvarint32 for message length (uvarint32 encoding as defined in protobuf,\n\/\/ notice, it is unsigned). Then, a protobuf message.\n\/\/\n\/\/ NOTE: that the message size can be 0 -- in fact, the trivial message of last\n\/\/ read op, is size 0. Must handle correctly.\n\/\/\n\n\/\/ golang binary.ReadUvarint requires a byte reader.\nvar stdin *bufio.Reader = bufio.NewReader(os.Stdin)\n\nfunc loopread(buf []byte, msgsz int) (int, error) {\n\tvar sz int\n\tfor sz = 0; sz < msgsz; {\n\t\trsz, err := stdin.Read(buf[sz:])\n\t\tif err != nil {\n\t\t\treturn sz, err\n\t\t}\n\t\tsz += rsz\n\t}\n\treturn sz, nil\n}\n\nfunc DelimRead(pb proto.Message) error {\n\tDbgLog(\"Delim read ... \")\n\tmsgsz, err := binary.ReadUvarint(stdin)\n\tif err != nil {\n\t\tDbgLogIfErr(err, \"Delim read error, msgsz is %d\", msgsz)\n\t\treturn err\n\t}\n\n\tDbgLog(\"Delim read msg %d bytes ... \", msgsz)\n\tbuf := make([]byte, msgsz)\n\trsz, err := loopread(buf, int(msgsz))\n\n\tif uint64(rsz) != msgsz {\n\t\tDbgLog(\"Delim read short read. msgsz is %d, read %d, err %v.\", msgsz, rsz, err)\n\t\t\/\/ don't check err, because EOF is a real error here.\n\t\treturn fmt.Errorf(\"delim read short read msg\")\n\t}\n\n\terr = proto.Unmarshal(buf, pb)\n\tDbgLogIfErr(err, \"Unmarshal error\")\n\treturn err\n}\n\n\/\/ NOTE: we do not wrap os.Stdout with a bufio -- actually, better not, because we\n\/\/ want to push message over the wire instead of buffering it.\nfunc DelimWrite(pb proto.Message) error {\n\tmsg, err := proto.Marshal(pb)\n\tif err != nil {\n\t\tDbgLogIfErr(err, \"Marshal error.\")\n\t\treturn err\n\t}\n\n\tmsgsz := len(msg)\n\tDbgLog(\"Delim write %d bytes ... 
\", msgsz)\n\n\tszbuf := make([]byte, 20)\n\tszsz := binary.PutUvarint(szbuf, uint64(msgsz))\n\tDbgLog(\"Delim write %d bytes, szsz %d.\", msgsz, szsz)\n\twsz, err := os.Stdout.Write(szbuf[:szsz])\n\tif wsz != szsz {\n\t\tDbgLog(\"Delim write msg sz %d short write (%d)\", szsz, wsz)\n\t\treturn fmt.Errorf(\"delim write short write msg sz\")\n\t}\n\n\tif msgsz > 0 {\n\t\twsz, err = os.Stdout.Write(msg)\n\t\tif wsz != msgsz {\n\t\t\tDbgLog(\"Delim write msg %d bytes short write (%d)\", msgsz, wsz)\n\t\t\treturn fmt.Errorf(\"delim write short write msg\")\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package genhandler\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"github.com\/grpc-ecosystem\/grpc-gateway\/protoc-gen-grpc-gateway\/descriptor\"\n\t\"github.com\/pkg\/errors\"\n)\n\nvar (\n\terrNoTargetService = errors.New(\"no target service defined in the file\")\n)\n\ntype param struct {\n\t*descriptor.File\n\tImports []descriptor.GoPackage\n\tSwagBuffer []byte\n}\n\nfunc applyTemplate(p param) (string, error) {\n\tw := bytes.NewBuffer(nil)\n\tif err := headerTemplate.Execute(w, p); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := regTemplate.ExecuteTemplate(w, \"base\", p); err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttype swaggerTmpl struct {\n\t\tFileName string\n\t\tSwagger string\n\t}\n\n\tif err := footerTemplate.Execute(w, p); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := patternsTemplate.ExecuteTemplate(w, \"base\", p); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn w.String(), nil\n}\n\nvar (\n\tfuncMap = template.FuncMap{\n\t\t\"dotToUnderscore\": func(s string) string { return strings.Replace(strings.Replace(s, \".\", \"_\", -1), \"\/\", \"_\", -1) },\n\t\t\"byteStr\": func(b []byte) string { return string(b) },\n\t\t\"escapeBackTicks\": func(s string) string { return strings.Replace(s, \"`\", \"` + \\\"``\\\" + `\", -1) },\n\t}\n\n\theaderTemplate = template.Must(template.New(\"header\").Parse(`\n\/\/ Code generated by protoc-gen-goclay\n\/\/ source: {{.GetName}}\n\/\/ DO NOT EDIT!\n\n\/*\nPackage {{.GoPkg.Name}} is a self-registering gRPC and JSON+Swagger service definition.\n\nIt conforms to the github.com\/utrack\/clay Service interface.\n*\/\npackage {{.GoPkg.Name}}\nimport (\n\t{{range $i := .Imports}}{{if $i.Standard}}{{$i | printf \"%s\\n\"}}{{end}}{{end}}\n\n\t{{range $i := .Imports}}{{if not $i.Standard}}{{$i | printf \"%s\\n\"}}{{end}}{{end}}\n)\n\n\/\/ Update your shared lib or downgrade generator to v1 if there's an error\nvar _ = transport.IsVersion2\n\nvar _ chi.Router\nvar _ runtime.Marshaler\n`))\n\tregTemplate = template.Must(template.New(\"svc-reg\").Funcs(funcMap).Parse(`\n{{define \"base\"}}\n{{range $svc := .Services}}\n\/\/ {{$svc.GetName}}Desc is a descriptor\/registrator for the {{$svc.GetName}}Server.\ntype {{$svc.GetName}}Desc struct {\n svc {{$svc.GetName}}Server\n}\n\n\/\/ New{{$svc.GetName}}ServiceDesc creates new registrator for the {{$svc.GetName}}Server.\nfunc New{{$svc.GetName}}ServiceDesc(svc {{$svc.GetName}}Server) *{{$svc.GetName}}Desc {\n return &{{$svc.GetName}}Desc{svc:svc}\n}\n\n\/\/ RegisterGRPC implements service registrator interface.\nfunc (d *{{$svc.GetName}}Desc) RegisterGRPC(s *grpc.Server) {\n Register{{$svc.GetName}}Server(s,d.svc)\n}\n\n\/\/ SwaggerDef returns this file's Swagger definition.\nfunc (d *{{$svc.GetName}}Desc) SwaggerDef() []byte {\n return _swaggerDef_{{dotToUnderscore $.GetName}}\n}\n\n\/\/ RegisterHTTP registers this service's HTTP 
handlers\/bindings.\nfunc (d *{{$svc.GetName}}Desc) RegisterHTTP(mux transport.Router) {\n\t{{range $m := $svc.Methods}}\n\t\/\/ Handlers for {{$m.GetName}}\n\t{{range $b := $m.Bindings}}\n\tmux.MethodFunc(pattern_goclay_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}},\"{{$b.HTTPMethod}}\", func(w http.ResponseWriter, r *http.Request) {\n defer r.Body.Close()\n\n\t var req {{$m.RequestType.GetName}}\n err := unmarshaler_goclay_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}(r,&req)\n\t if err != nil {\n\t httpruntime.SetError(r.Context(),r,w,errors.Wrap(err,\"couldn't parse request\"))\n\t return\n\t }\n\n\t ret,err := d.svc.{{$m.GetName}}(r.Context(),&req)\n\t if err != nil {\n\t httpruntime.SetError(r.Context(),r,w,errors.Wrap(err,\"returned from handler\"))\n\t return\n\t }\n\n _,outbound := httpruntime.MarshalerForRequest(r)\n w.Header().Set(\"Content-Type\", outbound.ContentType())\n\t err = outbound.Marshal(w, ret)\n\t if err != nil {\n\t httpruntime.SetError(r.Context(),r,w,errors.Wrap(err,\"couldn't write response\"))\n\t return\n\t }\n })\n {{end}}\n {{end}}\n}\n{{end}}\n{{end}} \/\/ base service handler ended\n`))\n\n\tfooterTemplate = template.Must(template.New(\"footer\").Funcs(funcMap).Parse(`\nvar _swaggerDef_{{dotToUnderscore .GetName}} = []byte(` + \"`\" + `{{escapeBackTicks (byteStr .SwagBuffer)}}` + `\n` + \"`)\" + `\n`))\n\n\tpatternsTemplate = template.Must(template.New(\"patterns\").Parse(`\n{{define \"base\"}}\nvar (\n{{range $svc := .Services}}\n{{range $m := $svc.Methods}}\n{{range $b := $m.Bindings}}\n\tpattern_goclay_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}} = \"{{$b.PathTmpl.Template}}\"\n unmarshaler_goclay_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}} = func(r *http.Request,req *{{$m.RequestType.GetName}}) error {\n\n {{if not $b.Body}}\n {{else}}\n {{template \"unmbody\" .}}\n {{end}}\n {{if not $b.PathParams}}\n {{ else }}\n {{template \"unmpath\" .}}\n {{end}}\n }\n{{end}}\n{{end}}\n{{end}}\n)\n{{end}}\n{{define \"unmbody\"}}\n inbound,_ := httpruntime.MarshalerForRequest(r)\n\t return errors.Wrap(inbound.Unmarshal(r.Body,req),\"couldn't read request JSON\")\n{{end}}\n{{define \"unmpath\"}}\n\t rctx := chi.RouteContext(r.Context())\n if rctx == nil {\n panic(\"Only chi router is supported for GETs atm\")\n\t }\n for pos,k := range rctx.URLParams.Keys {\n\t runtime.PopulateFieldFromPath(req, k, rctx.URLParams.Values[pos])\n }\n return nil\n{{end}}\n`))\n)\n<commit_msg>Now rendering empty GET requests w\/o params correctly<commit_after>package genhandler\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"github.com\/grpc-ecosystem\/grpc-gateway\/protoc-gen-grpc-gateway\/descriptor\"\n\t\"github.com\/pkg\/errors\"\n)\n\nvar (\n\terrNoTargetService = errors.New(\"no target service defined in the file\")\n)\n\ntype param struct {\n\t*descriptor.File\n\tImports []descriptor.GoPackage\n\tSwagBuffer []byte\n}\n\nfunc applyTemplate(p param) (string, error) {\n\tw := bytes.NewBuffer(nil)\n\tif err := headerTemplate.Execute(w, p); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := regTemplate.ExecuteTemplate(w, \"base\", p); err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttype swaggerTmpl struct {\n\t\tFileName string\n\t\tSwagger string\n\t}\n\n\tif err := footerTemplate.Execute(w, p); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := patternsTemplate.ExecuteTemplate(w, \"base\", p); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn w.String(), nil\n}\n\nvar (\n\tfuncMap = template.FuncMap{\n\t\t\"dotToUnderscore\": func(s string) string { return 
strings.Replace(strings.Replace(s, \".\", \"_\", -1), \"\/\", \"_\", -1) },\n\t\t\"byteStr\": func(b []byte) string { return string(b) },\n\t\t\"escapeBackTicks\": func(s string) string { return strings.Replace(s, \"`\", \"` + \\\"``\\\" + `\", -1) },\n\t}\n\n\theaderTemplate = template.Must(template.New(\"header\").Parse(`\n\/\/ Code generated by protoc-gen-goclay\n\/\/ source: {{.GetName}}\n\/\/ DO NOT EDIT!\n\n\/*\nPackage {{.GoPkg.Name}} is a self-registering gRPC and JSON+Swagger service definition.\n\nIt conforms to the github.com\/utrack\/clay Service interface.\n*\/\npackage {{.GoPkg.Name}}\nimport (\n\t{{range $i := .Imports}}{{if $i.Standard}}{{$i | printf \"%s\\n\"}}{{end}}{{end}}\n\n\t{{range $i := .Imports}}{{if not $i.Standard}}{{$i | printf \"%s\\n\"}}{{end}}{{end}}\n)\n\n\/\/ Update your shared lib or downgrade generator to v1 if there's an error\nvar _ = transport.IsVersion2\n\nvar _ chi.Router\nvar _ runtime.Marshaler\n`))\n\tregTemplate = template.Must(template.New(\"svc-reg\").Funcs(funcMap).Parse(`\n{{define \"base\"}}\n{{range $svc := .Services}}\n\/\/ {{$svc.GetName}}Desc is a descriptor\/registrator for the {{$svc.GetName}}Server.\ntype {{$svc.GetName}}Desc struct {\n svc {{$svc.GetName}}Server\n}\n\n\/\/ New{{$svc.GetName}}ServiceDesc creates new registrator for the {{$svc.GetName}}Server.\nfunc New{{$svc.GetName}}ServiceDesc(svc {{$svc.GetName}}Server) *{{$svc.GetName}}Desc {\n return &{{$svc.GetName}}Desc{svc:svc}\n}\n\n\/\/ RegisterGRPC implements service registrator interface.\nfunc (d *{{$svc.GetName}}Desc) RegisterGRPC(s *grpc.Server) {\n Register{{$svc.GetName}}Server(s,d.svc)\n}\n\n\/\/ SwaggerDef returns this file's Swagger definition.\nfunc (d *{{$svc.GetName}}Desc) SwaggerDef() []byte {\n return _swaggerDef_{{dotToUnderscore $.GetName}}\n}\n\n\/\/ RegisterHTTP registers this service's HTTP handlers\/bindings.\nfunc (d *{{$svc.GetName}}Desc) RegisterHTTP(mux transport.Router) {\n\t{{range $m := $svc.Methods}}\n\t\/\/ Handlers for {{$m.GetName}}\n\t{{range $b := $m.Bindings}}\n\tmux.MethodFunc(pattern_goclay_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}},\"{{$b.HTTPMethod}}\", func(w http.ResponseWriter, r *http.Request) {\n defer r.Body.Close()\n\n\t var req {{$m.RequestType.GetName}}\n err := unmarshaler_goclay_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}(r,&req)\n\t if err != nil {\n\t httpruntime.SetError(r.Context(),r,w,errors.Wrap(err,\"couldn't parse request\"))\n\t return\n\t }\n\n\t ret,err := d.svc.{{$m.GetName}}(r.Context(),&req)\n\t if err != nil {\n\t httpruntime.SetError(r.Context(),r,w,errors.Wrap(err,\"returned from handler\"))\n\t return\n\t }\n\n _,outbound := httpruntime.MarshalerForRequest(r)\n w.Header().Set(\"Content-Type\", outbound.ContentType())\n\t err = outbound.Marshal(w, ret)\n\t if err != nil {\n\t httpruntime.SetError(r.Context(),r,w,errors.Wrap(err,\"couldn't write response\"))\n\t return\n\t }\n })\n {{end}}\n {{end}}\n}\n{{end}}\n{{end}} \/\/ base service handler ended\n`))\n\n\tfooterTemplate = template.Must(template.New(\"footer\").Funcs(funcMap).Parse(`\nvar _swaggerDef_{{dotToUnderscore .GetName}} = []byte(` + \"`\" + `{{escapeBackTicks (byteStr .SwagBuffer)}}` + `\n` + \"`)\" + `\n`))\n\n\tpatternsTemplate = template.Must(template.New(\"patterns\").Parse(`\n{{define \"base\"}}\nvar (\n{{range $svc := .Services}}\n{{range $m := $svc.Methods}}\n{{range $b := $m.Bindings}}\n\tpattern_goclay_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}} = \"{{$b.PathTmpl.Template}}\"\n 
unmarshaler_goclay_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}} = func(r *http.Request,req *{{$m.RequestType.GetName}}) error {\n\n {{if $b.Body}}\n {{template \"unmbody\" .}}\n {{end}}\n {{if $b.PathParams}}\n {{template \"unmpath\" .}}\n {{end}}\n\n {{if and (not $b.Body) (not $b.PathParams)}}\n return nil\n {{end}}\n }\n{{end}}\n{{end}}\n{{end}}\n)\n{{end}}\n{{define \"unmbody\"}}\n inbound,_ := httpruntime.MarshalerForRequest(r)\n\t return errors.Wrap(inbound.Unmarshal(r.Body,req),\"couldn't read request JSON\")\n{{end}}\n{{define \"unmpath\"}}\n\t rctx := chi.RouteContext(r.Context())\n if rctx == nil {\n panic(\"Only chi router is supported for GETs atm\")\n\t }\n for pos,k := range rctx.URLParams.Keys {\n\t runtime.PopulateFieldFromPath(req, k, rctx.URLParams.Values[pos])\n }\n return nil\n{{end}}\n`))\n)\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\n\t\"github.com\/TykTechnologies\/tyk\/config\"\n)\n\ntype NodeResponseOK struct {\n\tStatus string\n\tMessage map[string]string\n\tNonce string\n}\n\ntype DashboardServiceSender interface {\n\tInit() error\n\tRegister() error\n\tDeRegister() error\n\tStartBeating() error\n\tStopBeating()\n}\n\ntype HTTPDashboardHandler struct {\n\tRegistrationEndpoint string\n\tDeRegistrationEndpoint string\n\tHeartBeatEndpoint string\n\tSecret string\n\n\theartBeatStopSentinel bool\n}\n\nfunc initialiseClient(timeout time.Duration) *http.Client {\n\tclient := &http.Client{}\n\tif config.Global().HttpServerOptions.UseSSL {\n\t\t\/\/ Setup HTTPS client\n\t\ttlsConfig := &tls.Config{\n\t\t\tInsecureSkipVerify: config.Global().HttpServerOptions.SSLInsecureSkipVerify,\n\t\t}\n\t\ttransport := &http.Transport{TLSClientConfig: tlsConfig}\n\t\tclient = &http.Client{Transport: transport, Timeout: timeout}\n\t} else {\n\t\tclient = &http.Client{Timeout: timeout}\n\t}\n\treturn client\n}\n\nfunc reLogin() {\n\tif !config.Global().UseDBAppConfigs {\n\t\treturn\n\t}\n\n\tlog.WithFields(logrus.Fields{\n\t\t\"prefix\": \"main\",\n\t}).Info(\"Registering node (again).\")\n\tDashService.StopBeating()\n\tif err := DashService.DeRegister(); err != nil {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"prefix\": \"main\",\n\t\t}).Error(\"Could not deregister: \", err)\n\t}\n\n\ttime.Sleep(5 * time.Second)\n\n\tif err := DashService.Register(); err != nil {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"prefix\": \"main\",\n\t\t}).Error(\"Could not register: \", err)\n\t} else {\n\t\tgo DashService.StartBeating()\n\t}\n\n\tlog.WithFields(logrus.Fields{\n\t\t\"prefix\": \"main\",\n\t}).Info(\"Recovering configurations, reloading...\")\n\treloadURLStructure(nil)\n}\n\nfunc (h *HTTPDashboardHandler) Init() error {\n\th.RegistrationEndpoint = buildConnStr(\"\/register\/node\")\n\th.DeRegistrationEndpoint = buildConnStr(\"\/system\/node\")\n\th.HeartBeatEndpoint = buildConnStr(\"\/register\/ping\")\n\tif h.Secret = config.Global().NodeSecret; h.Secret == \"\" {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"prefix\": \"main\",\n\t\t}).Fatal(\"Node secret is not set, required for dashboard connection\")\n\t}\n\treturn nil\n}\n\nfunc (h *HTTPDashboardHandler) Register() error {\n\treq := h.newRequest(h.RegistrationEndpoint)\n\tc := initialiseClient(5 * time.Second)\n\tresp, err := c.Do(req)\n\n\tif err != nil {\n\t\tlog.Errorf(\"Request failed with error %v; retrying in 5s\", err)\n\t\ttime.Sleep(time.Second * 5)\n\t\treturn 
h.Register()\n\t} else if resp != nil && resp.StatusCode != 200 {\n\t\tlog.Errorf(\"Response failed with code %d; retrying in 5s\", resp.StatusCode)\n\t\ttime.Sleep(time.Second * 5)\n\t\treturn h.Register()\n\t}\n\n\tdefer resp.Body.Close()\n\tval := NodeResponseOK{}\n\tif err := json.NewDecoder(resp.Body).Decode(&val); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Set the NodeID\n\tvar found bool\n\tNodeID, found = val.Message[\"NodeID\"]\n\tif !found {\n\t\tlog.Error(\"Failed to register node, retrying in 5s\")\n\t\ttime.Sleep(time.Second * 5)\n\t\treturn h.Register()\n\t}\n\n\tlog.WithFields(logrus.Fields{\n\t\t\"prefix\": \"dashboard\",\n\t\t\"id\": NodeID,\n\t}).Info(\"Node registered\")\n\n\t\/\/ Set the nonce\n\tServiceNonce = val.Nonce\n\tlog.Debug(\"Registration Finished: Nonce Set: \", ServiceNonce)\n\n\treturn nil\n}\n\nfunc (h *HTTPDashboardHandler) StartBeating() error {\n\tfor !h.heartBeatStopSentinel {\n\t\tif err := h.sendHeartBeat(); err != nil {\n\t\t\tlog.Warning(err)\n\t\t}\n\t\ttime.Sleep(time.Second * 2)\n\t}\n\n\tlog.Info(\"Stopped Heartbeat\")\n\th.heartBeatStopSentinel = false\n\treturn nil\n}\n\nfunc (h *HTTPDashboardHandler) StopBeating() {\n\th.heartBeatStopSentinel = true\n}\n\nfunc (h *HTTPDashboardHandler) newRequest(endpoint string) *http.Request {\n\treq, err := http.NewRequest(\"GET\", endpoint, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treq.Header.Set(\"authorization\", h.Secret)\n\treq.Header.Set(\"x-tyk-hostname\", hostDetails.Hostname)\n\treturn req\n}\n\nfunc (h *HTTPDashboardHandler) sendHeartBeat() error {\n\treq := h.newRequest(h.HeartBeatEndpoint)\n\treq.Header.Set(\"x-tyk-nodeid\", NodeID)\n\treq.Header.Set(\"x-tyk-nonce\", ServiceNonce)\n\tc := initialiseClient(5 * time.Second)\n\n\tresp, err := c.Do(req)\n\tif err != nil || resp.StatusCode != 200 {\n\t\treturn errors.New(\"dashboard is down? Heartbeat is failing\")\n\t}\n\n\tdefer resp.Body.Close()\n\tval := NodeResponseOK{}\n\tif err := json.NewDecoder(resp.Body).Decode(&val); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Set the nonce\n\tServiceNonce = val.Nonce\n\t\/\/log.Debug(\"Heartbeat Finished: Nonce Set: \", ServiceNonce)\n\n\treturn nil\n}\n\nfunc (h *HTTPDashboardHandler) DeRegister() error {\n\treq := h.newRequest(h.DeRegistrationEndpoint)\n\n\treq.Header.Set(\"x-tyk-nodeid\", NodeID)\n\treq.Header.Set(\"x-tyk-nonce\", ServiceNonce)\n\n\tc := initialiseClient(5 * time.Second)\n\tresp, err := c.Do(req)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"deregister request failed with error %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn fmt.Errorf(\"deregister request failed with status %v\", resp.StatusCode)\n\t}\n\n\tval := NodeResponseOK{}\n\tif err := json.NewDecoder(resp.Body).Decode(&val); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Set the nonce\n\tServiceNonce = val.Nonce\n\tlog.Info(\"De-registered.\")\n\n\treturn nil\n}\n<commit_msg>Reuse client for heartbeat. 
(#1946)<commit_after>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\n\t\"github.com\/TykTechnologies\/tyk\/config\"\n)\n\ntype NodeResponseOK struct {\n\tStatus string\n\tMessage map[string]string\n\tNonce string\n}\n\ntype DashboardServiceSender interface {\n\tInit() error\n\tRegister() error\n\tDeRegister() error\n\tStartBeating() error\n\tStopBeating()\n}\n\ntype HTTPDashboardHandler struct {\n\tRegistrationEndpoint string\n\tDeRegistrationEndpoint string\n\tHeartBeatEndpoint string\n\tSecret string\n\n\theartBeatStopSentinel bool\n}\n\nfunc initialiseClient(timeout time.Duration) *http.Client {\n\tclient := &http.Client{}\n\tif config.Global().HttpServerOptions.UseSSL {\n\t\t\/\/ Setup HTTPS client\n\t\ttlsConfig := &tls.Config{\n\t\t\tInsecureSkipVerify: config.Global().HttpServerOptions.SSLInsecureSkipVerify,\n\t\t}\n\t\ttransport := &http.Transport{TLSClientConfig: tlsConfig}\n\t\tclient = &http.Client{Transport: transport, Timeout: timeout}\n\t} else {\n\t\tclient = &http.Client{Timeout: timeout}\n\t}\n\treturn client\n}\n\nfunc reLogin() {\n\tif !config.Global().UseDBAppConfigs {\n\t\treturn\n\t}\n\n\tlog.WithFields(logrus.Fields{\n\t\t\"prefix\": \"main\",\n\t}).Info(\"Registering node (again).\")\n\tDashService.StopBeating()\n\tif err := DashService.DeRegister(); err != nil {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"prefix\": \"main\",\n\t\t}).Error(\"Could not deregister: \", err)\n\t}\n\n\ttime.Sleep(5 * time.Second)\n\n\tif err := DashService.Register(); err != nil {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"prefix\": \"main\",\n\t\t}).Error(\"Could not register: \", err)\n\t} else {\n\t\tgo DashService.StartBeating()\n\t}\n\n\tlog.WithFields(logrus.Fields{\n\t\t\"prefix\": \"main\",\n\t}).Info(\"Recovering configurations, reloading...\")\n\treloadURLStructure(nil)\n}\n\nfunc (h *HTTPDashboardHandler) Init() error {\n\th.RegistrationEndpoint = buildConnStr(\"\/register\/node\")\n\th.DeRegistrationEndpoint = buildConnStr(\"\/system\/node\")\n\th.HeartBeatEndpoint = buildConnStr(\"\/register\/ping\")\n\tif h.Secret = config.Global().NodeSecret; h.Secret == \"\" {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"prefix\": \"main\",\n\t\t}).Fatal(\"Node secret is not set, required for dashboard connection\")\n\t}\n\treturn nil\n}\n\nfunc (h *HTTPDashboardHandler) Register() error {\n\treq := h.newRequest(h.RegistrationEndpoint)\n\tc := initialiseClient(5 * time.Second)\n\tresp, err := c.Do(req)\n\n\tif err != nil {\n\t\tlog.Errorf(\"Request failed with error %v; retrying in 5s\", err)\n\t\ttime.Sleep(time.Second * 5)\n\t\treturn h.Register()\n\t} else if resp != nil && resp.StatusCode != 200 {\n\t\tlog.Errorf(\"Response failed with code %d; retrying in 5s\", resp.StatusCode)\n\t\ttime.Sleep(time.Second * 5)\n\t\treturn h.Register()\n\t}\n\n\tdefer resp.Body.Close()\n\tval := NodeResponseOK{}\n\tif err := json.NewDecoder(resp.Body).Decode(&val); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Set the NodeID\n\tvar found bool\n\tNodeID, found = val.Message[\"NodeID\"]\n\tif !found {\n\t\tlog.Error(\"Failed to register node, retrying in 5s\")\n\t\ttime.Sleep(time.Second * 5)\n\t\treturn h.Register()\n\t}\n\n\tlog.WithFields(logrus.Fields{\n\t\t\"prefix\": \"dashboard\",\n\t\t\"id\": NodeID,\n\t}).Info(\"Node registered\")\n\n\t\/\/ Set the nonce\n\tServiceNonce = val.Nonce\n\tlog.Debug(\"Registration Finished: Nonce Set: \", ServiceNonce)\n\n\treturn nil\n}\n\nfunc (h 
*HTTPDashboardHandler) StartBeating() error {\n\n\treq := h.newRequest(h.HeartBeatEndpoint)\n\n\tclient := initialiseClient(5 * time.Second)\n\n\tfor !h.heartBeatStopSentinel {\n\t\tif err := h.sendHeartBeat(req, client); err != nil {\n\t\t\tlog.Warning(err)\n\t\t}\n\t\ttime.Sleep(time.Second * 2)\n\t}\n\n\tlog.Info(\"Stopped Heartbeat\")\n\th.heartBeatStopSentinel = false\n\treturn nil\n}\n\nfunc (h *HTTPDashboardHandler) StopBeating() {\n\th.heartBeatStopSentinel = true\n}\n\nfunc (h *HTTPDashboardHandler) newRequest(endpoint string) *http.Request {\n\treq, err := http.NewRequest(\"GET\", endpoint, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treq.Header.Set(\"authorization\", h.Secret)\n\treq.Header.Set(\"x-tyk-hostname\", hostDetails.Hostname)\n\treturn req\n}\n\nfunc (h *HTTPDashboardHandler) sendHeartBeat(req *http.Request, client *http.Client) error {\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn errors.New(\"dashboard is down? Heartbeat is failing\")\n\t}\n\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn errors.New(\"dashboard is down? Heartbeat non-200 response\")\n\t}\n\tval := NodeResponseOK{}\n\tif err := json.NewDecoder(resp.Body).Decode(&val); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Set the nonce\n\tServiceNonce = val.Nonce\n\t\/\/log.Debug(\"Heartbeat Finished: Nonce Set: \", ServiceNonce)\n\n\treturn nil\n}\n\nfunc (h *HTTPDashboardHandler) DeRegister() error {\n\treq := h.newRequest(h.DeRegistrationEndpoint)\n\n\treq.Header.Set(\"x-tyk-nodeid\", NodeID)\n\treq.Header.Set(\"x-tyk-nonce\", ServiceNonce)\n\n\tc := initialiseClient(5 * time.Second)\n\tresp, err := c.Do(req)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"deregister request failed with error %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn fmt.Errorf(\"deregister request failed with status %v\", resp.StatusCode)\n\t}\n\n\tval := NodeResponseOK{}\n\tif err := json.NewDecoder(resp.Body).Decode(&val); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Set the nonce\n\tServiceNonce = val.Nonce\n\tlog.Info(\"De-registered.\")\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package serverutils provides utilities to work with the cloudprober's external probe.\npackage serverutils\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/golang\/glog\"\n\t\"github.com\/golang\/protobuf\/proto\"\n)\n\nfunc readPayload(r *bufio.Reader) ([]byte, error) {\n\t\/\/ header format is: \"\\nContent-Length: %d\\n\\n\"\n\tconst prefix = \"Content-Length: \"\n\tvar line string\n\tvar length int\n\tvar err error\n\n\t\/\/ Read lines until header line is found\n\tfor {\n\t\tline, err = r.ReadString('\\n')\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif strings.HasPrefix(line, prefix) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ Parse 
content length from the header\n\tlength, err = strconv.Atoi(line[len(prefix) : len(line)-1])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Consume the blank line following the header line\n\tif _, err = r.ReadSlice('\\n'); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Slurp in the payload\n\tbuf := make([]byte, length)\n\tif _, err = io.ReadFull(r, buf); err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf, nil\n}\n\n\/\/ ReadProbeReply reads ProbeReply from the supplied bufio.Reader and returns it to\n\/\/ the caller.\nfunc ReadProbeReply(r *bufio.Reader) (*ProbeReply, error) {\n\tbuf, err := readPayload(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trep := new(ProbeReply)\n\treturn rep, proto.Unmarshal(buf, rep)\n}\n\n\/\/ ReadProbeRequest reads and parses ProbeRequest protocol buffers from the given\n\/\/ bufio.Reader.\nfunc ReadProbeRequest(r *bufio.Reader) (*ProbeRequest, error) {\n\tbuf, err := readPayload(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq := new(ProbeRequest)\n\treturn req, proto.Unmarshal(buf, req)\n}\n\nfunc writeReplies(repliesChan chan *ProbeReply) {\n\tfor r := range repliesChan {\n\t\tbuf, err := proto.Marshal(r)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed marshalling response: %v\", err)\n\t\t}\n\t\tif _, err := fmt.Fprintf(os.Stdout, \"\\nContent-Length: %d\\n\\n%s\", len(buf), buf); err != nil {\n\t\t\tlog.Fatalf(\"Failed writing response: %v\", err)\n\t\t}\n\t}\n}\n\n\/\/ Serve blocks indefinitely, servicing probe requests. Note that this function is\n\/\/ provided mainly to help external probe server implementations. Cloudprober doesn't\n\/\/ make use of it. Example usage:\n\/\/\timport (\n\/\/\t\tserverpb \"github.com\/google\/cloudprober\/probes\/external\/serverutils\/server_proto\"\n\/\/\t\t\"github.com\/google\/cloudprober\/probes\/external\/serverutils\/serverutils\"\n\/\/\t)\n\/\/\tfunc runProbe(opts []*cppb.ProbeRequest_Option) {\n\/\/ \t...\n\/\/\t}\n\/\/\tserverutils.Serve(func(req *ProbeRequest, reply *ProbeReply) {\n\/\/ \t\tpayload, errMsg, _ := runProbe(req.GetOptions())\n\/\/\t\treply.Payload = proto.String(payload)\n\/\/\t\tif errMsg != \"\" {\n\/\/\t\t\treply.ErrorMessage = proto.String(errMsg)\n\/\/\t\t}\n\/\/\t})\nfunc Serve(probeFunc func(*ProbeRequest, *ProbeReply)) {\n\tstdin := bufio.NewReader(os.Stdin)\n\n\trepliesChan := make(chan *ProbeReply)\n\n\t\/\/ Write replies to stdout. 
These are not required to be in-order.\n\tgo writeReplies(repliesChan)\n\n\t\/\/ Read requests from stdin, and dispatch probes to service them.\n\tfor {\n\t\trequest, err := ReadProbeRequest(stdin)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed reading request: %v\", err)\n\t\t}\n\t\tlog.Infof(\"Received request id: %d\", request.RequestId)\n\t\tgo func() {\n\t\t\treply := &ProbeReply{\n\t\t\t\tRequestId: request.RequestId,\n\t\t\t}\n\t\t\tdone := make(chan bool, 1)\n\t\t\ttimeout := time.After(time.Duration(*request.TimeLimit) * time.Millisecond)\n\t\t\tgo func() {\n\t\t\t\tprobeFunc(request, reply)\n\t\t\t\tdone <- true\n\t\t\t}()\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\trepliesChan <- reply\n\t\t\tcase <-timeout:\n\t\t\t\t\/\/ drop the request on the floor.\n\t\t\t\tlog.Warningf(\"Timeout for request %v\\n\", *reply.RequestId)\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Timeout for request %v\\n\", *reply.RequestId)\n\t\t\t}\n\t\t}()\n\t}\n}\n<commit_msg>Remove unnecessary logging in the external prober.<commit_after>\/\/ Copyright 2017 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package serverutils provides utilities to work with the cloudprober's external probe.\npackage serverutils\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n)\n\nfunc readPayload(r *bufio.Reader) ([]byte, error) {\n\t\/\/ header format is: \"\\nContent-Length: %d\\n\\n\"\n\tconst prefix = \"Content-Length: \"\n\tvar line string\n\tvar length int\n\tvar err error\n\n\t\/\/ Read lines until header line is found\n\tfor {\n\t\tline, err = r.ReadString('\\n')\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif strings.HasPrefix(line, prefix) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ Parse content length from the header\n\tlength, err = strconv.Atoi(line[len(prefix) : len(line)-1])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Consume the blank line following the header line\n\tif _, err = r.ReadSlice('\\n'); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Slurp in the payload\n\tbuf := make([]byte, length)\n\tif _, err = io.ReadFull(r, buf); err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf, nil\n}\n\n\/\/ ReadProbeReply reads ProbeReply from the supplied bufio.Reader and returns it to\n\/\/ the caller.\nfunc ReadProbeReply(r *bufio.Reader) (*ProbeReply, error) {\n\tbuf, err := readPayload(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trep := new(ProbeReply)\n\treturn rep, proto.Unmarshal(buf, rep)\n}\n\n\/\/ ReadProbeRequest reads and parses ProbeRequest protocol buffers from the given\n\/\/ bufio.Reader.\nfunc ReadProbeRequest(r *bufio.Reader) (*ProbeRequest, error) {\n\tbuf, err := readPayload(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq := new(ProbeRequest)\n\treturn req, proto.Unmarshal(buf, req)\n}\n\nfunc writeReplies(repliesChan chan *ProbeReply) {\n\tfor r := range repliesChan {\n\t\tbuf, err := proto.Marshal(r)\n\t\tif 
err != nil {\n\t\t\tlog.Fatalf(\"Failed marshalling response: %v\", err)\n\t\t}\n\t\tif _, err := fmt.Fprintf(os.Stdout, \"\\nContent-Length: %d\\n\\n%s\", len(buf), buf); err != nil {\n\t\t\tlog.Fatalf(\"Failed writing response: %v\", err)\n\t\t}\n\t}\n}\n\n\/\/ Serve blocks indefinitely, servicing probe requests. Note that this function is\n\/\/ provided mainly to help external probe server implementations. Cloudprober doesn't\n\/\/ make use of it. Example usage:\n\/\/\timport (\n\/\/\t\tserverpb \"github.com\/google\/cloudprober\/probes\/external\/serverutils\/server_proto\"\n\/\/\t\t\"github.com\/google\/cloudprober\/probes\/external\/serverutils\/serverutils\"\n\/\/\t)\n\/\/\tfunc runProbe(opts []*cppb.ProbeRequest_Option) {\n\/\/ \t...\n\/\/\t}\n\/\/\tserverutils.Serve(func(req *ProbeRequest, reply *ProbeReply) {\n\/\/ \t\tpayload, errMsg, _ := runProbe(req.GetOptions())\n\/\/\t\treply.Payload = proto.String(payload)\n\/\/\t\tif errMsg != \"\" {\n\/\/\t\t\treply.ErrorMessage = proto.String(errMsg)\n\/\/\t\t}\n\/\/\t})\nfunc Serve(probeFunc func(*ProbeRequest, *ProbeReply)) {\n\tstdin := bufio.NewReader(os.Stdin)\n\n\trepliesChan := make(chan *ProbeReply)\n\n\t\/\/ Write replies to stdout. These are not required to be in-order.\n\tgo writeReplies(repliesChan)\n\n\t\/\/ Read requests from stdin, and dispatch probes to service them.\n\tfor {\n\t\trequest, err := ReadProbeRequest(stdin)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed reading request: %v\", err)\n\t\t}\n\t\tgo func() {\n\t\t\treply := &ProbeReply{\n\t\t\t\tRequestId: request.RequestId,\n\t\t\t}\n\t\t\tdone := make(chan bool, 1)\n\t\t\ttimeout := time.After(time.Duration(*request.TimeLimit) * time.Millisecond)\n\t\t\tgo func() {\n\t\t\t\tprobeFunc(request, reply)\n\t\t\t\tdone <- true\n\t\t\t}()\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\trepliesChan <- reply\n\t\t\tcase <-timeout:\n\t\t\t\t\/\/ drop the request on the floor.\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Timeout for request %v\\n\", *reply.RequestId)\n\t\t\t}\n\t\t}()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 The go-github AUTHORS. 
All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage github\n\nimport (\n\t\"testing\"\n)\n\nfunc TestEditChange_Marshal_TitleChange(t *testing.T) {\n\ttestJSONMarshal(t, &EditChange{}, \"{}\")\n\n\tu := &EditChange{\n\t\tTitle: &EditTitle{\n\t\t\tFrom: String(\"TitleFrom\"),\n\t\t},\n\t\tBody: nil,\n\t\tBase: nil,\n\t}\n\n\twant := `{\n\t\t\"title\": {\n\t\t\t\"from\": \"TitleFrom\"\n\t\t }\n\t}`\n\n\ttestJSONMarshal(t, u, want)\n}\n\nfunc TestEditChange_Marshal_BodyChange(t *testing.T) {\n\ttestJSONMarshal(t, &EditChange{}, \"{}\")\n\n\tu := &EditChange{\n\t\tTitle: nil,\n\t\tBody: &EditBody{\n\t\t\tFrom: String(\"BodyFrom\"),\n\t\t},\n\t\tBase: nil,\n\t}\n\n\twant := `{\n\t\t\"body\": {\n\t\t\t\"from\": \"BodyFrom\"\n\t\t }\n\t}`\n\n\ttestJSONMarshal(t, u, want)\n}\n\nfunc TestEditChange_Marshal_BaseChange(t *testing.T) {\n\ttestJSONMarshal(t, &EditChange{}, \"{}\")\n\n\tBase := EditBase{\n\t\tRef: &EditRef{\n\t\t\tFrom: String(\"BaseRefFrom\"),\n\t\t},\n\t\tSHA: &EditSHA{\n\t\t\tFrom: String(\"BaseSHAFrom\"),\n\t\t},\n\t}\n\n\tu := &EditChange{\n\t\tTitle: nil,\n\t\tBody: nil,\n\t\tBase: &Base,\n\t}\n\n\twant := `{\n\t\t\"base\": {\n\t\t\t\"ref\": {\n\t\t\t\t\"from\": \"BaseRefFrom\"\n\t\t\t},\n\t\t\t\"sha\": {\n\t\t\t\t\"from\": \"BaseSHAFrom\"\n\t\t\t}\n\t\t}\n\t}`\n\n\ttestJSONMarshal(t, u, want)\n}\n<commit_msg>Add test cases for JSON resource marshaling (#1902)<commit_after>\/\/ Copyright 2020 The go-github AUTHORS. All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage github\n\nimport (\n\t\"testing\"\n)\n\nfunc TestEditChange_Marshal_TitleChange(t *testing.T) {\n\ttestJSONMarshal(t, &EditChange{}, \"{}\")\n\n\tu := &EditChange{\n\t\tTitle: &EditTitle{\n\t\t\tFrom: String(\"TitleFrom\"),\n\t\t},\n\t\tBody: nil,\n\t\tBase: nil,\n\t}\n\n\twant := `{\n\t\t\"title\": {\n\t\t\t\"from\": \"TitleFrom\"\n\t\t }\n\t}`\n\n\ttestJSONMarshal(t, u, want)\n}\n\nfunc TestEditChange_Marshal_BodyChange(t *testing.T) {\n\ttestJSONMarshal(t, &EditChange{}, \"{}\")\n\n\tu := &EditChange{\n\t\tTitle: nil,\n\t\tBody: &EditBody{\n\t\t\tFrom: String(\"BodyFrom\"),\n\t\t},\n\t\tBase: nil,\n\t}\n\n\twant := `{\n\t\t\"body\": {\n\t\t\t\"from\": \"BodyFrom\"\n\t\t }\n\t}`\n\n\ttestJSONMarshal(t, u, want)\n}\n\nfunc TestEditChange_Marshal_BaseChange(t *testing.T) {\n\ttestJSONMarshal(t, &EditChange{}, \"{}\")\n\n\tBase := EditBase{\n\t\tRef: &EditRef{\n\t\t\tFrom: String(\"BaseRefFrom\"),\n\t\t},\n\t\tSHA: &EditSHA{\n\t\t\tFrom: String(\"BaseSHAFrom\"),\n\t\t},\n\t}\n\n\tu := &EditChange{\n\t\tTitle: nil,\n\t\tBody: nil,\n\t\tBase: &Base,\n\t}\n\n\twant := `{\n\t\t\"base\": {\n\t\t\t\"ref\": {\n\t\t\t\t\"from\": \"BaseRefFrom\"\n\t\t\t},\n\t\t\t\"sha\": {\n\t\t\t\t\"from\": \"BaseSHAFrom\"\n\t\t\t}\n\t\t}\n\t}`\n\n\ttestJSONMarshal(t, u, want)\n}\n\nfunc TestProjectChange_Marshal_NameChange(t *testing.T) {\n\ttestJSONMarshal(t, &ProjectChange{}, \"{}\")\n\n\tNameFrom := struct {\n\t\tFrom *string `json:\"from,omitempty\"`\n\t}{\n\t\tFrom: String(\"NameFrom\"),\n\t}\n\n\tu := &ProjectChange{\n\t\tName: &NameFrom,\n\t\tBody: nil,\n\t}\n\n\twant := `{\n\t\t\"name\": {\n\t\t\t\"from\": \"NameFrom\"\n\t\t }\n\t}`\n\n\ttestJSONMarshal(t, u, want)\n}\n\nfunc TestProjectChange_Marshal_BodyChange(t *testing.T) {\n\ttestJSONMarshal(t, &ProjectChange{}, \"{}\")\n\n\tBodyFrom := struct {\n\t\tFrom *string 
`json:\"from,omitempty\"`\n\t}{\n\t\tFrom: String(\"BodyFrom\"),\n\t}\n\n\tu := &ProjectChange{\n\t\tName: nil,\n\t\tBody: &BodyFrom,\n\t}\n\n\twant := `{\n\t\t\"body\": {\n\t\t\t\"from\": \"BodyFrom\"\n\t\t }\n\t}`\n\n\ttestJSONMarshal(t, u, want)\n}\n\nfunc TestProjectCardChange_Marshal_NoteChange(t *testing.T) {\n\ttestJSONMarshal(t, &ProjectCardChange{}, \"{}\")\n\n\tNoteFrom := struct {\n\t\tFrom *string `json:\"from,omitempty\"`\n\t}{\n\t\tFrom: String(\"NoteFrom\"),\n\t}\n\n\tu := &ProjectCardChange{\n\t\tNote: &NoteFrom,\n\t}\n\n\twant := `{\n\t\t\"note\": {\n\t\t\t\"from\": \"NoteFrom\"\n\t\t }\n\t}`\n\n\ttestJSONMarshal(t, u, want)\n}\n\nfunc TestProjectColumnChange_Marshal_NameChange(t *testing.T) {\n\ttestJSONMarshal(t, &ProjectColumnChange{}, \"{}\")\n\n\tNameFrom := struct {\n\t\tFrom *string `json:\"from,omitempty\"`\n\t}{\n\t\tFrom: String(\"NameFrom\"),\n\t}\n\n\tu := &ProjectColumnChange{\n\t\tName: &NameFrom,\n\t}\n\n\twant := `{\n\t\t\"name\": {\n\t\t\t\"from\": \"NameFrom\"\n\t\t }\n\t}`\n\n\ttestJSONMarshal(t, u, want)\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"errors\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tjwt \"github.com\/dgrijalva\/jwt-go\"\n\t\"github.com\/julienschmidt\/httprouter\"\n)\n\ntype authenticateRequest struct {\n\tUserName string `json:\"username\"`\n\tPassword string `json:\"password\"`\n}\n\ntype authenticateResponse struct {\n\tToken string `json:\"token\"`\n}\n\nfunc (h apiHandler) postAuthenticate(resp http.ResponseWriter, req *http.Request, p httprouter.Params) {\n\tvar authRequest authenticateRequest\n\tif err := readJSONFromRequest(req, &authRequest); err != nil {\n\t\th.JSON(resp, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\n\tuser, err := h.model.Users.Authenticate(authRequest.UserName, authRequest.Password)\n\tif err != nil {\n\t\th.JSON(resp, http.StatusUnauthorized, err.Error())\n\t\treturn\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.StandardClaims{\n\t\tExpiresAt: time.Now().Add(time.Hour * 14 * 24).Unix(),\n\t\tIssuedAt: time.Now().Unix(),\n\t\tSubject: strconv.FormatInt(user.ID, 10),\n\t})\n\t\/\/ Always sign using the 0'th key\n\ttokenStr, err := token.SignedString([]byte(h.cfg.SecureKeys[0]))\n\tif err != nil {\n\t\th.JSON(resp, http.StatusInternalServerError, err.Error())\n\t}\n\n\th.JSON(resp, http.StatusOK, authenticateResponse{Token: tokenStr})\n}\n\nfunc (h apiHandler) requireAuthentication(handler httprouter.Handle) httprouter.Handle {\n\treturn func(resp http.ResponseWriter, req *http.Request, p httprouter.Params) {\n\t\tauthHeader := req.Header.Get(\"Authorization\")\n\t\tif authHeader == \"\" {\n\t\t\th.JSON(resp, http.StatusUnauthorized, \"Authorization header missing\")\n\t\t\treturn\n\t\t}\n\n\t\tauthHeaderParts := strings.Split(authHeader, \" \")\n\t\tif len(authHeaderParts) != 2 || strings.ToLower(authHeaderParts[0]) != \"bearer\" {\n\t\t\th.JSON(resp, http.StatusUnauthorized, \"Authorization header must be in the form 'Bearer {token}'\")\n\t\t\treturn\n\t\t}\n\n\t\ttokenStr := authHeaderParts[1]\n\n\t\t\/\/ Try each key when validating the token\n\t\tvar lastErr error\n\t\tfor _, key := range h.cfg.SecureKeys {\n\t\t\ttoken, err := jwt.ParseWithClaims(tokenStr, &jwt.StandardClaims{}, func(token *jwt.Token) (interface{}, error) {\n\t\t\t\tif token.Method != jwt.SigningMethodHS256 {\n\t\t\t\t\treturn nil, errors.New(\"Incorrect signing method\")\n\t\t\t\t}\n\n\t\t\t\treturn []byte(key), nil\n\t\t\t})\n\n\t\t\tif err == nil && 
token.Valid {\n\t\t\t\t\/\/ TODO: Verify this is a valid user in the DB\n\n\t\t\t\t\/\/ Add the user's ID to the list of params\n\t\t\t\tclaims := token.Claims.(*jwt.StandardClaims)\n\t\t\t\tp = append(p, httprouter.Param{Key: \"CurrentUserID\", Value: claims.Subject})\n\n\t\t\t\thandler(resp, req, p)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlastErr = err\n\t\t}\n\n\t\th.JSON(resp, http.StatusUnauthorized, lastErr.Error())\n\t}\n}\n<commit_msg>Make sure the user in the JWT token exists<commit_after>package api\n\nimport (\n\t\"errors\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tjwt \"github.com\/dgrijalva\/jwt-go\"\n\t\"github.com\/julienschmidt\/httprouter\"\n)\n\ntype authenticateRequest struct {\n\tUserName string `json:\"username\"`\n\tPassword string `json:\"password\"`\n}\n\ntype authenticateResponse struct {\n\tToken string `json:\"token\"`\n}\n\nfunc (h apiHandler) postAuthenticate(resp http.ResponseWriter, req *http.Request, p httprouter.Params) {\n\tvar authRequest authenticateRequest\n\tif err := readJSONFromRequest(req, &authRequest); err != nil {\n\t\th.JSON(resp, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\n\tuser, err := h.model.Users.Authenticate(authRequest.UserName, authRequest.Password)\n\tif err != nil {\n\t\th.JSON(resp, http.StatusUnauthorized, err.Error())\n\t\treturn\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.StandardClaims{\n\t\tExpiresAt: time.Now().Add(time.Hour * 14 * 24).Unix(),\n\t\tIssuedAt: time.Now().Unix(),\n\t\tSubject: strconv.FormatInt(user.ID, 10),\n\t})\n\t\/\/ Always sign using the 0'th key\n\ttokenStr, err := token.SignedString([]byte(h.cfg.SecureKeys[0]))\n\tif err != nil {\n\t\th.JSON(resp, http.StatusInternalServerError, err.Error())\n\t}\n\n\th.JSON(resp, http.StatusOK, authenticateResponse{Token: tokenStr})\n}\n\nfunc (h apiHandler) requireAuthentication(handler httprouter.Handle) httprouter.Handle {\n\treturn func(resp http.ResponseWriter, req *http.Request, p httprouter.Params) {\n\t\tauthHeader := req.Header.Get(\"Authorization\")\n\t\tif authHeader == \"\" {\n\t\t\th.JSON(resp, http.StatusUnauthorized, \"Authorization header missing\")\n\t\t\treturn\n\t\t}\n\n\t\tauthHeaderParts := strings.Split(authHeader, \" \")\n\t\tif len(authHeaderParts) != 2 || strings.ToLower(authHeaderParts[0]) != \"bearer\" {\n\t\t\th.JSON(resp, http.StatusUnauthorized, \"Authorization header must be in the form 'Bearer {token}'\")\n\t\t\treturn\n\t\t}\n\n\t\ttokenStr := authHeaderParts[1]\n\n\t\t\/\/ Try each key when validating the token\n\t\tvar lastErr error\n\t\tfor _, key := range h.cfg.SecureKeys {\n\t\t\ttoken, err := jwt.ParseWithClaims(tokenStr, &jwt.StandardClaims{}, func(token *jwt.Token) (interface{}, error) {\n\t\t\t\tif token.Method != jwt.SigningMethodHS256 {\n\t\t\t\t\treturn nil, errors.New(\"Incorrect signing method\")\n\t\t\t\t}\n\n\t\t\t\treturn []byte(key), nil\n\t\t\t})\n\n\t\t\tif err == nil && token.Valid {\n\t\t\t\tclaims := token.Claims.(*jwt.StandardClaims)\n\t\t\t\tif err = h.verifyUserExists(claims); err == nil {\n\t\t\t\t\t\/\/ Add the user's ID to the list of params\n\t\t\t\t\tp = append(p, httprouter.Param{Key: \"CurrentUserID\", Value: claims.Subject})\n\n\t\t\t\t\thandler(resp, req, p)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tlastErr = err\n\t\t}\n\n\t\th.JSON(resp, http.StatusUnauthorized, lastErr.Error())\n\t}\n}\n\nfunc (h apiHandler) verifyUserExists(claims *jwt.StandardClaims) error {\n\tuserID, err := strconv.ParseInt(claims.Subject, 10, 64)\n\tif err != nil {\n\t\treturn 
errors.New(\"invalid claims\")\n\t}\n\n\t\/\/ Verify this is a valid user in the DB\n\tif _, err = h.model.Users.Read(userID); err != nil {\n\t\treturn errors.New(\"invalid claims\")\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package engine\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\tkeybase_1 \"github.com\/keybase\/client\/protocol\/go\"\n)\n\nfunc runIdentify(username string) (idUI *FakeIdentifyUI, res *IdentifyRes, err error) {\n\tidUI = &FakeIdentifyUI{Proofs: make(map[string]string)}\n\targ := IdentifyEngineArg{\n\t\tUser: username,\n\t}\n\tctx := Context{\n\t\tLogUI: G.UI.GetLogUI(),\n\t\tIdentifyUI: idUI,\n\t}\n\teng := NewIdentifyEngine(&arg)\n\terr = RunEngine(eng, &ctx, nil, nil)\n\tres = eng.Result()\n\treturn\n}\n\nfunc checkAliceProofs(t *testing.T, idUI *FakeIdentifyUI, res *IdentifyRes) {\n\tcheckKeyedProfile(t, idUI, res, \"alice\", true, map[string]string{\n\t\t\"github\": \"kbtester2\",\n\t\t\"twitter\": \"tacovontaco\",\n\t})\n}\n\nfunc checkBobProofs(t *testing.T, idUI *FakeIdentifyUI, res *IdentifyRes) {\n\tcheckKeyedProfile(t, idUI, res, \"bob\", true, map[string]string{\n\t\t\"github\": \"kbtester1\",\n\t\t\"twitter\": \"kbtester1\",\n\t})\n}\n\nfunc checkCharlieProofs(t *testing.T, idUI *FakeIdentifyUI, res *IdentifyRes) {\n\tcheckKeyedProfile(t, idUI, res, \"charlie\", true, map[string]string{\n\t\t\"github\": \"tacoplusplus\",\n\t\t\"twitter\": \"tacovontaco\",\n\t})\n}\n\nfunc checkDougProofs(t *testing.T, idUI *FakeIdentifyUI, res *IdentifyRes) {\n\tcheckKeyedProfile(t, idUI, res, \"doug\", false, map[string]string{})\n}\n\nfunc checkKeyedProfile(t *testing.T, idUI *FakeIdentifyUI, result *IdentifyRes, name string, hasImg bool, expectedProofs map[string]string) {\n\tif exported := result.User.Export(); !reflect.DeepEqual(idUI.User, exported) {\n\t\tt.Fatal(\"LaunchNetworkChecks User not equal to result user.\", idUI.User, exported)\n\t}\n\n\tif hasImg && result.User.Image == nil {\n\t\tt.Fatal(\"Missing user image.\")\n\t} else if !hasImg && result.User.Image != nil {\n\t\tt.Fatal(\"User has an image but shouldn't\")\n\t}\n\n\tif !reflect.DeepEqual(expectedProofs, idUI.Proofs) {\n\t\tt.Fatal(\"Wrong proofs.\", expectedProofs, idUI.Proofs)\n\t}\n}\n\nfunc checkDisplayKeys(t *testing.T, idUI *FakeIdentifyUI, callCount, keyCount int) {\n\tif idUI.DisplayKeyCalls != callCount {\n\t\tt.Errorf(\"DisplayKey calls: %d. expected %d.\", idUI.DisplayKeyCalls, callCount)\n\t}\n\n\tif len(idUI.Keys) != keyCount {\n\t\tt.Errorf(\"keys: %d, expected %d.\", len(idUI.Keys), keyCount)\n\t\tfor k, v := range idUI.Keys {\n\t\t\tt.Logf(\"key: %+v, %+v\", k, v)\n\t\t}\n\t}\n\n\tfor k := range idUI.Keys {\n\t\tif k.PgpFingerprint == nil {\n\t\t\tt.Errorf(\"key %v: not pgp. 
only pgp keys should be displayed.\", k)\n\t\t}\n\t}\n}\n\nfunc TestIdAlice(t *testing.T) {\n\ttc := SetupEngineTest(t, \"id\")\n\tdefer tc.Cleanup()\n\tidUI, result, err := runIdentify(\"t_alice\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcheckAliceProofs(t, idUI, result)\n\tcheckDisplayKeys(t, idUI, 1, 1)\n}\n\nfunc TestIdBob(t *testing.T) {\n\ttc := SetupEngineTest(t, \"id\")\n\tdefer tc.Cleanup()\n\tidUI, result, err := runIdentify(\"t_bob\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcheckBobProofs(t, idUI, result)\n\tcheckDisplayKeys(t, idUI, 1, 1)\n}\n\nfunc TestIdCharlie(t *testing.T) {\n\ttc := SetupEngineTest(t, \"id\")\n\tdefer tc.Cleanup()\n\tidUI, result, err := runIdentify(\"t_charlie\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcheckCharlieProofs(t, idUI, result)\n\tcheckDisplayKeys(t, idUI, 1, 1)\n}\n\nfunc TestIdDoug(t *testing.T) {\n\ttc := SetupEngineTest(t, \"id\")\n\tdefer tc.Cleanup()\n\tidUI, result, err := runIdentify(\"t_doug\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcheckDougProofs(t, idUI, result)\n\tcheckDisplayKeys(t, idUI, 1, 1)\n}\n\nfunc TestIdEllen(t *testing.T) {\n\ttc := SetupEngineTest(t, \"id\")\n\tdefer tc.Cleanup()\n\tidUI, _, err := runIdentify(\"t_ellen\")\n\tif err == nil {\n\t\tt.Fatal(\"Expected no public key found error.\")\n\t} else if _, ok := err.(libkb.NoKeyError); !ok {\n\t\tt.Fatal(\"Expected no public key found error. Got instead:\", err)\n\t}\n\tcheckDisplayKeys(t, idUI, 0, 0)\n}\n\n\/\/ TestIdPGPNotEldest creates a user with a pgp key that isn't\n\/\/ eldest key, then runs identify to make sure the pgp key is\n\/\/ still displayed.\nfunc TestIdPGPNotEldest(t *testing.T) {\n\ttc := SetupEngineTest(t, \"id\")\n\tdefer tc.Cleanup()\n\n\t\/\/ create new user, then add pgp key\n\tu := CreateAndSignupFakeUser(t, \"login\")\n\tsecui := libkb.TestSecretUI{Passphrase: u.Passphrase}\n\tctx := &Context{LogUI: G.UI.GetLogUI(), SecretUI: secui}\n\tkey := armorKey(t, tc, u.Email)\n\te := NewPGPSaveArmored(key, true, true)\n\tif err := RunEngine(e, ctx, nil, nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tidUI, _, err := runIdentify(u.Username)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcheckDisplayKeys(t, idUI, 1, 1)\n}\n\ntype FakeIdentifyUI struct {\n\tProofs map[string]string\n\tUser *keybase_1.User\n\tFapr keybase_1.FinishAndPromptRes\n\tKeys map[keybase_1.FOKID]*keybase_1.TrackDiff\n\tDisplayKeyCalls int\n}\n\nfunc (ui *FakeIdentifyUI) FinishWebProofCheck(proof keybase_1.RemoteProof, result keybase_1.LinkCheckResult) {\n\tui.Proofs[proof.Key] = proof.Value\n}\nfunc (ui *FakeIdentifyUI) FinishSocialProofCheck(proof keybase_1.RemoteProof, result keybase_1.LinkCheckResult) {\n\tui.Proofs[proof.Key] = proof.Value\n}\nfunc (ui *FakeIdentifyUI) FinishAndPrompt(*keybase_1.IdentifyOutcome) (res keybase_1.FinishAndPromptRes, err error) {\n\tres = ui.Fapr\n\treturn\n}\nfunc (ui *FakeIdentifyUI) DisplayCryptocurrency(keybase_1.Cryptocurrency) {\n}\nfunc (ui *FakeIdentifyUI) DisplayKey(kid keybase_1.FOKID, td *keybase_1.TrackDiff) {\n\tif ui.Keys == nil {\n\t\tui.Keys = make(map[keybase_1.FOKID]*keybase_1.TrackDiff)\n\t}\n\tui.Keys[kid] = td\n\tui.DisplayKeyCalls++\n}\nfunc (ui *FakeIdentifyUI) ReportLastTrack(*keybase_1.TrackSummary) {\n}\nfunc (ui *FakeIdentifyUI) Start(username string) {\n}\nfunc (ui *FakeIdentifyUI) LaunchNetworkChecks(id *keybase_1.Identity, user *keybase_1.User) {\n\tui.User = user\n}\nfunc (ui *FakeIdentifyUI) DisplayTrackStatement(string) (err error) {\n\treturn\n}\nfunc (ui *FakeIdentifyUI) SetStrict(b bool) 
{\n}\n<commit_msg>nil map checks<commit_after>package engine\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\tkeybase_1 \"github.com\/keybase\/client\/protocol\/go\"\n)\n\nfunc runIdentify(username string) (idUI *FakeIdentifyUI, res *IdentifyRes, err error) {\n\tidUI = &FakeIdentifyUI{Proofs: make(map[string]string)}\n\targ := IdentifyEngineArg{\n\t\tUser: username,\n\t}\n\tctx := Context{\n\t\tLogUI: G.UI.GetLogUI(),\n\t\tIdentifyUI: idUI,\n\t}\n\teng := NewIdentifyEngine(&arg)\n\terr = RunEngine(eng, &ctx, nil, nil)\n\tres = eng.Result()\n\treturn\n}\n\nfunc checkAliceProofs(t *testing.T, idUI *FakeIdentifyUI, res *IdentifyRes) {\n\tcheckKeyedProfile(t, idUI, res, \"alice\", true, map[string]string{\n\t\t\"github\": \"kbtester2\",\n\t\t\"twitter\": \"tacovontaco\",\n\t})\n}\n\nfunc checkBobProofs(t *testing.T, idUI *FakeIdentifyUI, res *IdentifyRes) {\n\tcheckKeyedProfile(t, idUI, res, \"bob\", true, map[string]string{\n\t\t\"github\": \"kbtester1\",\n\t\t\"twitter\": \"kbtester1\",\n\t})\n}\n\nfunc checkCharlieProofs(t *testing.T, idUI *FakeIdentifyUI, res *IdentifyRes) {\n\tcheckKeyedProfile(t, idUI, res, \"charlie\", true, map[string]string{\n\t\t\"github\": \"tacoplusplus\",\n\t\t\"twitter\": \"tacovontaco\",\n\t})\n}\n\nfunc checkDougProofs(t *testing.T, idUI *FakeIdentifyUI, res *IdentifyRes) {\n\tcheckKeyedProfile(t, idUI, res, \"doug\", false, map[string]string{})\n}\n\nfunc checkKeyedProfile(t *testing.T, idUI *FakeIdentifyUI, result *IdentifyRes, name string, hasImg bool, expectedProofs map[string]string) {\n\tif exported := result.User.Export(); !reflect.DeepEqual(idUI.User, exported) {\n\t\tt.Fatal(\"LaunchNetworkChecks User not equal to result user.\", idUI.User, exported)\n\t}\n\n\tif hasImg && result.User.Image == nil {\n\t\tt.Fatal(\"Missing user image.\")\n\t} else if !hasImg && result.User.Image != nil {\n\t\tt.Fatal(\"User has an image but shouldn't\")\n\t}\n\n\tif !reflect.DeepEqual(expectedProofs, idUI.Proofs) {\n\t\tt.Fatal(\"Wrong proofs.\", expectedProofs, idUI.Proofs)\n\t}\n}\n\nfunc checkDisplayKeys(t *testing.T, idUI *FakeIdentifyUI, callCount, keyCount int) {\n\tif idUI.DisplayKeyCalls != callCount {\n\t\tt.Errorf(\"DisplayKey calls: %d. expected %d.\", idUI.DisplayKeyCalls, callCount)\n\t}\n\n\tif len(idUI.Keys) != keyCount {\n\t\tt.Errorf(\"keys: %d, expected %d.\", len(idUI.Keys), keyCount)\n\t\tfor k, v := range idUI.Keys {\n\t\t\tt.Logf(\"key: %+v, %+v\", k, v)\n\t\t}\n\t}\n\n\tfor k := range idUI.Keys {\n\t\tif k.PgpFingerprint == nil {\n\t\t\tt.Errorf(\"key %v: not pgp. 
only pgp keys should be displayed.\", k)\n\t\t}\n\t}\n}\n\nfunc TestIdAlice(t *testing.T) {\n\ttc := SetupEngineTest(t, \"id\")\n\tdefer tc.Cleanup()\n\tidUI, result, err := runIdentify(\"t_alice\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcheckAliceProofs(t, idUI, result)\n\tcheckDisplayKeys(t, idUI, 1, 1)\n}\n\nfunc TestIdBob(t *testing.T) {\n\ttc := SetupEngineTest(t, \"id\")\n\tdefer tc.Cleanup()\n\tidUI, result, err := runIdentify(\"t_bob\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcheckBobProofs(t, idUI, result)\n\tcheckDisplayKeys(t, idUI, 1, 1)\n}\n\nfunc TestIdCharlie(t *testing.T) {\n\ttc := SetupEngineTest(t, \"id\")\n\tdefer tc.Cleanup()\n\tidUI, result, err := runIdentify(\"t_charlie\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcheckCharlieProofs(t, idUI, result)\n\tcheckDisplayKeys(t, idUI, 1, 1)\n}\n\nfunc TestIdDoug(t *testing.T) {\n\ttc := SetupEngineTest(t, \"id\")\n\tdefer tc.Cleanup()\n\tidUI, result, err := runIdentify(\"t_doug\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcheckDougProofs(t, idUI, result)\n\tcheckDisplayKeys(t, idUI, 1, 1)\n}\n\nfunc TestIdEllen(t *testing.T) {\n\ttc := SetupEngineTest(t, \"id\")\n\tdefer tc.Cleanup()\n\tidUI, _, err := runIdentify(\"t_ellen\")\n\tif err == nil {\n\t\tt.Fatal(\"Expected no public key found error.\")\n\t} else if _, ok := err.(libkb.NoKeyError); !ok {\n\t\tt.Fatal(\"Expected no public key found error. Got instead:\", err)\n\t}\n\tcheckDisplayKeys(t, idUI, 0, 0)\n}\n\n\/\/ TestIdPGPNotEldest creates a user with a pgp key that isn't\n\/\/ eldest key, then runs identify to make sure the pgp key is\n\/\/ still displayed.\nfunc TestIdPGPNotEldest(t *testing.T) {\n\ttc := SetupEngineTest(t, \"id\")\n\tdefer tc.Cleanup()\n\n\t\/\/ create new user, then add pgp key\n\tu := CreateAndSignupFakeUser(t, \"login\")\n\tsecui := libkb.TestSecretUI{Passphrase: u.Passphrase}\n\tctx := &Context{LogUI: G.UI.GetLogUI(), SecretUI: secui}\n\tkey := armorKey(t, tc, u.Email)\n\te := NewPGPSaveArmored(key, true, true)\n\tif err := RunEngine(e, ctx, nil, nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tidUI, _, err := runIdentify(u.Username)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcheckDisplayKeys(t, idUI, 1, 1)\n}\n\ntype FakeIdentifyUI struct {\n\tProofs map[string]string\n\tUser *keybase_1.User\n\tFapr keybase_1.FinishAndPromptRes\n\tKeys map[keybase_1.FOKID]*keybase_1.TrackDiff\n\tDisplayKeyCalls int\n}\n\nfunc (ui *FakeIdentifyUI) FinishWebProofCheck(proof keybase_1.RemoteProof, result keybase_1.LinkCheckResult) {\n\tif ui.Proofs == nil {\n\t\tui.Proofs = make(map[string]string)\n\t}\n\tui.Proofs[proof.Key] = proof.Value\n}\nfunc (ui *FakeIdentifyUI) FinishSocialProofCheck(proof keybase_1.RemoteProof, result keybase_1.LinkCheckResult) {\n\tif ui.Proofs == nil {\n\t\tui.Proofs = make(map[string]string)\n\t}\n\tui.Proofs[proof.Key] = proof.Value\n}\nfunc (ui *FakeIdentifyUI) FinishAndPrompt(*keybase_1.IdentifyOutcome) (res keybase_1.FinishAndPromptRes, err error) {\n\tres = ui.Fapr\n\treturn\n}\nfunc (ui *FakeIdentifyUI) DisplayCryptocurrency(keybase_1.Cryptocurrency) {\n}\nfunc (ui *FakeIdentifyUI) DisplayKey(kid keybase_1.FOKID, td *keybase_1.TrackDiff) {\n\tif ui.Keys == nil {\n\t\tui.Keys = make(map[keybase_1.FOKID]*keybase_1.TrackDiff)\n\t}\n\tui.Keys[kid] = td\n\tui.DisplayKeyCalls++\n}\nfunc (ui *FakeIdentifyUI) ReportLastTrack(*keybase_1.TrackSummary) {\n}\nfunc (ui *FakeIdentifyUI) Start(username string) {\n}\nfunc (ui *FakeIdentifyUI) LaunchNetworkChecks(id *keybase_1.Identity, user *keybase_1.User) {\n\tui.User = 
user\n}\nfunc (ui *FakeIdentifyUI) DisplayTrackStatement(string) (err error) {\n\treturn\n}\nfunc (ui *FakeIdentifyUI) SetStrict(b bool) {\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"bufio\"\n \"io\"\n \"fmt\"\n \"log\"\n \"os\"\n \"path\"\n)\n\nfunc main() {\n if len(os.Args) != 3 {\n log.Fatalf(\"Usage: %s input-file output-file\", path.Base(os.Args[0]))\n }\n\n i, err := os.Open(os.Args[1])\n if err != nil {\n log.Fatalf(\"Cannot open %q for reading: %v\", os.Args[1], err)\n }\n defer i.Close()\n r := bufio.NewReader(i)\n\n o, err := os.Create(os.Args[2])\n if err != nil {\n log.Fatalf(\"Cannot create new file %q: %v\", os.Args[2], err)\n }\n defer o.Close()\n w := bufio.NewWriter(o)\n defer w.Flush()\n\n for {\n err = processBlock(r, w)\n if err == io.EOF {\n log.Println(\"Successfully reached EOF\")\n break\n }\n if err != nil {\n log.Fatalf(\"Error whilst processing file: %v\", err)\n }\n }\n}\n\nfunc processBlock(r *bufio.Reader, w *bufio.Writer) error {\n var err error\n start := true\n for {\n buf, err := r.Peek(1)\n if err != nil {\n return err\n }\n if start && (buf[0] != '0') {\n return fmt.Errorf(\"expecting %q, got %q\", '0', buf[0])\n }\n if (!start) && (buf[0] == '0') {\n break\n }\n\n line, err := r.ReadString('\\n')\n if len(line) > 0 {\n if _, werr := w.WriteString(line); err != nil {\n return werr\n }\n }\n\n start = false\n\n if err == io.EOF {\n break\n }\n if err != nil {\n return err\n }\n }\n return err\n}\n<commit_msg>Detect INDI record types and buffer data before writing (but still do not hide any).<commit_after>package main\n\nimport (\n \"bufio\"\n \"io\"\n \"fmt\"\n \"log\"\n \"os\"\n \"path\"\n \"strings\"\n)\n\ntype ternary int\nconst (\n YES ternary = iota\n NO\n MAYBE\n)\n\nfunc main() {\n if len(os.Args) != 3 {\n log.Fatalf(\"Usage: %s input-file output-file\", path.Base(os.Args[0]))\n }\n\n i, err := os.Open(os.Args[1])\n if err != nil {\n log.Fatalf(\"Cannot open %q for reading: %v\", os.Args[1], err)\n }\n defer i.Close()\n r := bufio.NewReader(i)\n\n o, err := os.Create(os.Args[2])\n if err != nil {\n log.Fatalf(\"Cannot create new file %q: %v\", os.Args[2], err)\n }\n defer o.Close()\n w := bufio.NewWriter(o)\n defer w.Flush()\n\n for {\n err = processBlock(r, w)\n if err == io.EOF {\n log.Println(\"Successfully reached EOF\")\n break\n }\n if err != nil {\n log.Fatalf(\"Error whilst processing file: %v\", err)\n }\n }\n}\n\nfunc processBlock(r *bufio.Reader, w *bufio.Writer) error {\n var err error\n var buf strings.Builder\n isIndi := MAYBE\n\n for {\n ch, err := r.Peek(1)\n if err != nil {\n return err\n }\n if isIndi == MAYBE && ch[0] != '0' {\n return fmt.Errorf(\"expecting %q, got %q\", '0', ch[0])\n }\n if isIndi != MAYBE && ch[0] == '0' {\n break\n }\n\n line, err := r.ReadString('\\n')\n\n if isIndi == MAYBE {\n if strings.HasSuffix(line, \"INDI\\n\") {\n isIndi = YES\n } else {\n isIndi = NO\n }\n }\n\n if isIndi == NO {\n if _, werr := w.WriteString(line); werr != nil {\n return werr\n }\n } else {\n buf.WriteString(line)\n }\n\n if err == io.EOF {\n break\n }\n if err != nil {\n return err\n }\n }\n\n if _, werr := w.WriteString(buf.String()); werr != nil {\n return werr\n }\n\n return err\n}\n<|endoftext|>"} {"text":"<commit_before>package graph\n\nimport (\n\t\"testing\"\n)\n\nfunc TestTopologicalSort(t *testing.T) {\n\tgraph, err := New(\"directed\")\n\tif err != nil {\n\t\tt.Errorf(\"TestMakeNode: unable to create directed graph\")\n\t}\n\tnodes := graph.Nodes()\n\t\/\/ create graph on page 613 of CLRS 
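The "nil map checks" commit above guards FinishWebProofCheck and FinishSocialProofCheck against a FakeIdentifyUI whose Proofs map was never allocated: reading from a nil map is safe in Go, but writing to one panics at runtime. A minimal sketch of that lazy-allocation guard follows; the type and field names are hypothetical.

package main

import "fmt"

type recorder struct {
	seen map[string]int // may be nil on a zero-value recorder
}

func (r *recorder) record(key string) {
	if r.seen == nil { // guard: allocate before the first write
		r.seen = make(map[string]int)
	}
	r.seen[key]++
}

func main() {
	var r recorder // zero value, map not allocated
	r.record("a")
	r.record("a")
	fmt.Println(r.seen["a"]) // 2; without the guard, record would panic
}

The same guard already existed in DisplayKey for the Keys map; the commit simply applies it uniformly to the other map-writing methods.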
ed. 3\n\tnodes = append(nodes, graph.MakeNode(\"shirt\"))\n\tnodes = append(nodes, graph.MakeNode(\"tie\"))\n\tnodes = append(nodes, graph.MakeNode(\"jacket\"))\n\tnodes = append(nodes, graph.MakeNode(\"belt\"))\n\tnodes = append(nodes, graph.MakeNode(\"watch\"))\n\tnodes = append(nodes, graph.MakeNode(\"undershorts\"))\n\tnodes = append(nodes, graph.MakeNode(\"pants\"))\n\tnodes = append(nodes, graph.MakeNode(\"shoes\"))\n\tnodes = append(nodes, graph.MakeNode(\"socks\"))\n\tgraph.Connect(nodes[0], nodes[1])\n\tgraph.Connect(nodes[1], nodes[2])\n\tgraph.Connect(nodes[0], nodes[3])\n\tgraph.Connect(nodes[3], nodes[2])\n\tgraph.Connect(nodes[5], nodes[6])\n\tgraph.Connect(nodes[5], nodes[7])\n\tgraph.Connect(nodes[6], nodes[3])\n\tgraph.Connect(nodes[6], nodes[7])\n\tgraph.Connect(nodes[8], nodes[7])\n\tgraph.verify(t)\n\twantOrder := make([]*Node, len(graph.Nodes()))\n\twantOrder[0] = nodes[8]\n\twantOrder[1] = nodes[5]\n\twantOrder[2] = nodes[6]\n\twantOrder[3] = nodes[7]\n\twantOrder[4] = nodes[4]\n\twantOrder[5] = nodes[0]\n\twantOrder[6] = nodes[3]\n\twantOrder[7] = nodes[1]\n\twantOrder[8] = nodes[2]\n\tresult := TopologicalSort(graph)\n\tfor i := range result {\n\t\tif result[i] != wantOrder[i] {\n\t\t\tt.Errorf(\"index %v in result != wanted, value: %v, want value: %v\", i, result[i].Value, wantOrder[i].Value)\n\t\t}\n\t}\n}\n<commit_msg>added comments for sorted ordering<commit_after>package graph\n\nimport (\n\t\"testing\"\n)\n\nfunc TestTopologicalSort(t *testing.T) {\n\tgraph, err := New(\"directed\")\n\tif err != nil {\n\t\tt.Errorf(\"TestMakeNode: unable to create directed graph\")\n\t}\n\tnodes := graph.Nodes()\n\t\/\/ create graph on page 613 of CLRS ed. 3\n\tnodes = append(nodes, graph.MakeNode(\"shirt\"))\n\tnodes = append(nodes, graph.MakeNode(\"tie\"))\n\tnodes = append(nodes, graph.MakeNode(\"jacket\"))\n\tnodes = append(nodes, graph.MakeNode(\"belt\"))\n\tnodes = append(nodes, graph.MakeNode(\"watch\"))\n\tnodes = append(nodes, graph.MakeNode(\"undershorts\"))\n\tnodes = append(nodes, graph.MakeNode(\"pants\"))\n\tnodes = append(nodes, graph.MakeNode(\"shoes\"))\n\tnodes = append(nodes, graph.MakeNode(\"socks\"))\n\tgraph.Connect(nodes[0], nodes[1])\n\tgraph.Connect(nodes[1], nodes[2])\n\tgraph.Connect(nodes[0], nodes[3])\n\tgraph.Connect(nodes[3], nodes[2])\n\tgraph.Connect(nodes[5], nodes[6])\n\tgraph.Connect(nodes[5], nodes[7])\n\tgraph.Connect(nodes[6], nodes[3])\n\tgraph.Connect(nodes[6], nodes[7])\n\tgraph.Connect(nodes[8], nodes[7])\n\tgraph.verify(t)\n\twantOrder := make([]*Node, len(graph.Nodes()))\n\twantOrder[0] = nodes[8] \/\/ socks\n\twantOrder[1] = nodes[5] \/\/ undershorts\n\twantOrder[2] = nodes[6] \/\/ pants\n\twantOrder[3] = nodes[7] \/\/ shoes\n\twantOrder[4] = nodes[4] \/\/ watch\n\twantOrder[5] = nodes[0] \/\/ shirt\n\twantOrder[6] = nodes[3] \/\/ belt\n\twantOrder[7] = nodes[1] \/\/ tie\n\twantOrder[8] = nodes[2] \/\/ jacket\n\tresult := TopologicalSort(graph)\n\tfor i := range result {\n\t\tif result[i] != wantOrder[i] {\n\t\t\tt.Errorf(\"index %v in result != wanted, value: %v, want value: %v\", i, result[i].Value, wantOrder[i].Value)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Go Authors. 
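The topological-sort record above builds the CLRS "getting dressed" DAG and asserts one specific output order, the order its library's implementation happens to produce. For contrast, here is a minimal Kahn's-algorithm sketch of topological ordering; the vertex numbering and four-edge example are illustrative only, and Kahn's algorithm yields one of possibly several valid orders rather than the exact sequence the recorded test pins.

package main

import "fmt"

// topoSort returns a topological order of vertices 0..n-1 under edges,
// where each edge is {from, to}. A result shorter than n means a cycle.
func topoSort(n int, edges [][2]int) []int {
	adj := make([][]int, n)
	indeg := make([]int, n)
	for _, e := range edges {
		adj[e[0]] = append(adj[e[0]], e[1])
		indeg[e[1]]++
	}
	queue := []int{}
	for v := 0; v < n; v++ {
		if indeg[v] == 0 {
			queue = append(queue, v)
		}
	}
	order := []int{}
	for len(queue) > 0 {
		v := queue[0]
		queue = queue[1:]
		order = append(order, v)
		for _, w := range adj[v] {
			indeg[w]--
			if indeg[w] == 0 {
				queue = append(queue, w)
			}
		}
	}
	return order
}

func main() {
	// shirt(0)->tie(1)->jacket(2), shirt->belt(3), belt->jacket, as in the test
	fmt.Println(topoSort(4, [][2]int{{0, 1}, {1, 2}, {0, 3}, {3, 2}})) // [0 1 3 2]
}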
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage importer\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/build\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"code.google.com\/p\/go.tools\/go\/gcimporter\"\n\t\"code.google.com\/p\/go.tools\/go\/types\"\n)\n\nvar tests = []string{\n\t`package p`,\n\n\t\/\/ consts\n\t`package p; const X = true`,\n\t`package p; const X, y, Z = true, false, 0 != 0`,\n\t`package p; const ( A float32 = 1<<iota; B; C; D)`,\n\t`package p; const X = \"foo\"`,\n\t`package p; const X string = \"foo\"`,\n\t`package p; const X = 0`,\n\t`package p; const X = -42`,\n\t`package p; const X = 3.14159265`,\n\t`package p; const X = -1e-10`,\n\t`package p; const X = 1.2 + 2.3i`,\n\t`package p; const X = -1i`,\n\t`package p; import \"math\"; const Pi = math.Pi`,\n\t`package p; import m \"math\"; const Pi = m.Pi`,\n\n\t\/\/ types\n\t`package p; type T int`,\n\t`package p; type T [10]int`,\n\t`package p; type T []int`,\n\t`package p; type T struct{}`,\n\t`package p; type T struct{x int}`,\n\t`package p; type T *int`,\n\t`package p; type T func()`,\n\t`package p; type T *T`,\n\t`package p; type T interface{}`,\n\t`package p; type T interface{ foo() }`,\n\t`package p; type T interface{ m() T }`,\n\t\/\/ TODO(gri) disabled for now - import\/export works but\n\t\/\/ types.Type.String() used in the test cannot handle cases\n\t\/\/ like this yet\n\t\/\/ `package p; type T interface{ m() interface{T} }`,\n\t`package p; type T map[string]bool`,\n\t`package p; type T chan int`,\n\t`package p; type T <-chan complex64`,\n\t`package p; type T chan<- map[int]string`,\n\t\/\/ test case for issue 8177\n\t`package p; type T1 interface { F(T2) }; type T2 interface { T1 }`,\n\n\t\/\/ vars\n\t`package p; var X int`,\n\t`package p; var X, Y, Z struct{f int \"tag\"}`,\n\n\t\/\/ funcs\n\t`package p; func F()`,\n\t`package p; func F(x int, y struct{}) bool`,\n\t`package p; type T int; func (*T) F(x int, y struct{}) T`,\n\n\t\/\/ selected special cases\n\t`package p; type T int`,\n\t`package p; type T uint8`,\n\t`package p; type T byte`,\n\t`package p; type T error`,\n\t`package p; import \"net\/http\"; type T http.Client`,\n\t`package p; import \"net\/http\"; type ( T1 http.Client; T2 struct { http.Client } )`,\n\t`package p; import \"unsafe\"; type ( T1 unsafe.Pointer; T2 unsafe.Pointer )`,\n\t`package p; import \"unsafe\"; type T struct { p unsafe.Pointer }`,\n}\n\nfunc TestImportSrc(t *testing.T) {\n\tfor _, src := range tests {\n\t\tpkg, err := pkgForSource(src)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"typecheck failed: %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\ttestExportImport(t, pkg, \"\")\n\t}\n}\n\nfunc TestImportStdLib(t *testing.T) {\n\tstart := time.Now()\n\n\tlibs, err := stdLibs()\n\tif err != nil {\n\t\tt.Fatalf(\"could not compute list of std libraries: %s\", err)\n\t}\n\n\t\/\/ make sure printed go\/types types and gc-imported types\n\t\/\/ can be compared reasonably well\n\ttypes.GcCompatibilityMode = true\n\n\tvar totSize, totGcSize int\n\tfor _, lib := range libs {\n\t\t\/\/ limit run time for short tests\n\t\tif testing.Short() && time.Since(start) >= 750*time.Millisecond {\n\t\t\treturn\n\t\t}\n\n\t\tpkg, err := pkgForPath(lib)\n\t\tswitch err := err.(type) {\n\t\tcase nil:\n\t\t\t\/\/ ok\n\t\tcase *build.NoGoError:\n\t\t\t\/\/ no Go files - 
ignore\n\t\t\tcontinue\n\t\tdefault:\n\t\t\tt.Errorf(\"typecheck failed: %s\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tsize, gcsize := testExportImport(t, pkg, lib)\n\t\tif gcsize == 0 {\n\t\t\t\/\/ if gc import didn't happen, assume same size\n\t\t\t\/\/ (and avoid division by zero below)\n\t\t\tgcsize = size\n\t\t}\n\n\t\tif testing.Verbose() {\n\t\t\tfmt.Printf(\"%s\\t%d\\t%d\\t%d%%\\n\", lib, size, gcsize, int(float64(size)*100\/float64(gcsize)))\n\t\t}\n\t\ttotSize += size\n\t\ttotGcSize += gcsize\n\t}\n\n\tif testing.Verbose() {\n\t\tfmt.Printf(\"\\n%d\\t%d\\t%d%%\\n\", totSize, totGcSize, int(float64(totSize)*100\/float64(totGcSize)))\n\t}\n\n\ttypes.GcCompatibilityMode = false\n}\n\nfunc testExportImport(t *testing.T, pkg0 *types.Package, path string) (size, gcsize int) {\n\tdata := ExportData(pkg0)\n\tsize = len(data)\n\n\timports := make(map[string]*types.Package)\n\tn, pkg1, err := ImportData(imports, data)\n\tif err != nil {\n\t\tt.Errorf(\"package %s: import failed: %s\", pkg0.Name(), err)\n\t\treturn\n\t}\n\tif n != size {\n\t\tt.Errorf(\"package %s: not all input data consumed\", pkg0.Name())\n\t\treturn\n\t}\n\n\ts0 := pkgString(pkg0)\n\ts1 := pkgString(pkg1)\n\tif s1 != s0 {\n\t\tt.Errorf(\"package %s: \\nimport got:\\n%s\\nwant:\\n%s\\n\", pkg0.Name(), s1, s0)\n\t}\n\n\t\/\/ If we have a standard library, compare also against the gcimported package.\n\tif path == \"\" {\n\t\treturn \/\/ not std library\n\t}\n\n\tgcdata, err := gcExportData(path)\n\tgcsize = len(gcdata)\n\n\timports = make(map[string]*types.Package)\n\tpkg2, err := gcImportData(imports, gcdata, path)\n\tif err != nil {\n\t\tt.Errorf(\"package %s: gcimport failed: %s\", pkg0.Name(), err)\n\t\treturn\n\t}\n\n\ts2 := pkgString(pkg2)\n\tif s2 != s0 {\n\t\tt.Errorf(\"package %s: \\ngcimport got:\\n%s\\nwant:\\n%s\\n\", pkg0.Name(), s2, s0)\n\t}\n\n\treturn\n}\n\nfunc pkgForSource(src string) (*types.Package, error) {\n\t\/\/ parse file\n\tfset := token.NewFileSet()\n\tf, err := parser.ParseFile(fset, \"\", src, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ typecheck file\n\tconf := types.Config{\n\t\t\/\/ strconv exports IntSize as a constant. The type-checker must\n\t\t\/\/ use the same word size otherwise the result of the type-checker\n\t\t\/\/ and gc imports is different. 
We don't care about alignment\n\t\t\/\/ since none of the tests have exported constants depending\n\t\t\/\/ on alignment (see also issue 8366).\n\t\tSizes: &types.StdSizes{WordSize: strconv.IntSize \/ 8, MaxAlign: 8},\n\t}\n\treturn conf.Check(\"import-test\", fset, []*ast.File{f}, nil)\n}\n\nfunc pkgForPath(path string) (*types.Package, error) {\n\t\/\/ collect filenames\n\tctxt := build.Default\n\tpkginfo, err := ctxt.Import(path, \"\", 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfilenames := append(pkginfo.GoFiles, pkginfo.CgoFiles...)\n\n\t\/\/ parse files\n\tfset := token.NewFileSet()\n\tfiles := make([]*ast.File, len(filenames))\n\tfor i, filename := range filenames {\n\t\tvar err error\n\t\tfiles[i], err = parser.ParseFile(fset, filepath.Join(pkginfo.Dir, filename), nil, 0)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ typecheck files\n\t\/\/ (we only care about exports and thus can ignore function bodies)\n\tconf := types.Config{IgnoreFuncBodies: true, FakeImportC: true}\n\treturn conf.Check(path, fset, files, nil)\n}\n\n\/\/ pkgString returns a string representation of a package's exported interface.\nfunc pkgString(pkg *types.Package) string {\n\tvar buf bytes.Buffer\n\n\tfmt.Fprintf(&buf, \"package %s\\n\", pkg.Name())\n\n\tscope := pkg.Scope()\n\tfor _, name := range scope.Names() {\n\t\tif exported(name) {\n\t\t\tobj := scope.Lookup(name)\n\t\t\tbuf.WriteString(obj.String())\n\n\t\t\tswitch obj := obj.(type) {\n\t\t\tcase *types.Const:\n\t\t\t\t\/\/ For now only print constant values if they are not float\n\t\t\t\t\/\/ or complex. This permits comparing go\/types results with\n\t\t\t\t\/\/ gc-generated gcimported package interfaces.\n\t\t\t\tinfo := obj.Type().Underlying().(*types.Basic).Info()\n\t\t\t\tif info&types.IsFloat == 0 && info&types.IsComplex == 0 {\n\t\t\t\t\tfmt.Fprintf(&buf, \" = %s\", obj.Val())\n\t\t\t\t}\n\n\t\t\tcase *types.TypeName:\n\t\t\t\t\/\/ Print associated methods.\n\t\t\t\t\/\/ Basic types (e.g., unsafe.Pointer) have *types.Basic\n\t\t\t\t\/\/ type rather than *types.Named; so we need to check.\n\t\t\t\tif typ, _ := obj.Type().(*types.Named); typ != nil {\n\t\t\t\t\tif n := typ.NumMethods(); n > 0 {\n\t\t\t\t\t\t\/\/ Sort methods by name so that we get the\n\t\t\t\t\t\t\/\/ same order independent of whether the\n\t\t\t\t\t\t\/\/ methods got imported or coming directly\n\t\t\t\t\t\t\/\/ for the source.\n\t\t\t\t\t\t\/\/ TODO(gri) This should probably be done\n\t\t\t\t\t\t\/\/ in go\/types.\n\t\t\t\t\t\tlist := make([]*types.Func, n)\n\t\t\t\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\t\t\t\tlist[i] = typ.Method(i)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tsort.Sort(byName(list))\n\n\t\t\t\t\t\tbuf.WriteString(\"\\nmethods (\\n\")\n\t\t\t\t\t\tfor _, m := range list {\n\t\t\t\t\t\t\tfmt.Fprintf(&buf, \"\\t%s\\n\", m)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbuf.WriteString(\")\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tbuf.WriteByte('\\n')\n\t\t}\n\t}\n\n\treturn buf.String()\n}\n\nvar stdLibRoot = filepath.Join(runtime.GOROOT(), \"src\", \"pkg\") + string(filepath.Separator)\n\n\/\/ The following std libraries are excluded from the stdLibs list.\nvar excluded = map[string]bool{\n\t\"builtin\": true, \/\/ contains type declarations with cycles\n\t\"unsafe\": true, \/\/ contains fake declarations\n}\n\n\/\/ stdLibs returns the list if standard library package paths.\nfunc stdLibs() (list []string, err error) {\n\terr = filepath.Walk(stdLibRoot, func(path string, info os.FileInfo, err error) error {\n\t\tif err == nil && info.IsDir() {\n\t\t\tif info.Name() == 
\"testdata\" {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t\tpkgPath := path[len(stdLibRoot):] \/\/ remove stdLibRoot\n\t\t\tif len(pkgPath) > 0 && !excluded[pkgPath] {\n\t\t\t\tlist = append(list, pkgPath)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\treturn\n}\n\ntype byName []*types.Func\n\nfunc (a byName) Len() int { return len(a) }\nfunc (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a byName) Less(i, j int) bool { return a[i].Name() < a[j].Name() }\n\n\/\/ gcExportData returns the gc-generated export data for the given path.\n\/\/ It is based on a trimmed-down version of gcimporter.Import which does\n\/\/ not do the actual import, does not handle package unsafe, and assumes\n\/\/ that path is a correct standard library package path (no canonicalization,\n\/\/ or handling of local import paths).\nfunc gcExportData(path string) ([]byte, error) {\n\tfilename, id := gcimporter.FindPkg(path, \"\")\n\tif filename == \"\" {\n\t\treturn nil, fmt.Errorf(\"can't find import: %s\", path)\n\t}\n\tif id != path {\n\t\tpanic(\"path should be canonicalized\")\n\t}\n\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tbuf := bufio.NewReader(f)\n\tif err = gcimporter.FindExportData(buf); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data []byte\n\tfor {\n\t\tline, err := buf.ReadBytes('\\n')\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdata = append(data, line...)\n\t\t\/\/ export data ends in \"$$\\n\"\n\t\tif len(line) == 3 && line[0] == '$' && line[1] == '$' {\n\t\t\treturn data, nil\n\t\t}\n\t}\n}\n\nfunc gcImportData(imports map[string]*types.Package, data []byte, path string) (*types.Package, error) {\n\tfilename := fmt.Sprintf(\"<filename for %s>\", path) \/\/ so we have a decent error message if necessary\n\treturn gcimporter.ImportData(imports, filename, path, bufio.NewReader(bytes.NewBuffer(data)))\n}\n<commit_msg>go.tools\/go\/importer: fix test (src\/pkg -> src)<commit_after>\/\/ Copyright 2013 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage importer\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/build\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"code.google.com\/p\/go.tools\/go\/gcimporter\"\n\t\"code.google.com\/p\/go.tools\/go\/types\"\n)\n\nvar tests = []string{\n\t`package p`,\n\n\t\/\/ consts\n\t`package p; const X = true`,\n\t`package p; const X, y, Z = true, false, 0 != 0`,\n\t`package p; const ( A float32 = 1<<iota; B; C; D)`,\n\t`package p; const X = \"foo\"`,\n\t`package p; const X string = \"foo\"`,\n\t`package p; const X = 0`,\n\t`package p; const X = -42`,\n\t`package p; const X = 3.14159265`,\n\t`package p; const X = -1e-10`,\n\t`package p; const X = 1.2 + 2.3i`,\n\t`package p; const X = -1i`,\n\t`package p; import \"math\"; const Pi = math.Pi`,\n\t`package p; import m \"math\"; const Pi = m.Pi`,\n\n\t\/\/ types\n\t`package p; type T int`,\n\t`package p; type T [10]int`,\n\t`package p; type T []int`,\n\t`package p; type T struct{}`,\n\t`package p; type T struct{x int}`,\n\t`package p; type T *int`,\n\t`package p; type T func()`,\n\t`package p; type T *T`,\n\t`package p; type T interface{}`,\n\t`package p; type T interface{ foo() }`,\n\t`package p; type T interface{ m() T }`,\n\t\/\/ TODO(gri) disabled for now - import\/export works but\n\t\/\/ types.Type.String() used in the test cannot handle cases\n\t\/\/ like this yet\n\t\/\/ `package p; type T interface{ m() interface{T} }`,\n\t`package p; type T map[string]bool`,\n\t`package p; type T chan int`,\n\t`package p; type T <-chan complex64`,\n\t`package p; type T chan<- map[int]string`,\n\t\/\/ test case for issue 8177\n\t`package p; type T1 interface { F(T2) }; type T2 interface { T1 }`,\n\n\t\/\/ vars\n\t`package p; var X int`,\n\t`package p; var X, Y, Z struct{f int \"tag\"}`,\n\n\t\/\/ funcs\n\t`package p; func F()`,\n\t`package p; func F(x int, y struct{}) bool`,\n\t`package p; type T int; func (*T) F(x int, y struct{}) T`,\n\n\t\/\/ selected special cases\n\t`package p; type T int`,\n\t`package p; type T uint8`,\n\t`package p; type T byte`,\n\t`package p; type T error`,\n\t`package p; import \"net\/http\"; type T http.Client`,\n\t`package p; import \"net\/http\"; type ( T1 http.Client; T2 struct { http.Client } )`,\n\t`package p; import \"unsafe\"; type ( T1 unsafe.Pointer; T2 unsafe.Pointer )`,\n\t`package p; import \"unsafe\"; type T struct { p unsafe.Pointer }`,\n}\n\nfunc TestImportSrc(t *testing.T) {\n\tfor _, src := range tests {\n\t\tpkg, err := pkgForSource(src)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"typecheck failed: %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\ttestExportImport(t, pkg, \"\")\n\t}\n}\n\nfunc TestImportStdLib(t *testing.T) {\n\tstart := time.Now()\n\n\tlibs, err := stdLibs()\n\tif err != nil {\n\t\tt.Fatalf(\"could not compute list of std libraries: %s\", err)\n\t}\n\tif len(libs) < 100 {\n\t\tt.Fatalf(\"only %d std libraries found - something's not right\", len(libs))\n\t}\n\n\t\/\/ make sure printed go\/types types and gc-imported types\n\t\/\/ can be compared reasonably well\n\ttypes.GcCompatibilityMode = true\n\n\tvar totSize, totGcSize int\n\tfor _, lib := range libs {\n\t\t\/\/ limit run time for short tests\n\t\tif testing.Short() && time.Since(start) >= 750*time.Millisecond {\n\t\t\treturn\n\t\t}\n\n\t\tpkg, err := pkgForPath(lib)\n\t\tswitch err := err.(type) 
{\n\t\tcase nil:\n\t\t\t\/\/ ok\n\t\tcase *build.NoGoError:\n\t\t\t\/\/ no Go files - ignore\n\t\t\tcontinue\n\t\tdefault:\n\t\t\tt.Errorf(\"typecheck failed: %s\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tsize, gcsize := testExportImport(t, pkg, lib)\n\t\tif gcsize == 0 {\n\t\t\t\/\/ if gc import didn't happen, assume same size\n\t\t\t\/\/ (and avoid division by zero below)\n\t\t\tgcsize = size\n\t\t}\n\n\t\tif testing.Verbose() {\n\t\t\tfmt.Printf(\"%s\\t%d\\t%d\\t%d%%\\n\", lib, size, gcsize, int(float64(size)*100\/float64(gcsize)))\n\t\t}\n\t\ttotSize += size\n\t\ttotGcSize += gcsize\n\t}\n\n\tif testing.Verbose() {\n\t\tfmt.Printf(\"\\n%d\\t%d\\t%d%%\\n\", totSize, totGcSize, int(float64(totSize)*100\/float64(totGcSize)))\n\t}\n\n\ttypes.GcCompatibilityMode = false\n}\n\nfunc testExportImport(t *testing.T, pkg0 *types.Package, path string) (size, gcsize int) {\n\tdata := ExportData(pkg0)\n\tsize = len(data)\n\n\timports := make(map[string]*types.Package)\n\tn, pkg1, err := ImportData(imports, data)\n\tif err != nil {\n\t\tt.Errorf(\"package %s: import failed: %s\", pkg0.Name(), err)\n\t\treturn\n\t}\n\tif n != size {\n\t\tt.Errorf(\"package %s: not all input data consumed\", pkg0.Name())\n\t\treturn\n\t}\n\n\ts0 := pkgString(pkg0)\n\ts1 := pkgString(pkg1)\n\tif s1 != s0 {\n\t\tt.Errorf(\"package %s: \\nimport got:\\n%s\\nwant:\\n%s\\n\", pkg0.Name(), s1, s0)\n\t}\n\n\t\/\/ If we have a standard library, compare also against the gcimported package.\n\tif path == \"\" {\n\t\treturn \/\/ not std library\n\t}\n\n\tgcdata, err := gcExportData(path)\n\tif err != nil {\n\t\tif pkg0.Name() == \"main\" {\n\t\t\treturn \/\/ no export data present for main package\n\t\t}\n\t\tt.Errorf(\"package %s: couldn't get export data: %s\", pkg0.Name(), err)\n\t}\n\tgcsize = len(gcdata)\n\n\timports = make(map[string]*types.Package)\n\tpkg2, err := gcImportData(imports, gcdata, path)\n\tif err != nil {\n\t\tt.Errorf(\"package %s: gcimport failed: %s\", pkg0.Name(), err)\n\t\treturn\n\t}\n\n\ts2 := pkgString(pkg2)\n\tif s2 != s0 {\n\t\tt.Errorf(\"package %s: \\ngcimport got:\\n%s\\nwant:\\n%s\\n\", pkg0.Name(), s2, s0)\n\t}\n\n\treturn\n}\n\nfunc pkgForSource(src string) (*types.Package, error) {\n\t\/\/ parse file\n\tfset := token.NewFileSet()\n\tf, err := parser.ParseFile(fset, \"\", src, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ typecheck file\n\tconf := types.Config{\n\t\t\/\/ strconv exports IntSize as a constant. The type-checker must\n\t\t\/\/ use the same word size otherwise the result of the type-checker\n\t\t\/\/ and gc imports is different. 
We don't care about alignment\n\t\t\/\/ since none of the tests have exported constants depending\n\t\t\/\/ on alignment (see also issue 8366).\n\t\tSizes: &types.StdSizes{WordSize: strconv.IntSize \/ 8, MaxAlign: 8},\n\t}\n\treturn conf.Check(\"import-test\", fset, []*ast.File{f}, nil)\n}\n\nfunc pkgForPath(path string) (*types.Package, error) {\n\t\/\/ collect filenames\n\tctxt := build.Default\n\tpkginfo, err := ctxt.Import(path, \"\", 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfilenames := append(pkginfo.GoFiles, pkginfo.CgoFiles...)\n\n\t\/\/ parse files\n\tfset := token.NewFileSet()\n\tfiles := make([]*ast.File, len(filenames))\n\tfor i, filename := range filenames {\n\t\tvar err error\n\t\tfiles[i], err = parser.ParseFile(fset, filepath.Join(pkginfo.Dir, filename), nil, 0)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ typecheck files\n\t\/\/ (we only care about exports and thus can ignore function bodies)\n\tconf := types.Config{IgnoreFuncBodies: true, FakeImportC: true}\n\treturn conf.Check(path, fset, files, nil)\n}\n\n\/\/ pkgString returns a string representation of a package's exported interface.\nfunc pkgString(pkg *types.Package) string {\n\tvar buf bytes.Buffer\n\n\tfmt.Fprintf(&buf, \"package %s\\n\", pkg.Name())\n\n\tscope := pkg.Scope()\n\tfor _, name := range scope.Names() {\n\t\tif exported(name) {\n\t\t\tobj := scope.Lookup(name)\n\t\t\tbuf.WriteString(obj.String())\n\n\t\t\tswitch obj := obj.(type) {\n\t\t\tcase *types.Const:\n\t\t\t\t\/\/ For now only print constant values if they are not float\n\t\t\t\t\/\/ or complex. This permits comparing go\/types results with\n\t\t\t\t\/\/ gc-generated gcimported package interfaces.\n\t\t\t\tinfo := obj.Type().Underlying().(*types.Basic).Info()\n\t\t\t\tif info&types.IsFloat == 0 && info&types.IsComplex == 0 {\n\t\t\t\t\tfmt.Fprintf(&buf, \" = %s\", obj.Val())\n\t\t\t\t}\n\n\t\t\tcase *types.TypeName:\n\t\t\t\t\/\/ Print associated methods.\n\t\t\t\t\/\/ Basic types (e.g., unsafe.Pointer) have *types.Basic\n\t\t\t\t\/\/ type rather than *types.Named; so we need to check.\n\t\t\t\tif typ, _ := obj.Type().(*types.Named); typ != nil {\n\t\t\t\t\tif n := typ.NumMethods(); n > 0 {\n\t\t\t\t\t\t\/\/ Sort methods by name so that we get the\n\t\t\t\t\t\t\/\/ same order independent of whether the\n\t\t\t\t\t\t\/\/ methods got imported or coming directly\n\t\t\t\t\t\t\/\/ for the source.\n\t\t\t\t\t\t\/\/ TODO(gri) This should probably be done\n\t\t\t\t\t\t\/\/ in go\/types.\n\t\t\t\t\t\tlist := make([]*types.Func, n)\n\t\t\t\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\t\t\t\tlist[i] = typ.Method(i)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tsort.Sort(byName(list))\n\n\t\t\t\t\t\tbuf.WriteString(\"\\nmethods (\\n\")\n\t\t\t\t\t\tfor _, m := range list {\n\t\t\t\t\t\t\tfmt.Fprintf(&buf, \"\\t%s\\n\", m)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbuf.WriteString(\")\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tbuf.WriteByte('\\n')\n\t\t}\n\t}\n\n\treturn buf.String()\n}\n\nvar stdLibRoot = filepath.Join(runtime.GOROOT(), \"src\") + string(filepath.Separator)\n\n\/\/ The following std libraries are excluded from the stdLibs list.\nvar excluded = map[string]bool{\n\t\"builtin\": true, \/\/ contains type declarations with cycles\n\t\"unsafe\": true, \/\/ contains fake declarations\n}\n\n\/\/ stdLibs returns the list of standard library package paths.\nfunc stdLibs() (list []string, err error) {\n\terr = filepath.Walk(stdLibRoot, func(path string, info os.FileInfo, err error) error {\n\t\tif err == nil && info.IsDir() {\n\t\t\t\/\/ testdata directories 
don't contain importable libraries\n\t\t\tif info.Name() == \"testdata\" {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t\tpkgPath := path[len(stdLibRoot):] \/\/ remove stdLibRoot\n\t\t\tif len(pkgPath) > 0 && !excluded[pkgPath] {\n\t\t\t\tlist = append(list, pkgPath)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\treturn\n}\n\ntype byName []*types.Func\n\nfunc (a byName) Len() int { return len(a) }\nfunc (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a byName) Less(i, j int) bool { return a[i].Name() < a[j].Name() }\n\n\/\/ gcExportData returns the gc-generated export data for the given path.\n\/\/ It is based on a trimmed-down version of gcimporter.Import which does\n\/\/ not do the actual import, does not handle package unsafe, and assumes\n\/\/ that path is a correct standard library package path (no canonicalization,\n\/\/ or handling of local import paths).\nfunc gcExportData(path string) ([]byte, error) {\n\tfilename, id := gcimporter.FindPkg(path, \"\")\n\tif filename == \"\" {\n\t\treturn nil, fmt.Errorf(\"can't find import: %s\", path)\n\t}\n\tif id != path {\n\t\tpanic(\"path should be canonicalized\")\n\t}\n\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tbuf := bufio.NewReader(f)\n\tif err = gcimporter.FindExportData(buf); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data []byte\n\tfor {\n\t\tline, err := buf.ReadBytes('\\n')\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdata = append(data, line...)\n\t\t\/\/ export data ends in \"$$\\n\"\n\t\tif len(line) == 3 && line[0] == '$' && line[1] == '$' {\n\t\t\treturn data, nil\n\t\t}\n\t}\n}\n\nfunc gcImportData(imports map[string]*types.Package, data []byte, path string) (*types.Package, error) {\n\tfilename := fmt.Sprintf(\"<filename for %s>\", path) \/\/ so we have a decent error message if necessary\n\treturn gcimporter.ImportData(imports, filename, path, bufio.NewReader(bytes.NewBuffer(data)))\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage netutil\n\nimport (\n\t\"math\/rand\"\n\t\"net\"\n\t\"testing\"\n)\n\nfunc checkDistribution(t *testing.T, data []*net.SRV, margin float64) {\n\tsum := 0\n\tfor _, srv := range data {\n\t\tsum += int(srv.Weight)\n\t}\n\n\tresults := make(map[string]int)\n\n\tcount := 1000\n\tfor j := 0; j < count; j++ {\n\t\td := make([]*net.SRV, len(data))\n\t\tcopy(d, data)\n\t\tbyPriorityWeight(d).shuffleByWeight()\n\t\tkey := d[0].Target\n\t\tresults[key] = results[key] + 1\n\t}\n\n\tactual := results[data[0].Target]\n\texpected := float64(count) * float64(data[0].Weight) \/ float64(sum)\n\tdiff := float64(actual) - expected\n\tt.Logf(\"actual: %v diff: %v e: %v m: %v\", actual, diff, expected, margin)\n\tif diff < 0 {\n\t\tdiff = -diff\n\t}\n\tif diff > (expected * margin) {\n\t\tt.Errorf(\"missed target weight: expected %v, %v\", expected, actual)\n\t}\n}\n\nfunc testUniformity(t *testing.T, size int, margin float64) {\n\trand.Seed(1)\n\tdata := 
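The importer record above is built around a round-trip invariant: export a type-checked package to bytes, re-import those bytes, and require the printed package interface to be identical (and, for standard-library packages, identical to what gcimporter reconstructs from the compiler's export data). A minimal generic sketch of that invariant follows, with JSON standing in for the real export format purely for illustration; the toy pkg type is hypothetical.

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

// pkg is a toy stand-in for a type-checked package's exported interface.
type pkg struct {
	Name  string
	Decls []string
}

func main() {
	p0 := pkg{Name: "p", Decls: []string{"type T int", "func F()"}}

	data, err := json.Marshal(p0) // "export"
	if err != nil {
		panic(err)
	}

	var p1 pkg
	if err := json.Unmarshal(data, &p1); err != nil { // "import"
		panic(err)
	}

	// The round trip must not change the observable interface.
	if !reflect.DeepEqual(p0, p1) {
		panic(fmt.Sprintf("round trip changed the package: %+v vs %+v", p0, p1))
	}
	fmt.Println("round trip ok,", len(data), "bytes")
}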
make([]*net.SRV, size)\n\tfor i := 0; i < size; i++ {\n\t\tdata[i] = &net.SRV{Target: string('a' + i), Weight: 1}\n\t}\n\tcheckDistribution(t, data, margin)\n}\n\nfunc TestUniformity(t *testing.T) {\n\ttestUniformity(t, 2, 0.05)\n\ttestUniformity(t, 3, 0.10)\n\ttestUniformity(t, 10, 0.20)\n\ttestWeighting(t, 0.05)\n}\n\nfunc testWeighting(t *testing.T, margin float64) {\n\trand.Seed(1)\n\tdata := []*net.SRV{\n\t\t{Target: \"a\", Weight: 60},\n\t\t{Target: \"b\", Weight: 30},\n\t\t{Target: \"c\", Weight: 10},\n\t}\n\tcheckDistribution(t, data, margin)\n}\n\nfunc TestWeighting(t *testing.T) {\n\ttestWeighting(t, 0.05)\n}\n\nfunc TestSplitHostPort(t *testing.T) {\n\ttype addr struct {\n\t\thost string\n\t\tport int\n\t}\n\ttable := map[string]addr{\n\t\t\"host-name:132\": {host: \"host-name\", port: 132},\n\t\t\"hostname:65535\": {host: \"hostname\", port: 65535},\n\t\t\"[::1]:321\": {host: \"::1\", port: 321},\n\t\t\"::1:432\": {host: \"::1\", port: 432},\n\t}\n\tfor input, want := range table {\n\t\tgotHost, gotPort, err := SplitHostPort(input)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"SplitHostPort error: %v\", err)\n\t\t}\n\t\tif gotHost != want.host || gotPort != want.port {\n\t\t\tt.Errorf(\"SplitHostPort(%#v) = (%v, %v), want (%v, %v)\", input, gotHost, gotPort, want.host, want.port)\n\t\t}\n\t}\n}\n\nfunc TestSplitHostPortFail(t *testing.T) {\n\t\/\/ These cases should all fail to parse.\n\tinputs := []string{\n\t\t\"host-name\",\n\t\t\"host-name:123abc\",\n\t}\n\tfor _, input := range inputs {\n\t\t_, _, err := SplitHostPort(input)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"expected error from SplitHostPort(%q), but got none\", input)\n\t\t}\n\t}\n}\n\nfunc TestJoinHostPort(t *testing.T) {\n\ttype addr struct {\n\t\thost string\n\t\tport int32\n\t}\n\ttable := map[string]addr{\n\t\t\"host-name:132\": {host: \"host-name\", port: 132},\n\t\t\"[::1]:321\": {host: \"::1\", port: 321},\n\t}\n\tfor want, input := range table {\n\t\tif got := JoinHostPort(input.host, input.port); got != want {\n\t\t\tt.Errorf(\"SplitHostPort(%v, %v) = %#v, want %#v\", input.host, input.port, got, want)\n\t\t}\n\t}\n}\n<commit_msg>Add unit test for function:ResolveIPv4Addrs<commit_after>\/*\nCopyright 2019 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage netutil\n\nimport (\n\t\"math\/rand\"\n\t\"net\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc checkDistribution(t *testing.T, data []*net.SRV, margin float64) {\n\tsum := 0\n\tfor _, srv := range data {\n\t\tsum += int(srv.Weight)\n\t}\n\n\tresults := make(map[string]int)\n\n\tcount := 1000\n\tfor j := 0; j < count; j++ {\n\t\td := make([]*net.SRV, len(data))\n\t\tcopy(d, data)\n\t\tbyPriorityWeight(d).shuffleByWeight()\n\t\tkey := d[0].Target\n\t\tresults[key] = results[key] + 1\n\t}\n\n\tactual := results[data[0].Target]\n\texpected := float64(count) * float64(data[0].Weight) \/ float64(sum)\n\tdiff := float64(actual) - expected\n\tt.Logf(\"actual: %v diff: %v e: %v m: %v\", actual, diff, expected, margin)\n\tif diff < 0 {\n\t\tdiff = 
-diff\n\t}\n\tif diff > (expected * margin) {\n\t\tt.Errorf(\"missed target weight: expected %v, %v\", expected, actual)\n\t}\n}\n\nfunc testUniformity(t *testing.T, size int, margin float64) {\n\trand.Seed(1)\n\tdata := make([]*net.SRV, size)\n\tfor i := 0; i < size; i++ {\n\t\tdata[i] = &net.SRV{Target: string('a' + i), Weight: 1}\n\t}\n\tcheckDistribution(t, data, margin)\n}\n\nfunc TestUniformity(t *testing.T) {\n\ttestUniformity(t, 2, 0.05)\n\ttestUniformity(t, 3, 0.10)\n\ttestUniformity(t, 10, 0.20)\n\ttestWeighting(t, 0.05)\n}\n\nfunc testWeighting(t *testing.T, margin float64) {\n\trand.Seed(1)\n\tdata := []*net.SRV{\n\t\t{Target: \"a\", Weight: 60},\n\t\t{Target: \"b\", Weight: 30},\n\t\t{Target: \"c\", Weight: 10},\n\t}\n\tcheckDistribution(t, data, margin)\n}\n\nfunc TestWeighting(t *testing.T) {\n\ttestWeighting(t, 0.05)\n}\n\nfunc TestSplitHostPort(t *testing.T) {\n\ttype addr struct {\n\t\thost string\n\t\tport int\n\t}\n\ttable := map[string]addr{\n\t\t\"host-name:132\": {host: \"host-name\", port: 132},\n\t\t\"hostname:65535\": {host: \"hostname\", port: 65535},\n\t\t\"[::1]:321\": {host: \"::1\", port: 321},\n\t\t\"::1:432\": {host: \"::1\", port: 432},\n\t}\n\tfor input, want := range table {\n\t\tgotHost, gotPort, err := SplitHostPort(input)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"SplitHostPort error: %v\", err)\n\t\t}\n\t\tif gotHost != want.host || gotPort != want.port {\n\t\t\tt.Errorf(\"SplitHostPort(%#v) = (%v, %v), want (%v, %v)\", input, gotHost, gotPort, want.host, want.port)\n\t\t}\n\t}\n}\n\nfunc TestSplitHostPortFail(t *testing.T) {\n\t\/\/ These cases should all fail to parse.\n\tinputs := []string{\n\t\t\"host-name\",\n\t\t\"host-name:123abc\",\n\t}\n\tfor _, input := range inputs {\n\t\t_, _, err := SplitHostPort(input)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"expected error from SplitHostPort(%q), but got none\", input)\n\t\t}\n\t}\n}\n\nfunc TestJoinHostPort(t *testing.T) {\n\ttype addr struct {\n\t\thost string\n\t\tport int32\n\t}\n\ttable := map[string]addr{\n\t\t\"host-name:132\": {host: \"host-name\", port: 132},\n\t\t\"[::1]:321\": {host: \"::1\", port: 321},\n\t}\n\tfor want, input := range table {\n\t\tif got := JoinHostPort(input.host, input.port); got != want {\n\t\t\tt.Errorf(\"SplitHostPort(%v, %v) = %#v, want %#v\", input.host, input.port, got, want)\n\t\t}\n\t}\n}\n\nfunc TestResolveIPv4Addrs(t *testing.T) {\n\tcases := []struct {\n\t\taddress string\n\t\texpected []string\n\t\texpectedError bool\n\t}{\n\t\t{\n\t\t\taddress: \"localhost:3306\",\n\t\t\texpected: []string{\"127.0.0.1:3306\"},\n\t\t},\n\t\t{\n\t\t\taddress: \"127.0.0.256:3306\",\n\t\t\texpectedError: true,\n\t\t},\n\t\t{\n\t\t\taddress: \"localhost\",\n\t\t\texpectedError: true,\n\t\t},\n\t\t{\n\t\t\taddress: \"InvalidHost:3306\",\n\t\t\texpectedError: true,\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tt.Run(c.address, func(t *testing.T) {\n\t\t\tgot, err := ResolveIPv4Addrs(c.address)\n\t\t\tif (err != nil) != c.expectedError {\n\t\t\t\tt.Errorf(\"expected error but got: %v\", err)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, c.expected) {\n\t\t\t\tt.Errorf(\"expected: %v, got: %v\", c.expected, got)\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in 
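checkDistribution in the netutil record above validates a randomized weighted shuffle statistically: repeat the shuffle many times and require each target's observed frequency to fall within a relative margin of its weight's share. Below is a minimal sketch of that margin check over a simple weighted picker; the picker itself is hypothetical, not the record's shuffleByWeight, and the fixed seed makes the report deterministic, as in the record.

package main

import (
	"fmt"
	"math"
	"math/rand"
)

// pickWeighted returns an index with probability proportional to its weight.
func pickWeighted(r *rand.Rand, weights []int) int {
	sum := 0
	for _, w := range weights {
		sum += w
	}
	n := r.Intn(sum)
	for i, w := range weights {
		if n < w {
			return i
		}
		n -= w
	}
	return len(weights) - 1 // unreachable; keeps the compiler satisfied
}

func main() {
	r := rand.New(rand.NewSource(1)) // fixed seed, deterministic report
	weights := []int{60, 30, 10}
	const trials = 10000
	counts := make([]int, len(weights))
	for i := 0; i < trials; i++ {
		counts[pickWeighted(r, weights)]++
	}
	for i, w := range weights {
		expected := float64(trials) * float64(w) / 100 // weights sum to 100
		diff := math.Abs(float64(counts[i]) - expected)
		fmt.Printf("outcome %d: got %d, want ~%.0f, within 10%%: %v\n",
			i, counts[i], expected, diff <= expected*0.10)
	}
}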
writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage mysqlctl\n\n\/*\nThis file contains the reparenting methods for mysqlctl.\n\nTODO(alainjobart) Once refactoring is done, remove unused code paths.\n*\/\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"vitess.io\/vitess\/go\/mysql\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ CreateReparentJournal returns the commands to execute to create\n\/\/ the _vt.reparent_journal table. It is safe to run these commands\n\/\/ even if the table already exists.\n\/\/\n\/\/ If the table was created by Vitess version 2.0, the following command\n\/\/ may need to be run:\n\/\/ ALTER TABLE _vt.reparent_journal MODIFY COLUMN replication_position VARBINARY(64000);\nfunc CreateReparentJournal() []string {\n\treturn []string{\n\t\t\"CREATE DATABASE IF NOT EXISTS _vt\",\n\t\tfmt.Sprintf(`CREATE TABLE IF NOT EXISTS _vt.reparent_journal (\n time_created_ns BIGINT UNSIGNED NOT NULL,\n action_name VARBINARY(250) NOT NULL,\n master_alias VARBINARY(32) NOT NULL,\n replication_position VARBINARY(%v) DEFAULT NULL,\n PRIMARY KEY (time_created_ns))\nENGINE=InnoDB`, mysql.MaximumPositionSize)}\n}\n\n\/\/ PopulateReparentJournal returns the SQL command to use to populate\n\/\/ the _vt.reparent_journal table, as well as the time_created_ns\n\/\/ value used.\nfunc PopulateReparentJournal(timeCreatedNS int64, actionName, masterAlias string, pos mysql.Position) string {\n\tposStr := mysql.EncodePosition(pos)\n\tif len(posStr) > mysql.MaximumPositionSize {\n\t\tposStr = posStr[:mysql.MaximumPositionSize]\n\t}\n\treturn fmt.Sprintf(\"INSERT INTO _vt.reparent_journal \"+\n\t\t\"(time_created_ns, action_name, master_alias, replication_position) \"+\n\t\t\"VALUES (%v, '%v', '%v', '%v')\",\n\t\ttimeCreatedNS, actionName, masterAlias, posStr)\n}\n\n\/\/ queryReparentJournal returns the SQL query to use to query the database\n\/\/ for a reparent_journal row.\nfunc queryReparentJournal(timeCreatedNS int64) string {\n\treturn fmt.Sprintf(\"SELECT action_name, master_alias, replication_position FROM _vt.reparent_journal WHERE time_created_ns=%v\", timeCreatedNS)\n}\n\n\/\/ WaitForReparentJournal will wait until the context is done for\n\/\/ the row in the reparent_journal table.\nfunc (mysqld *Mysqld) WaitForReparentJournal(ctx context.Context, timeCreatedNS int64) error {\n\tfor {\n\t\tqr, err := mysqld.FetchSuperQuery(ctx, queryReparentJournal(timeCreatedNS))\n\t\tif err == nil && len(qr.Rows) == 1 {\n\t\t\t\/\/ we have the row, we're done\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ wait a little bit, interrupt if context is done\n\t\tt := time.After(100 * time.Millisecond)\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tcase <-t:\n\t\t}\n\t}\n}\n\n\/\/ Deprecated: use mysqld.MasterPosition() instead\nfunc (mysqld *Mysqld) DemoteMaster() (rp mysql.Position, err error) {\n\treturn mysqld.MasterPosition()\n}\n\n\/\/ PromoteSlave will promote a slave to be the new master.\nfunc (mysqld *Mysqld) PromoteSlave(hookExtraEnv map[string]string) (mysql.Position, error) {\n\tctx := context.TODO()\n\tconn, err := getPoolReconnect(ctx, mysqld.dbaPool)\n\tif err != nil {\n\t\treturn mysql.Position{}, err\n\t}\n\tdefer conn.Recycle()\n\n\t\/\/ Since we handle replication, just stop it.\n\tcmds := []string{\n\t\tconn.StopSlaveCommand(),\n\t\t\"RESET SLAVE ALL\", \/\/ 
\"ALL\" makes it forget master host:port.\n\t\t\/\/ When using semi-sync and GTID, a replica first connects to the new master with a given GTID set,\n\t\t\/\/ it can take a long time to scan the current binlog file to find the corresponding position.\n\t\t\/\/ This can cause commits that occur soon after the master is promoted to take a long time waiting\n\t\t\/\/ for a semi-sync ACK, since replication is not fully set up.\n\t\t\/\/ More details in: https:\/\/github.com\/vitessio\/vitess\/issues\/4161\n\t\t\"FLUSH BINARY LOGS\",\n\t}\n\n\tif err := mysqld.executeSuperQueryListConn(ctx, conn, cmds); err != nil {\n\t\treturn mysql.Position{}, err\n\t}\n\treturn conn.MasterPosition()\n}\n<commit_msg>Add log line indicating when WaitForReparentJournal fails<commit_after>\/*\nCopyright 2017 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage mysqlctl\n\n\/*\nThis file contains the reparenting methods for mysqlctl.\n\nTODO(alainjobart) Once refactoring is done, remove unused code paths.\n*\/\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"vitess.io\/vitess\/go\/mysql\"\n\t\"vitess.io\/vitess\/go\/vt\/log\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ CreateReparentJournal returns the commands to execute to create\n\/\/ the _vt.reparent_journal table. 
It is safe to run these commands\n\/\/ even if the table already exists.\n\/\/\n\/\/ If the table was created by Vitess version 2.0, the following command\n\/\/ may need to be run:\n\/\/ ALTER TABLE _vt.reparent_journal MODIFY COLUMN replication_position VARBINARY(64000);\nfunc CreateReparentJournal() []string {\n\treturn []string{\n\t\t\"CREATE DATABASE IF NOT EXISTS _vt\",\n\t\tfmt.Sprintf(`CREATE TABLE IF NOT EXISTS _vt.reparent_journal (\n time_created_ns BIGINT UNSIGNED NOT NULL,\n action_name VARBINARY(250) NOT NULL,\n master_alias VARBINARY(32) NOT NULL,\n replication_position VARBINARY(%v) DEFAULT NULL,\n PRIMARY KEY (time_created_ns))\nENGINE=InnoDB`, mysql.MaximumPositionSize)}\n}\n\n\/\/ PopulateReparentJournal returns the SQL command to use to populate\n\/\/ the _vt.reparent_journal table, as well as the time_created_ns\n\/\/ value used.\nfunc PopulateReparentJournal(timeCreatedNS int64, actionName, masterAlias string, pos mysql.Position) string {\n\tposStr := mysql.EncodePosition(pos)\n\tif len(posStr) > mysql.MaximumPositionSize {\n\t\tposStr = posStr[:mysql.MaximumPositionSize]\n\t}\n\treturn fmt.Sprintf(\"INSERT INTO _vt.reparent_journal \"+\n\t\t\"(time_created_ns, action_name, master_alias, replication_position) \"+\n\t\t\"VALUES (%v, '%v', '%v', '%v')\",\n\t\ttimeCreatedNS, actionName, masterAlias, posStr)\n}\n\n\/\/ queryReparentJournal returns the SQL query to use to query the database\n\/\/ for a reparent_journal row.\nfunc queryReparentJournal(timeCreatedNS int64) string {\n\treturn fmt.Sprintf(\"SELECT action_name, master_alias, replication_position FROM _vt.reparent_journal WHERE time_created_ns=%v\", timeCreatedNS)\n}\n\n\/\/ WaitForReparentJournal will wait until the context is done for\n\/\/ the row in the reparent_journal table.\nfunc (mysqld *Mysqld) WaitForReparentJournal(ctx context.Context, timeCreatedNS int64) error {\n\tfor {\n\t\tqr, err := mysqld.FetchSuperQuery(ctx, queryReparentJournal(timeCreatedNS))\n\t\tif err == nil && len(qr.Rows) == 1 {\n\t\t\t\/\/ we have the row, we're done\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ wait a little bit, interrupt if context is done\n\t\tt := time.After(100 * time.Millisecond)\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tlog.Warning(\"WaitForReparentJournal failed to see row before timeout.\")\n\t\t\treturn ctx.Err()\n\t\tcase <-t:\n\t\t}\n\t}\n}\n\n\/\/ Deprecated: use mysqld.MasterPosition() instead\nfunc (mysqld *Mysqld) DemoteMaster() (rp mysql.Position, err error) {\n\treturn mysqld.MasterPosition()\n}\n\n\/\/ PromoteSlave will promote a slave to be the new master.\nfunc (mysqld *Mysqld) PromoteSlave(hookExtraEnv map[string]string) (mysql.Position, error) {\n\tctx := context.TODO()\n\tconn, err := getPoolReconnect(ctx, mysqld.dbaPool)\n\tif err != nil {\n\t\treturn mysql.Position{}, err\n\t}\n\tdefer conn.Recycle()\n\n\t\/\/ Since we handle replication, just stop it.\n\tcmds := []string{\n\t\tconn.StopSlaveCommand(),\n\t\t\"RESET SLAVE ALL\", \/\/ \"ALL\" makes it forget master host:port.\n\t\t\/\/ When using semi-sync and GTID, a replica first connects to the new master with a given GTID set,\n\t\t\/\/ it can take a long time to scan the current binlog file to find the corresponding position.\n\t\t\/\/ This can cause commits that occur soon after the master is promoted to take a long time waiting\n\t\t\/\/ for a semi-sync ACK, since replication is not fully set up.\n\t\t\/\/ More details in: https:\/\/github.com\/vitessio\/vitess\/issues\/4161\n\t\t\"FLUSH BINARY LOGS\",\n\t}\n\n\tif err := 
mysqld.executeSuperQueryListConn(ctx, conn, cmds); err != nil {\n\t\treturn mysql.Position{}, err\n\t}\n\treturn conn.MasterPosition()\n}\n<|endoftext|>"} {"text":"<commit_before>package gqt_test\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/garden\"\n\t\"code.cloudfoundry.org\/guardian\/gqt\/runner\"\n\tuuid \"github.com\/nu7hatch\/gouuid\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"Networking Uniqueness\", func() {\n\tvar (\n\t\tclient *runner.RunningGarden\n\t)\n\n\tBeforeEach(func() {\n\t\tconfig.LogLevel = \"error\"\n\t\tclient = runner.Start(config)\n\t})\n\n\tAfterEach(func() {\n\t\tExpect(client.DestroyAndStop()).To(Succeed())\n\t})\n\n\tIt(\"should not allocate duplicate subnets\", func() {\n\t\tskipIfDev()\n\t\troutines := 25\n\t\tcontainersPerRoutine := 10\n\t\tallContainerInfos := []garden.ContainerInfo{}\n\t\tvar mutex = &sync.Mutex{}\n\n\t\twg := &sync.WaitGroup{}\n\t\twg.Add(routines)\n\t\tfor i := 0; i < routines; i++ {\n\t\t\tgo func() {\n\t\t\t\tdefer GinkgoRecover()\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\tcontainerInfos := create(client, containersPerRoutine)\n\n\t\t\t\tmutex.Lock()\n\t\t\t\tdefer mutex.Unlock()\n\t\t\t\tallContainerInfos = append(allContainerInfos, containerInfos...)\n\t\t\t}()\n\t\t}\n\n\t\twg.Wait()\n\t\tExpect(numContainers(client)).To(Equal(routines * containersPerRoutine))\n\t\tExpect(numBridges()).To(Equal(routines*containersPerRoutine), diagnose(allContainerInfos))\n\t})\n})\n\nfunc diagnose(containerInfos []garden.ContainerInfo) string {\n\tifconfigBytes, err := exec.Command(\"ifconfig\").CombinedOutput()\n\tinterfaceInfo := string(ifconfigBytes)\n\tif err != nil {\n\t\tinterfaceInfo += fmt.Sprintf(\"\\nifconfig error: %v\\n\", err)\n\t}\n\n\tvar allContainersInfo string\n\tbytes, err := json.Marshal(containerInfos)\n\tif err != nil {\n\t\tallContainersInfo = fmt.Sprintf(\"Could not marshal containers due to %v; raw containers data:\\n %#v\", err, containerInfos)\n\t} else {\n\t\tallContainersInfo = string(bytes)\n\t}\n\n\treturn fmt.Sprintf(\"%s\\n%s\\n\", interfaceInfo, allContainersInfo)\n}\n\nfunc create(client *runner.RunningGarden, n int) []garden.ContainerInfo {\n\tcontainerInfos := []garden.ContainerInfo{}\n\tfor i := 0; i < n; i++ {\n\t\ttime.Sleep(randomSleepDuration())\n\n\t\tid, err := uuid.NewV4()\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\trandomID := \"net-uniq-\" + id.String()\n\t\tcontainer, err := client.Create(garden.ContainerSpec{Handle: randomID})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tcontainerInfo, err := container.Info()\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tcontainerInfos = append(containerInfos, containerInfo)\n\n\t}\n\treturn containerInfos\n}\n\nfunc randomSleepDuration() time.Duration {\n\trandom := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tduration := time.Duration(50+random.Intn(500)) * time.Millisecond\n\treturn duration\n}\n\nfunc numContainers(client *runner.RunningGarden) int {\n\tcontainers, err := client.Containers(nil)\n\tExpect(err).NotTo(HaveOccurred())\n\treturn len(containers)\n}\n\nfunc numBridges() int {\n\tintfs, err := net.Interfaces()\n\tExpect(err).NotTo(HaveOccurred())\n\n\tbridgeCount := 0\n\n\tfor _, intf := range intfs {\n\t\tif strings.Contains(intf.Name, fmt.Sprintf(\"w%dbrdg\", GinkgoParallelNode())) {\n\t\t\tbridgeCount++\n\t\t}\n\t}\n\n\treturn bridgeCount\n}\n<commit_msg>Filter out bridges to prevent test 
flakiness<commit_after>package gqt_test\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/garden\"\n\t\"code.cloudfoundry.org\/guardian\/gqt\/runner\"\n\tuuid \"github.com\/nu7hatch\/gouuid\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"Networking Uniqueness\", func() {\n\tvar (\n\t\tclient *runner.RunningGarden\n\t)\n\n\tBeforeEach(func() {\n\t\tconfig.LogLevel = \"error\"\n\t\tclient = runner.Start(config)\n\t})\n\n\tAfterEach(func() {\n\t\tExpect(client.DestroyAndStop()).To(Succeed())\n\t})\n\n\tIt(\"should not allocate duplicate subnets\", func() {\n\t\tskipIfDev()\n\t\troutines := 25\n\t\tcontainersPerRoutine := 10\n\t\tallContainerInfos := []garden.ContainerInfo{}\n\t\tvar mutex = &sync.Mutex{}\n\n\t\twg := &sync.WaitGroup{}\n\t\twg.Add(routines)\n\t\tfor i := 0; i < routines; i++ {\n\t\t\tgo func() {\n\t\t\t\tdefer GinkgoRecover()\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\tcontainerInfos := create(client, containersPerRoutine)\n\n\t\t\t\tmutex.Lock()\n\t\t\t\tdefer mutex.Unlock()\n\t\t\t\tallContainerInfos = append(allContainerInfos, containerInfos...)\n\t\t\t}()\n\t\t}\n\n\t\twg.Wait()\n\t\tExpect(numContainers(client)).To(Equal(routines * containersPerRoutine))\n\t\tExpect(numBridges()).To(Equal(routines*containersPerRoutine), diagnose(allContainerInfos))\n\t})\n})\n\nfunc diagnose(containerInfos []garden.ContainerInfo) string {\n\tifconfigBytes, err := exec.Command(\"ifconfig\").CombinedOutput()\n\tinterfaceInfo := string(ifconfigBytes)\n\tif err != nil {\n\t\tinterfaceInfo += fmt.Sprintf(\"\\nifconfig error: %v\\n\", err)\n\t}\n\n\tvar allContainersInfo string\n\tbytes, err := json.Marshal(containerInfos)\n\tif err != nil {\n\t\tallContainersInfo = fmt.Sprintf(\"Could not marshal containers due to %v; raw containers data:\\n %#v\", err, containerInfos)\n\t} else {\n\t\tallContainersInfo = string(bytes)\n\t}\n\n\treturn fmt.Sprintf(\"%s\\n%s\\n\", interfaceInfo, allContainersInfo)\n}\n\nfunc create(client *runner.RunningGarden, n int) []garden.ContainerInfo {\n\tcontainerInfos := []garden.ContainerInfo{}\n\tfor i := 0; i < n; i++ {\n\t\ttime.Sleep(randomSleepDuration())\n\n\t\tid, err := uuid.NewV4()\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\trandomID := \"net-uniq-\" + id.String()\n\t\tcontainer, err := client.Create(garden.ContainerSpec{Handle: randomID})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tcontainerInfo, err := container.Info()\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tcontainerInfos = append(containerInfos, containerInfo)\n\n\t}\n\treturn containerInfos\n}\n\nfunc randomSleepDuration() time.Duration {\n\trandom := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tduration := time.Duration(50+random.Intn(500)) * time.Millisecond\n\treturn duration\n}\n\nfunc numContainers(client *runner.RunningGarden) int {\n\tcontainers, err := client.Containers(nil)\n\tExpect(err).NotTo(HaveOccurred())\n\treturn len(containers)\n}\n\nfunc numBridges() int {\n\tintfs, err := net.Interfaces()\n\tExpect(err).NotTo(HaveOccurred())\n\n\tbridgeCount := 0\n\n\tfor _, intf := range intfs {\n\t\tif strings.Contains(intf.Name, fmt.Sprintf(\"w%dbrdg-0afe\", GinkgoParallelNode())) {\n\t\t\tbridgeCount++\n\t\t}\n\t}\n\n\treturn bridgeCount\n}\n<|endoftext|>"} {"text":"<commit_before>\/* _ _\n *__ _____ __ ___ ___ __ _| |_ ___\n *\\ \\ \/\\ \/ \/ _ \\\/ _` \\ \\ \/ \/ |\/ _` | __\/ _ \\\n * \\ V V \/ __\/ (_| |\\ V \/| | (_| | || __\/\n * 
\\_\/\\_\/ \\___|\\__,_| \\_\/ |_|\\__,_|\\__\\___|\n *\n * Copyright © 2016 - 2018 Weaviate. All rights reserved.\n * LICENSE: https:\/\/github.com\/creativesoftwarefdn\/weaviate\/blob\/develop\/LICENSE.md\n * AUTHOR: Bob van Luijt (bob@kub.design)\n * See www.creativesoftwarefdn.org for details\n * Contact: @CreativeSofwFdn \/ bob@kub.design\n *\/\n\npackage graphqlapi\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/graphql-go\/graphql\"\n)\n\n\/\/ Build the GraphQL schema based on\n\/\/ 1) the static query structure (e.g. LocalFetch)\n\/\/ 2) the (dynamic) database schema from Weaviate\n\nfunc (g *GraphQL) genGraphqlSchema() error {\n\n\trootFieldsObject, err := g.assembleFullSchema()\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not build GraphQL schema, because: %v\", err)\n\t}\n\n\tschemaObject := graphql.ObjectConfig{\n\t\tName: \"WeaviateObj\",\n\t\tFields: rootFieldsObject,\n\t\tDescription: \"Location of the root query\",\n\t}\n\n\t\/\/ Run graphql.NewSchema in a sub-closure, so that we can recover from panics.\n\t\/\/ We need to use panics to return errors deep inside the dynamic generation of the GraphQL schema,\n\t\/\/ inside the FieldThunks. There is _no_ way to bubble up an error besides panicking.\n\tfunc() {\n\t\tdefer func() {\n\t\t\tif r := recover(); r != nil {\n\t\t\t\tvar ok bool\n\t\t\t\terr, ok = r.(error) \/\/ can't shadow err here; we need the err from outside the function closure.\n\t\t\t\tif !ok {\n\t\t\t\t\terr = fmt.Errorf(\"%v\", r)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\n\t\tg.weaviateGraphQLSchema, err = graphql.NewSchema(graphql.SchemaConfig{\n\t\t\tQuery: graphql.NewObject(schemaObject),\n\t\t})\n\t}()\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not build GraphQL schema, because: %v\", err)\n\t} else {\n\t\treturn nil\n\t}\n}\n\n\/\/ check: handle class refs for multiple objects as a datatype (union)\n\/\/ check: do this for Things as well\n\/\/ check: refactor to returning objects instead of object configs\n\/\/ check: check all Things strings\n\/\/ check: confirm output of dynamic schema generation; classes as properties in lists y\/n?\n\/\/ check: implement metafetch\n\/\/ TODO: implement filters\n\nfunc (g *GraphQL) assembleFullSchema() (graphql.Fields, error) {\n\n\t\/\/ This map is used to store all the Thing and Action ObjectConfigs, so that we can use them in references.\n\tconvertedFetchActionsAndThings := make(map[string]*graphql.Object)\n\n\tlocalConvertedFetchActions, err := g.genActionClassFieldsFromSchema(&convertedFetchActionsAndThings)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate action fields from schema for local convertedfetch because: %v\", err)\n\t}\n\n\tlocalConvertedFetchThings, err := g.genThingClassFieldsFromSchema(&convertedFetchActionsAndThings)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate thing fields from schema for local convertedfetch because: %v\", err)\n\t}\n\n\tlocalMetaFetchActions, err := g.genMetaClassFieldsFromSchema(g.databaseSchema.ActionSchema.Schema.Classes, true)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate action fields from schema for local metafetch because: %v\", err)\n\t}\n\n\tlocalMetaFetchThings, err := g.genMetaClassFieldsFromSchema(g.databaseSchema.ThingSchema.Schema.Classes, false)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate thing fields from schema for local metafetch because: %v\", err)\n\t}\n\n\tlocalConvertedFetchObject, err := 
g.genThingsAndActionsFieldsForWeaviateLocalConvertedFetchObj(localConvertedFetchActions, localConvertedFetchThings)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate things and action fields for local convertedfetch because: %v\", err)\n\t}\n\n\tlocalMetaFetchObject, err := g.genThingsAndActionsFieldsForWeaviateLocalMetaFetchGenericsObj(localMetaFetchActions, localMetaFetchThings)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate things and action fields for local metafetch because: %v\", err)\n\t}\n\n\tlocalMetaGenericsObject, err := g.genGenericsFieldForWeaviateLocalMetaFetchObj(localMetaFetchObject)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate generics field for local metafetch because: %v\", err)\n\t}\n\n\tlocalMetaAndConvertedFetchObject, err := g.genConvertedFetchAndMetaGenericsFields(localConvertedFetchObject, localMetaGenericsObject)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate meta and convertedfetch fields for local weaviateobject because: %v\", err)\n\t}\n\n\tlocalObject, err := g.genLocalField(localMetaAndConvertedFetchObject)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate local field for local weaviateobject because: %v\", err)\n\t}\n\n\trootFieldsObject, err := g.genRootQueryFields(localObject)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate root query because: %v\", err)\n\t}\n\n\treturn rootFieldsObject, nil\n}\n\n\/\/ generate the static parts of the schema\nfunc (g *GraphQL) genThingsAndActionsFieldsForWeaviateLocalConvertedFetchObj(localConvertedFetchActions *graphql.Object,\n\tlocalConvertedFetchThings *graphql.Object) (*graphql.Object, error) {\n\n\tconvertedFetchThingsAndActionFields := graphql.Fields{\n\t\t\"Actions\": &graphql.Field{\n\t\t\tName: \"WeaviateLocalConvertedFetchActions\",\n\t\t\tDescription: \"Locate Actions on the local Weaviate\",\n\t\t\tType: localConvertedFetchActions,\n\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\treturn nil, fmt.Errorf(\"Not supported\")\n\t\t\t},\n\t\t},\n\t\t\"Things\": &graphql.Field{\n\t\t\tName: \"WeaviateLocalConvertedFetchThings\",\n\t\t\tDescription: \"Locate Things on the local Weaviate\",\n\t\t\tType: localConvertedFetchThings,\n\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\treturn nil, fmt.Errorf(\"Not supported\")\n\t\t\t},\n\t\t},\n\t}\n\tconvertedFetchThingsAndActionFieldsObject := graphql.ObjectConfig{\n\t\tName: \"WeaviateLocalConvertedFetchObj\",\n\t\tFields: convertedFetchThingsAndActionFields,\n\t\tDescription: \"Fetch things or actions on the internal Weaviate\",\n\t}\n\treturn graphql.NewObject(convertedFetchThingsAndActionFieldsObject), nil\n}\n\nfunc (g *GraphQL) genThingsAndActionsFieldsForWeaviateLocalMetaFetchGenericsObj(localMetaFetchActions *graphql.Object, localMetaFetchThings *graphql.Object) (*graphql.Object, error) {\n\n\tmetaFetchGenericsThingsAndActionFields := graphql.Fields{\n\t\t\"Actions\": &graphql.Field{\n\t\t\tName: \"WeaviateLocalMetaFetchGenericsActions\",\n\t\t\tDescription: \"Action to fetch for meta generic fetch\",\n\t\t\tType: localMetaFetchActions,\n\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\treturn nil, fmt.Errorf(\"Not supported\")\n\t\t\t},\n\t\t},\n\t\t\"Things\": &graphql.Field{\n\t\t\tName: \"WeaviateLocalMetaFetchGenericsThings\",\n\t\t\tDescription: \"Thing to fetch for meta generic fetch\",\n\t\t\tType: localMetaFetchThings,\n\t\t\tResolve: func(p 
graphql.ResolveParams) (interface{}, error) {\n\t\t\t\treturn nil, fmt.Errorf(\"Not supported\")\n\t\t\t},\n\t\t},\n\t}\n\tmetaFetchGenericsThingsAndActionFieldsObject := graphql.ObjectConfig{\n\t\tName: \"WeaviateLocalMetaFetchGenericsObj\",\n\t\tFields: metaFetchGenericsThingsAndActionFields,\n\t\tDescription: \"Object type to fetch\",\n\t}\n\treturn graphql.NewObject(metaFetchGenericsThingsAndActionFieldsObject), nil\n}\n\nfunc (g *GraphQL) genGenericsFieldForWeaviateLocalMetaFetchObj(localMetaFetchObject *graphql.Object) (*graphql.Object, error) {\n\n\tmetaFetchGenericsField := graphql.Fields{\n\t\t\"Generics\": &graphql.Field{\n\t\t\tName: \"WeaviateLocalMetaFetchGenericsObj\",\n\t\t\tDescription: \"Fetch generic meta information based on the type\",\n\t\t\tType: localMetaFetchObject,\n\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\treturn nil, fmt.Errorf(\"Not supported\")\n\t\t\t},\n\t\t},\n\t}\n\tmetaFetchGenericsFieldObject := graphql.ObjectConfig{\n\t\tName: \"WeaviateLocalMetaFetchObj\",\n\t\tFields: metaFetchGenericsField,\n\t\tDescription: \"Fetch things or actions on the internal Weaviate\",\n\t}\n\treturn graphql.NewObject(metaFetchGenericsFieldObject), nil\n}\n\nfunc (g *GraphQL) genConvertedFetchAndMetaGenericsFields(\n\tlocalConvertedFetchObject *graphql.Object,\n\tlocalMetaGenericsObject *graphql.Object) (*graphql.Object, error) {\n\n\tconvertedAndMetaFetchFields := graphql.Fields{\n\t\t\"ConvertedFetch\": &graphql.Field{\n\t\t\tName: \"WeaviateLocalConvertedFetch\",\n\t\t\tType: localConvertedFetchObject,\n\t\t\tDescription: \"Do a converted fetch to search Things or Actions on the local weaviate\",\n\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\treturn nil, fmt.Errorf(\"Not supported\")\n\t\t\t},\n\t\t},\n\t\t\"MetaFetch\": &graphql.Field{\n\t\t\tName: \"WeaviateLocalMetaFetch\",\n\t\t\tType: localMetaGenericsObject,\n\t\t\tDescription: \"Fetch meta information about Things or Actions on the local weaviate\",\n\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\treturn nil, fmt.Errorf(\"Not supported\")\n\t\t\t},\n\t\t},\n\t}\n\tweaviateLocalObject := &graphql.ObjectConfig{\n\t\tName: \"WeaviateLocalObj\",\n\t\tFields: convertedAndMetaFetchFields,\n\t\tDescription: \"Type of fetch on the internal Weaviate\",\n\t}\n\treturn graphql.NewObject(*weaviateLocalObject), nil\n}\n\nfunc (g *GraphQL) genLocalField(localMetaAndConvertedFetchObject *graphql.Object) (*graphql.Field, error) {\n\n\tfield := graphql.Field{\n\t\tType: localMetaAndConvertedFetchObject,\n\t\tDescription: \"Locate on the local Weaviate\",\n\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\treturn nil, fmt.Errorf(\"Not supported\")\n\t\t},\n\t}\n\treturn &field, nil\n}\n\nfunc (g *GraphQL) genRootQueryFields(localField *graphql.Field) (graphql.Fields, error) {\n\n\tvar rootQueryFields = graphql.Fields{\n\t\t\"Local\": localField,\n\t\t\"Network\": nil,\n\t}\n\treturn rootQueryFields, nil\n}\n\nfunc mergeStrings(stringParts ...string) string {\n\n\tvar buffer bytes.Buffer\n\tfor _, stringPart := range stringParts {\n\t\tbuffer.WriteString(stringPart)\n\t}\n\n\treturn buffer.String()\n}\n<commit_msg>gh-384: minor bug fix<commit_after>\/* _ _\n *__ _____ __ ___ ___ __ _| |_ ___\n *\\ \\ \/\\ \/ \/ _ \\\/ _` \\ \\ \/ \/ |\/ _` | __\/ _ \\\n * \\ V V \/ __\/ (_| |\\ V \/| | (_| | || __\/\n * \\_\/\\_\/ \\___|\\__,_| \\_\/ |_|\\__,_|\\__\\___|\n *\n * Copyright © 2016 - 2018 Weaviate. 
All rights reserved.\n * LICENSE: https:\/\/github.com\/creativesoftwarefdn\/weaviate\/blob\/develop\/LICENSE.md\n * AUTHOR: Bob van Luijt (bob@kub.design)\n * See www.creativesoftwarefdn.org for details\n * Contact: @CreativeSofwFdn \/ bob@kub.design\n *\/\n\npackage graphqlapi\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/graphql-go\/graphql\"\n)\n\n\/\/ Build the GraphQL schema based on\n\/\/ 1) the static query structure (e.g. LocalFetch)\n\/\/ 2) the (dynamic) database schema from Weaviate\n\nfunc (g *GraphQL) buildGraphqlSchema() error {\n\n\trootFieldsObject, err := g.assembleFullSchema()\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not build GraphQL schema, because: %v\", err)\n\t}\n\n\tschemaObject := graphql.ObjectConfig{\n\t\tName: \"WeaviateObj\",\n\t\tFields: rootFieldsObject,\n\t\tDescription: \"Location of the root query\",\n\t}\n\n\t\/\/ Run graphql.NewSchema in a sub-closure, so that we can recover from panics.\n\t\/\/ We need to use panics to return errors deep inside the dynamic generation of the GraphQL schema,\n\t\/\/ inside the FieldThunks. There is _no_ way to bubble up an error besides panicking.\n\tfunc() {\n\t\tdefer func() {\n\t\t\tif r := recover(); r != nil {\n\t\t\t\tvar ok bool\n\t\t\t\terr, ok = r.(error) \/\/ can't shadow err here; we need the err from outside the function closure.\n\t\t\t\tif !ok {\n\t\t\t\t\terr = fmt.Errorf(\"%v\", r)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\n\t\tg.weaviateGraphQLSchema, err = graphql.NewSchema(graphql.SchemaConfig{\n\t\t\tQuery: graphql.NewObject(schemaObject),\n\t\t})\n\t}()\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not build GraphQL schema, because: %v\", err)\n\t} else {\n\t\treturn nil\n\t}\n}\n\n\/\/ check: handle class refs for multiple objects as a datatype (union)\n\/\/ check: do this for Things as well\n\/\/ check: refactor to returning objects instead of object configs\n\/\/ check: check all Things strings\n\/\/ check: confirm output of dynamic schema generation; classes as properties in lists y\/n?\n\/\/ check: implement metafetch\n\/\/ TODO: implement filters\n\nfunc (g *GraphQL) assembleFullSchema() (graphql.Fields, error) {\n\n\t\/\/ This map is used to store all the Thing and Action ObjectConfigs, so that we can use them in references.\n\tconvertedFetchActionsAndThings := make(map[string]*graphql.Object)\n\n\tlocalConvertedFetchActions, err := g.genActionClassFieldsFromSchema(&convertedFetchActionsAndThings)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate action fields from schema for local convertedfetch because: %v\", err)\n\t}\n\n\tlocalConvertedFetchThings, err := g.genThingClassFieldsFromSchema(&convertedFetchActionsAndThings)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate thing fields from schema for local convertedfetch because: %v\", err)\n\t}\n\n\tlocalMetaFetchActions, err := g.genMetaClassFieldsFromSchema(g.databaseSchema.ActionSchema.Schema.Classes, true)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate action fields from schema for local metafetch because: %v\", err)\n\t}\n\n\tlocalMetaFetchThings, err := g.genMetaClassFieldsFromSchema(g.databaseSchema.ThingSchema.Schema.Classes, false)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate thing fields from schema for local metafetch because: %v\", err)\n\t}\n\n\tlocalConvertedFetchObject, err := g.genThingsAndActionsFieldsForWeaviateLocalConvertedFetchObj(localConvertedFetchActions, localConvertedFetchThings)\n\tif err != nil {\n\t\treturn nil, 
fmt.Errorf(\"Failed to generate things and action fields for local convertedfetch because: %v\", err)\n\t}\n\n\tlocalMetaFetchObject, err := g.genThingsAndActionsFieldsForWeaviateLocalMetaFetchGenericsObj(localMetaFetchActions, localMetaFetchThings)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate things and action fields for local metafetch because: %v\", err)\n\t}\n\n\tlocalMetaGenericsObject, err := g.genGenericsFieldForWeaviateLocalMetaFetchObj(localMetaFetchObject)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate generics field for local metafetch because: %v\", err)\n\t}\n\n\tlocalMetaAndConvertedFetchObject, err := g.genConvertedFetchAndMetaGenericsFields(localConvertedFetchObject, localMetaGenericsObject)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate meta and convertedfetch fields for local weaviateobject because: %v\", err)\n\t}\n\n\tlocalObject, err := g.genLocalField(localMetaAndConvertedFetchObject)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate local field for local weaviateobject because: %v\", err)\n\t}\n\n\trootFieldsObject, err := g.genRootQueryFields(localObject)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate root query because: %v\", err)\n\t}\n\n\treturn rootFieldsObject, nil\n}\n\n\/\/ generate the static parts of the schema\nfunc (g *GraphQL) genThingsAndActionsFieldsForWeaviateLocalConvertedFetchObj(localConvertedFetchActions *graphql.Object,\n\tlocalConvertedFetchThings *graphql.Object) (*graphql.Object, error) {\n\n\tconvertedFetchThingsAndActionFields := graphql.Fields{\n\t\t\"Actions\": &graphql.Field{\n\t\t\tName: \"WeaviateLocalConvertedFetchActions\",\n\t\t\tDescription: \"Locate Actions on the local Weaviate\",\n\t\t\tType: localConvertedFetchActions,\n\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\treturn nil, fmt.Errorf(\"Not supported\")\n\t\t\t},\n\t\t},\n\t\t\"Things\": &graphql.Field{\n\t\t\tName: \"WeaviateLocalConvertedFetchThings\",\n\t\t\tDescription: \"Locate Things on the local Weaviate\",\n\t\t\tType: localConvertedFetchThings,\n\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\treturn nil, fmt.Errorf(\"Not supported\")\n\t\t\t},\n\t\t},\n\t}\n\tconvertedFetchThingsAndActionFieldsObject := graphql.ObjectConfig{\n\t\tName: \"WeaviateLocalConvertedFetchObj\",\n\t\tFields: convertedFetchThingsAndActionFields,\n\t\tDescription: \"Fetch things or actions on the internal Weaviate\",\n\t}\n\treturn graphql.NewObject(convertedFetchThingsAndActionFieldsObject), nil\n}\n\nfunc (g *GraphQL) genThingsAndActionsFieldsForWeaviateLocalMetaFetchGenericsObj(localMetaFetchActions *graphql.Object, localMetaFetchThings *graphql.Object) (*graphql.Object, error) {\n\n\tmetaFetchGenericsThingsAndActionFields := graphql.Fields{\n\t\t\"Actions\": &graphql.Field{\n\t\t\tName: \"WeaviateLocalMetaFetchGenericsActions\",\n\t\t\tDescription: \"Action to fetch for meta generic fetch\",\n\t\t\tType: localMetaFetchActions,\n\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\treturn nil, fmt.Errorf(\"Not supported\")\n\t\t\t},\n\t\t},\n\t\t\"Things\": &graphql.Field{\n\t\t\tName: \"WeaviateLocalMetaFetchGenericsThings\",\n\t\t\tDescription: \"Thing to fetch for meta generic fetch\",\n\t\t\tType: localMetaFetchThings,\n\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\treturn nil, fmt.Errorf(\"Not 
supported\")\n\t\t\t},\n\t\t},\n\t}\n\tmetaFetchGenericsThingsAndActionFieldsObject := graphql.ObjectConfig{\n\t\tName: \"WeaviateLocalMetaFetchGenericsObj\",\n\t\tFields: metaFetchGenericsThingsAndActionFields,\n\t\tDescription: \"Object type to fetch\",\n\t}\n\treturn graphql.NewObject(metaFetchGenericsThingsAndActionFieldsObject), nil\n}\n\nfunc (g *GraphQL) genGenericsFieldForWeaviateLocalMetaFetchObj(localMetaFetchObject *graphql.Object) (*graphql.Object, error) {\n\n\tmetaFetchGenericsField := graphql.Fields{\n\t\t\"Generics\": &graphql.Field{\n\t\t\tName: \"WeaviateLocalMetaFetchGenericsObj\",\n\t\t\tDescription: \"Fetch generic meta information based on the type\",\n\t\t\tType: localMetaFetchObject,\n\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\treturn nil, fmt.Errorf(\"Not supported\")\n\t\t\t},\n\t\t},\n\t}\n\tmetaFetchGenericsFieldObject := graphql.ObjectConfig{\n\t\tName: \"WeaviateLocalMetaFetchObj\",\n\t\tFields: metaFetchGenericsField,\n\t\tDescription: \"Fetch things or actions on the internal Weaviate\",\n\t}\n\treturn graphql.NewObject(metaFetchGenericsFieldObject), nil\n}\n\nfunc (g *GraphQL) genConvertedFetchAndMetaGenericsFields(\n\tlocalConvertedFetchObject *graphql.Object,\n\tlocalMetaGenericsObject *graphql.Object) (*graphql.Object, error) {\n\n\tconvertedAndMetaFetchFields := graphql.Fields{\n\t\t\"ConvertedFetch\": &graphql.Field{\n\t\t\tName: \"WeaviateLocalConvertedFetch\",\n\t\t\tType: localConvertedFetchObject,\n\t\t\tDescription: \"Do a converted fetch to search Things or Actions on the local weaviate\",\n\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\treturn nil, fmt.Errorf(\"Not supported\")\n\t\t\t},\n\t\t},\n\t\t\"MetaFetch\": &graphql.Field{\n\t\t\tName: \"WeaviateLocalMetaFetch\",\n\t\t\tType: localMetaGenericsObject,\n\t\t\tDescription: \"Fetch meta information about Things or Actions on the local weaviate\",\n\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\treturn nil, fmt.Errorf(\"Not supported\")\n\t\t\t},\n\t\t},\n\t}\n\tweaviateLocalObject := &graphql.ObjectConfig{\n\t\tName: \"WeaviateLocalObj\",\n\t\tFields: convertedAndMetaFetchFields,\n\t\tDescription: \"Type of fetch on the internal Weaviate\",\n\t}\n\treturn graphql.NewObject(*weaviateLocalObject), nil\n}\n\nfunc (g *GraphQL) genLocalField(localMetaAndConvertedFetchObject *graphql.Object) (*graphql.Field, error) {\n\n\tfield := graphql.Field{\n\t\tType: localMetaAndConvertedFetchObject,\n\t\tDescription: \"Locate on the local Weaviate\",\n\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\treturn nil, fmt.Errorf(\"Not supported\")\n\t\t},\n\t}\n\treturn &field, nil\n}\n\nfunc (g *GraphQL) genRootQueryFields(localField *graphql.Field) (graphql.Fields, error) {\n\n\tvar rootQueryFields = graphql.Fields{\n\t\t\"Local\": localField,\n\t\t\"Network\": nil,\n\t}\n\treturn rootQueryFields, nil\n}\n\nfunc mergeStrings(stringParts ...string) string {\n\n\tvar buffer bytes.Buffer\n\tfor _, stringPart := range stringParts {\n\t\tbuffer.WriteString(stringPart)\n\t}\n\n\treturn buffer.String()\n}\n<|endoftext|>"} {"text":"<commit_before>package qingcloud\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\tqc \"github.com\/yunify\/qingcloud-sdk-go\/service\"\n)\n\nfunc TestAccQingcloudVxNet_basic(t *testing.T) {\n\tvar vxnet 
qc.DescribeVxNetsOutput\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() {\n\t\t\ttestAccPreCheck(t)\n\t\t},\n\n\t\t\/\/ module name\n\t\tIDRefreshName: \"qingcloud_vxnet.foo\",\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckVxNetDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccVxNetConfig,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckVxNetExists(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", &vxnet),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", \"type\", \"1\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccVxNetConfigTwo,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckVxNetExists(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", &vxnet),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", \"type\", \"1\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", \"description\", \"vxnet\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", \"name\", \"vxnet\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccVxNetConfigThree,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckVxNetExists(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", &vxnet),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", \"type\", \"0\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", \"description\", \"vxnet\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", \"name\", \"vxnet\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n\n}\n\nfunc TestAccQingcloudVxNet_tag(t *testing.T) {\n\tvar vxnet qc.DescribeVxNetsOutput\n\tvxnetTag1Name := os.Getenv(\"TRAVIS_BUILD_ID\") + \"-\" + os.Getenv(\"TRAVIS_JOB_NUMBER\") + \"-vxnet-tag1\"\n\tvxnetTag2Name := os.Getenv(\"TRAVIS_BUILD_ID\") + \"-\" + os.Getenv(\"TRAVIS_JOB_NUMBER\") + \"-vxnet-tag2\"\n\n\ttestTagNameValue := func(names ...string) resource.TestCheckFunc {\n\t\treturn func(state *terraform.State) error {\n\t\t\ttags := vxnet.VxNetSet[0].Tags\n\t\t\tsame_count := 0\n\t\t\tfor _, tag := range tags {\n\t\t\t\tfor _, name := range names {\n\t\t\t\t\tif qc.StringValue(tag.TagName) == name {\n\t\t\t\t\t\tsame_count++\n\t\t\t\t\t}\n\t\t\t\t\tif same_count == len(vxnet.VxNetSet[0].Tags) {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"tag name error %#v\", names)\n\t\t}\n\t}\n\ttestTagDetach := func() resource.TestCheckFunc {\n\t\treturn func(state *terraform.State) error {\n\t\t\tif len(vxnet.VxNetSet[0].Tags) != 0 {\n\t\t\t\treturn fmt.Errorf(\"tag not detach \")\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() {\n\t\t\ttestAccPreCheck(t)\n\t\t},\n\n\t\t\/\/ module name\n\t\tIDRefreshName: \"qingcloud_vxnet.foo\",\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckVxNetDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: fmt.Sprintf(testAccVxNetConfigTagTemplate, vxnetTag1Name, vxnetTag2Name),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckVxNetExists(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", &vxnet),\n\t\t\t\t\ttestTagNameValue(vxnetTag1Name, vxnetTag2Name),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: fmt.Sprintf(testAccVxNetConfigTagTwoTemplate, vxnetTag1Name, vxnetTag2Name),\n\t\t\t\tCheck: 
resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckVxNetExists(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", &vxnet),\n\t\t\t\t\ttestTagDetach(),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n\n}\n\nfunc testAccCheckVxNetExists(n string, eip *qc.DescribeVxNetsOutput) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No VxNet ID is set\")\n\t\t}\n\n\t\tclient := testAccProvider.Meta().(*QingCloudClient)\n\t\tinput := new(qc.DescribeVxNetsInput)\n\t\tinput.VxNets = []*string{qc.String(rs.Primary.ID)}\n\t\td, err := client.vxnet.DescribeVxNets(input)\n\n\t\tlog.Printf(\"[WARN] eip id %#v\", rs.Primary.ID)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif d == nil || len(d.VxNetSet) == 0 {\n\t\t\treturn fmt.Errorf(\"VxNet not found\")\n\t\t}\n\n\t\t*eip = *d\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckVxNetDestroy(s *terraform.State) error {\n\treturn testAccCheckVxNetDestroyWithProvider(s, testAccProvider)\n}\n\nfunc testAccCheckVxNetDestroyWithProvider(s *terraform.State, provider *schema.Provider) error {\n\tclient := provider.Meta().(*QingCloudClient)\n\tfor _, rs := range s.RootModule().Resources {\n\t\tif rs.Type != \"qingcloud_vxnet\" {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Try to find the resource\n\t\tinput := new(qc.DescribeVxNetsInput)\n\t\tinput.VxNets = []*string{qc.String(rs.Primary.ID)}\n\t\toutput, err := client.vxnet.DescribeVxNets(input)\n\t\tif err == nil && qc.IntValue(output.RetCode) == 0 {\n\t\t\tif len(output.VxNetSet) != 0 {\n\t\t\t\treturn fmt.Errorf(\"Found VxNet: %s\", rs.Primary.ID)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nconst testAccVxNetConfig = `\nresource \"qingcloud_vxnet\" \"foo\" {\n type = 1\n} `\n\nconst testAccVxNetConfigTwo = `\nresource \"qingcloud_vxnet\" \"foo\" {\n name = \"vxnet\"\n description = \"vxnet\"\n\ttype = 1\n} `\nconst testAccVxNetConfigThree = `\nresource \"qingcloud_vxnet\" \"foo\" {\n name = \"vxnet\"\n description = \"vxnet\"\n\ttype = 0\n} `\n\nconst testAccVxNetConfigTagTemplate = `\n\nresource \"qingcloud_vxnet\" \"foo\" {\n type = 1\n\ttag_ids = [\"${qingcloud_tag.test.id}\",\n\t\t\t\t\"${qingcloud_tag.test2.id}\"]\n}\nresource \"qingcloud_tag\" \"test\"{\n\tname=\"%v\"\n}\nresource \"qingcloud_tag\" \"test2\"{\n\tname=\"%v\"\n}\n`\n\nconst testAccVxNetConfigTagTwoTemplate = `\n\nresource \"qingcloud_vxnet\" \"foo\" {\n type = 1\n}\nresource \"qingcloud_tag\" \"test\"{\n\tname=\"%v\"\n}\nresource \"qingcloud_tag\" \"test2\"{\n\tname=\"%v\"\n}\n`\n<commit_msg>Add vxnet with vpc test<commit_after>package qingcloud\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\tqc \"github.com\/yunify\/qingcloud-sdk-go\/service\"\n)\n\nfunc TestAccQingcloudVxNet_basic(t *testing.T) {\n\tvar vxnet qc.DescribeVxNetsOutput\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() {\n\t\t\ttestAccPreCheck(t)\n\t\t},\n\n\t\t\/\/ module name\n\t\tIDRefreshName: \"qingcloud_vxnet.foo\",\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckVxNetDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccVxNetConfig,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckVxNetExists(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", 
&vxnet),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", \"type\", \"1\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccVxNetConfigTwo,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckVxNetExists(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", &vxnet),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", \"type\", \"1\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", \"description\", \"vxnet\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", \"name\", \"vxnet\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccVxNetConfigThree,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckVxNetExists(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", &vxnet),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", \"type\", \"0\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", \"description\", \"vxnet\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", \"name\", \"vxnet\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n\n}\n\nfunc TestAccQingcloudVxNet_tag(t *testing.T) {\n\tvar vxnet qc.DescribeVxNetsOutput\n\tvxnetTag1Name := os.Getenv(\"TRAVIS_BUILD_ID\") + \"-\" + os.Getenv(\"TRAVIS_JOB_NUMBER\") + \"-vxnet-tag1\"\n\tvxnetTag2Name := os.Getenv(\"TRAVIS_BUILD_ID\") + \"-\" + os.Getenv(\"TRAVIS_JOB_NUMBER\") + \"-vxnet-tag2\"\n\n\ttestTagNameValue := func(names ...string) resource.TestCheckFunc {\n\t\treturn func(state *terraform.State) error {\n\t\t\ttags := vxnet.VxNetSet[0].Tags\n\t\t\tsame_count := 0\n\t\t\tfor _, tag := range tags {\n\t\t\t\tfor _, name := range names {\n\t\t\t\t\tif qc.StringValue(tag.TagName) == name {\n\t\t\t\t\t\tsame_count++\n\t\t\t\t\t}\n\t\t\t\t\tif same_count == len(vxnet.VxNetSet[0].Tags) {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"tag name error %#v\", names)\n\t\t}\n\t}\n\ttestTagDetach := func() resource.TestCheckFunc {\n\t\treturn func(state *terraform.State) error {\n\t\t\tif len(vxnet.VxNetSet[0].Tags) != 0 {\n\t\t\t\treturn fmt.Errorf(\"tag not detach \")\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() {\n\t\t\ttestAccPreCheck(t)\n\t\t},\n\n\t\t\/\/ module name\n\t\tIDRefreshName: \"qingcloud_vxnet.foo\",\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckVxNetDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: fmt.Sprintf(testAccVxNetConfigTagTemplate, vxnetTag1Name, vxnetTag2Name),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckVxNetExists(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", &vxnet),\n\t\t\t\t\ttestTagNameValue(vxnetTag1Name, vxnetTag2Name),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: fmt.Sprintf(testAccVxNetConfigTagTwoTemplate, vxnetTag1Name, vxnetTag2Name),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckVxNetExists(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", &vxnet),\n\t\t\t\t\ttestTagDetach(),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n\n}\n\nfunc TestAccQingcloudVxNet_vpc(t *testing.T) {\n\tvar vxnet qc.DescribeVxNetsOutput\n\n\ttestVpcAttach := func() resource.TestCheckFunc {\n\t\treturn func(state *terraform.State) error {\n\t\t\tif vxnet.VxNetSet[0].Router != nil {\n\t\t\t\tinput := new(qc.DescribeRouterVxNetsInput)\n\t\t\t\tinput.Router = 
vxnet.VxNetSet[0].VpcRouterID\n\t\t\t\tinput.Verbose = qc.Int(1)\n\t\t\t\tclient := testAccProvider.Meta().(*QingCloudClient)\n\t\t\t\td, err := client.router.DescribeRouterVxNets(input)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif d == nil || len(d.RouterVxNetSet) == 0 {\n\t\t\t\t\treturn fmt.Errorf(\"Router not found \")\n\t\t\t\t}\n\t\t\t\thaveVxnet := false\n\t\t\t\tfor _, oneVxnet := range d.RouterVxNetSet {\n\t\t\t\t\tif qc.StringValue(oneVxnet.VxNetID) == qc.StringValue(vxnet.VxNetSet[0].VxNetID) {\n\t\t\t\t\t\thaveVxnet = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !haveVxnet {\n\t\t\t\t\treturn fmt.Errorf(\"Router not match \")\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"Can not find router \")\n\t\t\t}\n\t\t}\n\t}\n\ttestVpcDetach := func() resource.TestCheckFunc {\n\t\treturn func(state *terraform.State) error {\n\t\t\tif vxnet.VxNetSet[0].Router != nil && qc.StringValue(vxnet.VxNetSet[0].Router.RouterID) != \"\" {\n\t\t\t\treturn fmt.Errorf(\"Router not detach \")\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() {\n\t\t\ttestAccPreCheck(t)\n\t\t},\n\n\t\t\/\/ module name\n\t\tIDRefreshName: \"qingcloud_vxnet.foo\",\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckVxNetDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccVxNetConfigVpc,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckVxNetExists(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", &vxnet),\n\t\t\t\t\ttestVpcAttach(),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccVxNetConfigVpcTwo,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckVxNetExists(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", &vxnet),\n\t\t\t\t\ttestVpcDetach(),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccVxNetConfigVpcThree,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckVxNetExists(\n\t\t\t\t\t\t\"qingcloud_vxnet.foo\", &vxnet),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n\n}\n\nfunc testAccCheckVxNetExists(n string, eip *qc.DescribeVxNetsOutput) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No VxNet ID is set\")\n\t\t}\n\n\t\tclient := testAccProvider.Meta().(*QingCloudClient)\n\t\tinput := new(qc.DescribeVxNetsInput)\n\t\tinput.VxNets = []*string{qc.String(rs.Primary.ID)}\n\t\td, err := client.vxnet.DescribeVxNets(input)\n\n\t\tlog.Printf(\"[WARN] eip id %#v\", rs.Primary.ID)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif d == nil || len(d.VxNetSet) == 0 {\n\t\t\treturn fmt.Errorf(\"VxNet not found\")\n\t\t}\n\n\t\t*eip = *d\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckVxNetDestroy(s *terraform.State) error {\n\treturn testAccCheckVxNetDestroyWithProvider(s, testAccProvider)\n}\n\nfunc testAccCheckVxNetDestroyWithProvider(s *terraform.State, provider *schema.Provider) error {\n\tclient := provider.Meta().(*QingCloudClient)\n\tfor _, rs := range s.RootModule().Resources {\n\t\tif rs.Type != \"qingcloud_vxnet\" {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Try to find the resource\n\t\tinput := new(qc.DescribeVxNetsInput)\n\t\tinput.VxNets = []*string{qc.String(rs.Primary.ID)}\n\t\toutput, err := client.vxnet.DescribeVxNets(input)\n\t\tif err == nil && qc.IntValue(output.RetCode) == 0 {\n\t\t\tif 
len(output.VxNetSet) != 0 {\n\t\t\t\treturn fmt.Errorf(\"Found VxNet: %s\", rs.Primary.ID)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nconst testAccVxNetConfig = `\nresource \"qingcloud_vxnet\" \"foo\" {\n type = 1\n} `\n\nconst testAccVxNetConfigTwo = `\nresource \"qingcloud_vxnet\" \"foo\" {\n name = \"vxnet\"\n description = \"vxnet\"\n\ttype = 1\n} `\nconst testAccVxNetConfigThree = `\nresource \"qingcloud_vxnet\" \"foo\" {\n name = \"vxnet\"\n description = \"vxnet\"\n\ttype = 0\n} `\n\nconst testAccVxNetConfigTagTemplate = `\n\nresource \"qingcloud_vxnet\" \"foo\" {\n type = 1\n\ttag_ids = [\"${qingcloud_tag.test.id}\",\n\t\t\t\t\"${qingcloud_tag.test2.id}\"]\n}\nresource \"qingcloud_tag\" \"test\"{\n\tname=\"%v\"\n}\nresource \"qingcloud_tag\" \"test2\"{\n\tname=\"%v\"\n}\n`\n\nconst testAccVxNetConfigTagTwoTemplate = `\n\nresource \"qingcloud_vxnet\" \"foo\" {\n type = 1\n}\nresource \"qingcloud_tag\" \"test\"{\n\tname=\"%v\"\n}\nresource \"qingcloud_tag\" \"test2\"{\n\tname=\"%v\"\n}\n`\n\nconst testAccVxNetConfigVpc = `\n\nresource \"qingcloud_security_group\" \"foo\" {\n name = \"first_sg\"\n}\nresource \"qingcloud_vpc\" \"foo\" {\n\tsecurity_group_id = \"${qingcloud_security_group.foo.id}\"\n\tvpc_network = \"192.168.0.0\/16\"\n}\n\nresource \"qingcloud_vxnet\" \"foo\" {\n type = 1\n\tvpc_id = \"${qingcloud_vpc.foo.id}\"\n\tip_network = \"192.168.0.0\/24\"\n}\n`\n\nconst testAccVxNetConfigVpcTwo = `\n\nresource \"qingcloud_security_group\" \"foo\" {\n name = \"first_sg\"\n}\nresource \"qingcloud_vpc\" \"foo\" {\n\tsecurity_group_id = \"${qingcloud_security_group.foo.id}\"\n\tvpc_network = \"192.168.0.0\/16\"\n}\n\nresource \"qingcloud_vxnet\" \"foo\" {\n type = 1\n}\n`\nconst testAccVxNetConfigVpcThree = `\n\nresource \"qingcloud_security_group\" \"foo\" {\n name = \"first_sg\"\n}\nresource \"qingcloud_vpc\" \"foo\" {\n\tsecurity_group_id = \"${qingcloud_security_group.foo.id}\"\n\tvpc_network = \"192.168.0.0\/16\"\n}\n\nresource \"qingcloud_vxnet\" \"foo\" {\n type = 1\n\tvpc_id = \"${qingcloud_vpc.foo.id}\"\n\tip_network = \"192.168.0.0\/24\"\n}\n`\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage cshared_test\n\nimport (\n\t\"debug\/elf\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n\t\"unicode\"\n)\n\n\/\/ C compiler with args (from $(go env CC) $(go env GOGCCFLAGS)).\nvar cc []string\n\n\/\/ An environment with GOPATH=$(pwd).\nvar gopathEnv []string\n\n\/\/ \".exe\" on Windows.\nvar exeSuffix string\n\nvar GOOS, GOARCH, GOROOT string\nvar installdir, androiddir, ldlibrarypath string\nvar libSuffix, libgoname string\n\nfunc init() {\n\tGOOS = goEnv(\"GOOS\")\n\tGOARCH = goEnv(\"GOARCH\")\n\tGOROOT = goEnv(\"GOROOT\")\n\n\tif _, err := os.Stat(GOROOT); os.IsNotExist(err) {\n\t\tlog.Fatalf(\"Unable to find GOROOT at '%s'\", GOROOT)\n\t}\n\n\t\/\/ Directory where cgo headers and outputs will be installed.\n\t\/\/ The installation directory format varies depending on the platform.\n\tinstalldir = path.Join(\"pkg\", fmt.Sprintf(\"%s_%s_testcshared_shared\", GOOS, GOARCH))\n\tswitch GOOS {\n\tcase \"darwin\":\n\t\tlibSuffix = \"dylib\"\n\t\tinstalldir = path.Join(\"pkg\", fmt.Sprintf(\"%s_%s_testcshared\", GOOS, GOARCH))\n\tcase \"windows\":\n\t\tlibSuffix = \"dll\"\n\tdefault:\n\t\tlibSuffix = \"so\"\n\t}\n\n\tandroiddir = fmt.Sprintf(\"\/data\/local\/tmp\/testcshared-%d\", os.Getpid())\n\tlibgoname = \"libgo.\" + libSuffix\n\n\tccOut := goEnv(\"CC\")\n\tcc = []string{string(ccOut)}\n\n\tout := goEnv(\"GOGCCFLAGS\")\n\tquote := '\\000'\n\tstart := 0\n\tlastSpace := true\n\tbackslash := false\n\ts := string(out)\n\tfor i, c := range s {\n\t\tif quote == '\\000' && unicode.IsSpace(c) {\n\t\t\tif !lastSpace {\n\t\t\t\tcc = append(cc, s[start:i])\n\t\t\t\tlastSpace = true\n\t\t\t}\n\t\t} else {\n\t\t\tif lastSpace {\n\t\t\t\tstart = i\n\t\t\t\tlastSpace = false\n\t\t\t}\n\t\t\tif quote == '\\000' && !backslash && (c == '\"' || c == '\\'') {\n\t\t\t\tquote = c\n\t\t\t\tbackslash = false\n\t\t\t} else if !backslash && quote == c {\n\t\t\t\tquote = '\\000'\n\t\t\t} else if (quote == '\\000' || quote == '\"') && !backslash && c == '\\\\' {\n\t\t\t\tbackslash = true\n\t\t\t} else {\n\t\t\t\tbackslash = false\n\t\t\t}\n\t\t}\n\t}\n\tif !lastSpace {\n\t\tcc = append(cc, s[start:])\n\t}\n\n\tif GOOS == \"darwin\" {\n\t\t\/\/ For Darwin\/ARM.\n\t\t\/\/ TODO(crawshaw): can we do better?\n\t\tcc = append(cc, []string{\"-framework\", \"CoreFoundation\", \"-framework\", \"Foundation\"}...)\n\t}\n\tlibgodir := GOOS + \"_\" + GOARCH\n\tswitch GOOS {\n\tcase \"darwin\":\n\t\tif GOARCH == \"arm\" || GOARCH == \"arm64\" {\n\t\t\tlibgodir += \"_shared\"\n\t\t}\n\tcase \"dragonfly\", \"freebsd\", \"linux\", \"netbsd\", \"openbsd\", \"solaris\":\n\t\tlibgodir += \"_shared\"\n\t}\n\tcc = append(cc, \"-I\", filepath.Join(\"pkg\", libgodir))\n\n\t\/\/ Build an environment with GOPATH=$(pwd)\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(2)\n\t}\n\tgopathEnv = append(os.Environ(), \"GOPATH=\"+dir)\n\tldlibrarypath = \"LD_LIBRARY_PATH=\" + dir\n\n\tif GOOS == \"windows\" {\n\t\texeSuffix = \".exe\"\n\t}\n}\n\nfunc goEnv(key string) string {\n\tout, err := exec.Command(\"go\", \"env\", key).Output()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"go env %s failed:\\n%s\", key, err)\n\t\tfmt.Fprintf(os.Stderr, \"%s\", err.(*exec.ExitError).Stderr)\n\t\tos.Exit(2)\n\t}\n\treturn strings.TrimSpace(string(out))\n}\n\nfunc cmdToRun(name string) []string {\n\treturn
[]string{\".\/\" + name + exeSuffix}\n}\n\nfunc adbPush(t *testing.T, filename string) {\n\tif GOOS != \"android\" {\n\t\treturn\n\t}\n\targs := append(\"adb\", \"push\", filename, fmt.Sprintf(\"%s\/%s\", androiddir, filename))\n\tcmd := exec.Command(args[0], args[1:]...)\n\tif out, err := cmd.CombinedOutput(); err != nil {\n\t\tt.Fatalf(\"adb command failed: %v\\n%s\\n\", err, out)\n\t}\n}\n\nfunc adbRun(t *testing.T, adbargs ...string) string {\n\tif GOOS != \"android\" {\n\t\tt.Fatalf(\"trying to run adb command when operating system is not android.\")\n\t}\n\targs := append(\"adb\", \"shell\")\n\targs = append(args, adbargs...)\n\tcmd := exec.Command(args[0], args[1:]...)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"adb command failed: %v\\n%s\\n\", err, out)\n\t}\n\n\treturn strings.Replace(string(out), \"\\r\", \"\", -1)\n}\n\nfunc runwithenv(t *testing.T, env []string, args ...string) string {\n\tif GOOS == \"android\" {\n\t\treturn adbRun(t, args...)\n\t}\n\n\tcmd := exec.Command(args[0], args[1:]...)\n\tcmd.Env = env\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"command failed: %v\\n%v\\n%s\\n\", args, err, out)\n\t} else {\n\t\tt.Logf(\"run: %v\", args)\n\t}\n\n\treturn string(out)\n}\n\nfunc run(t *testing.T, args ...string) string {\n\tif GOOS == \"android\" {\n\t\treturn adbRun(t, args...)\n\t}\n\n\tcmd := exec.Command(args[0], args[1:]...)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"command failed: %v\\n%v\\n%s\\n\", args, err, out)\n\t} else {\n\t\tt.Logf(\"run: %v\", args)\n\t}\n\n\treturn string(out)\n}\n\nfunc runwithldlibrarypath(t *testing.T, args ...string) string {\n\treturn runwithenv(t, append(gopathEnv, ldlibrarypath), args...)\n}\n\nfunc rungocmd(t *testing.T, args ...string) string {\n\treturn runwithenv(t, gopathEnv, args...)\n}\n\nfunc createHeaders(t *testing.T) {\n\trungocmd(t,\n\t\t\"go\", \"install\",\n\t\t\"-buildmode=c-shared\", \"-installsuffix\",\n\t\t\"testcshared\", \"libgo\",\n\t)\n\n\trungocmd(t,\n\t\t\"go\", \"build\",\n\t\t\"-buildmode=c-shared\", \"-installsuffix\",\n\t\t\"testcshared\", \"-o\", libgoname,\n\t\tfilepath.Join(\"src\", \"libgo\", \"libgo.go\"),\n\t)\n\tadbPush(t, libgoname)\n\n\tif GOOS == \"linux\" || GOOS == \"android\" {\n\t\tf, err := elf.Open(libgoname)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"elf.Open failed: \", err)\n\t\t}\n\t\tdefer f.Close()\n\t\tif hasDynTag(t, f, elf.DT_TEXTREL) {\n\t\t\tt.Fatalf(\"%s has DT_TEXTREL flag\", libgoname)\n\t\t}\n\t}\n}\n\nfunc cleanupHeaders() {\n\tos.Remove(\"libgo.h\")\n}\n\nfunc setupAndroid(t *testing.T) {\n\tif GOOS != \"android\" {\n\t\treturn\n\t}\n\tadbRun(t, \"mkdir\", \"-p\", androiddir)\n}\n\nfunc cleanupAndroid(t *testing.T) {\n\tif GOOS != \"android\" {\n\t\treturn\n\t}\n\tadbRun(t, \"rm\", \"-rf\", androiddir)\n}\n\n\/\/ test0: exported symbols in shared lib are accessible.\nfunc TestExportedSymbols(t *testing.T) {\n\tcmd := \"testp\"\n\tbin := cmdToRun(cmd)\n\n\tsetupAndroid(t)\n\tdefer cleanupAndroid(t)\n\tcreateHeaders(t)\n\tdefer cleanupHeaders()\n\n\trun(t, append(cc, \"-I\", installdir, \"-o\", cmd, \"main0.c\", libgoname)...)\n\tadbPush(t, cmd)\n\n\tdefer os.Remove(libgoname)\n\tdefer os.Remove(\"testp\")\n\n\tout := runwithldlibrarypath(t, bin...)\n\tif strings.TrimSpace(out) != \"PASS\" {\n\t\tt.Error(out)\n\t}\n}\n\n\/\/ test1: shared library can be dynamically loaded and exported symbols are accessible.\nfunc TestExportedSymbolsWithDynamicLoad(t *testing.T) {\n\tcmd := \"testp\"\n\tbin := 
cmdToRun(cmd)\n\n\tsetupAndroid(t)\n\tdefer cleanupAndroid(t)\n\tcreateHeaders(t)\n\tdefer cleanupHeaders()\n\n\trun(t, append(cc, \"-o\", cmd, \"main1.c\", \"-ldl\")...)\n\tadbPush(t, cmd)\n\n\tdefer os.Remove(libgoname)\n\tdefer os.Remove(cmd)\n\n\tout := run(t, append(bin, \".\/\"+libgoname)...)\n\tif strings.TrimSpace(out) != \"PASS\" {\n\t\tt.Error(out)\n\t}\n}\n\n\/\/ test2: tests libgo2 which does not export any functions.\nfunc TestUnexportedSymbols(t *testing.T) {\n\tcmd := \"testp2\"\n\tlibname := \"libgo2.\" + libSuffix\n\tbin := cmdToRun(cmd)\n\n\tsetupAndroid(t)\n\tdefer cleanupAndroid(t)\n\n\trungocmd(t,\n\t\t\"go\", \"build\",\n\t\t\"-buildmode=c-shared\",\n\t\t\"-installsuffix\", \"testcshared\",\n\t\t\"-o\", libname, \"libgo2\",\n\t)\n\tadbPush(t, libname)\n\n\tlinkFlags := \"-Wl,--no-as-needed\"\n\tif GOOS == \"darwin\" {\n\t\tlinkFlags = \"\"\n\t}\n\n\trun(t, append(\n\t\tcc, \"-o\", cmd,\n\t\t\"main2.c\", linkFlags,\n\t\tlibname,\n\t)...)\n\tadbPush(t, cmd)\n\n\tdefer os.Remove(libname)\n\tdefer os.Remove(cmd)\n\n\tout := runwithldlibrarypath(t, bin...)\n\n\tif strings.TrimSpace(out) != \"PASS\" {\n\t\tt.Error(out)\n\t}\n}\n\n\/\/ test3: tests main.main is exported on android.\nfunc TestMainExportedOnAndroid(t *testing.T) {\n\tif GOOS != \"android\" {\n\t\treturn\n\t}\n\n\tcmd := \"testp3\"\n\tbin := cmdToRun(cmd)\n\n\tsetupAndroid(t)\n\tdefer cleanupAndroid(t)\n\tcreateHeaders(t)\n\tdefer cleanupHeaders()\n\n\trun(t, append(cc, \"-o\", cmd, \"main3.c\", \"-ldl\")...)\n\tadbPush(t, cmd)\n\n\tdefer os.Remove(libgoname)\n\tdefer os.Remove(cmd)\n\n\tout := run(t, append(bin, \".\/\"+libgoname)...)\n\tif strings.TrimSpace(out) != \"PASS\" {\n\t\tt.Error(out)\n\t}\n}\n\n\/\/ test4: test signal handlers\nfunc TestSignalHandlers(t *testing.T) {\n\tcmd := \"testp4\"\n\tlibname := \"libgo4.\" + libSuffix\n\tbin := cmdToRun(cmd)\n\n\tsetupAndroid(t)\n\tdefer cleanupAndroid(t)\n\n\trungocmd(t,\n\t\t\"go\", \"build\",\n\t\t\"-buildmode=c-shared\",\n\t\t\"-installsuffix\", \"testcshared\",\n\t\t\"-o\", libname, \"libgo4\",\n\t)\n\tadbPush(t, libname)\n\trun(t, append(\n\t\tcc, \"-pthread\", \"-o\", cmd,\n\t\t\"main4.c\", \"-ldl\",\n\t)...)\n\tadbPush(t, cmd)\n\n\tdefer os.Remove(libname)\n\tdefer os.Remove(cmd)\n\tdefer os.Remove(\"libgo4.h\")\n\n\tout := run(t, append(bin, \".\/\"+libname)...)\n\n\tif strings.TrimSpace(out) != \"PASS\" {\n\t\tt.Error(run(t, append(bin, libname, \"verbose\")...))\n\t}\n}\n\n\/\/ test5: test signal handlers with os\/signal.Notify\nfunc TestSignalHandlersWithNotify(t *testing.T) {\n\tcmd := \"testp5\"\n\tlibname := \"libgo5.\" + libSuffix\n\tbin := cmdToRun(cmd)\n\n\tsetupAndroid(t)\n\tdefer cleanupAndroid(t)\n\n\trungocmd(t,\n\t\t\"go\", \"build\",\n\t\t\"-buildmode=c-shared\",\n\t\t\"-installsuffix\", \"testcshared\",\n\t\t\"-o\", libname, \"libgo5\",\n\t)\n\tadbPush(t, libname)\n\trun(t, append(\n\t\tcc, \"-pthread\", \"-o\", cmd,\n\t\t\"main5.c\", \"-ldl\",\n\t)...)\n\tadbPush(t, cmd)\n\n\tdefer os.Remove(libname)\n\tdefer os.Remove(cmd)\n\tdefer os.Remove(\"libgo5.h\")\n\n\tout := run(t, append(bin, \".\/\"+libname)...)\n\n\tif strings.TrimSpace(out) != \"PASS\" {\n\t\tt.Error(run(t, append(bin, libname, \"verbose\")...))\n\t}\n}\n\nfunc TestPIE(t *testing.T) {\n\tswitch GOOS {\n\tcase \"linux\", \"android\":\n\t\tbreak\n\tdefault:\n\t\tt.Logf(\"Skipping TestPIE on %s\", GOOS)\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tos.RemoveAll(\"pkg\")\n\t}()\n\n\tcreateHeaders(t)\n\tdefer cleanupHeaders()\n\n\tf, err := elf.Open(libgoname)\n\tif err != nil 
{\n\t\tt.Fatal(\"elf.Open failed: \", err)\n\t}\n\tdefer f.Close()\n\tif hasDynTag(t, f, elf.DT_TEXTREL) {\n\t\tt.Errorf(\"%s has DT_TEXTREL flag\", libgoname)\n\t}\n}\n\nfunc hasDynTag(t *testing.T, f *elf.File, tag elf.DynTag) bool {\n\tds := f.SectionByType(elf.SHT_DYNAMIC)\n\tif ds == nil {\n\t\tt.Error(\"no SHT_DYNAMIC section\")\n\t\treturn false\n\t}\n\td, err := ds.Data()\n\tif err != nil {\n\t\tt.Errorf(\"can't read SHT_DYNAMIC contents: %v\", err)\n\t\treturn false\n\t}\n\tfor len(d) > 0 {\n\t\tvar t elf.DynTag\n\t\tswitch f.Class {\n\t\tcase elf.ELFCLASS32:\n\t\t\tt = elf.DynTag(f.ByteOrder.Uint32(d[:4]))\n\t\t\td = d[8:]\n\t\tcase elf.ELFCLASS64:\n\t\t\tt = elf.DynTag(f.ByteOrder.Uint64(d[:8]))\n\t\t\td = d[16:]\n\t\t}\n\t\tif t == tag {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<commit_msg>misc\/cgo\/testcshared: fix syntax error in the test<commit_after>\/\/ Copyright 2017 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage cshared_test\n\nimport (\n\t\"debug\/elf\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n\t\"unicode\"\n)\n\n\/\/ C compiler with args (from $(go env CC) $(go env GOGCCFLAGS)).\nvar cc []string\n\n\/\/ An environment with GOPATH=$(pwd).\nvar gopathEnv []string\n\n\/\/ \".exe\" on Windows.\nvar exeSuffix string\n\nvar GOOS, GOARCH, GOROOT string\nvar installdir, androiddir, ldlibrarypath string\nvar libSuffix, libgoname string\n\nfunc init() {\n\tGOOS = goEnv(\"GOOS\")\n\tGOARCH = goEnv(\"GOARCH\")\n\tGOROOT = goEnv(\"GOROOT\")\n\n\tif _, err := os.Stat(GOROOT); os.IsNotExist(err) {\n\t\tlog.Fatalf(\"Unable to find GOROOT at '%s'\", GOROOT)\n\t}\n\n\t\/\/ Directory where cgo headers and outputs will be installed.\n\t\/\/ The installation directory format varies depending on the platform.\n\tinstalldir = path.Join(\"pkg\", fmt.Sprintf(\"%s_%s_testcshared_shared\", GOOS, GOARCH))\n\tswitch GOOS {\n\tcase \"darwin\":\n\t\tlibSuffix = \"dylib\"\n\t\tinstalldir = path.Join(\"pkg\", fmt.Sprintf(\"%s_%s_testcshared\", GOOS, GOARCH))\n\tcase \"windows\":\n\t\tlibSuffix = \"dll\"\n\tdefault:\n\t\tlibSuffix = \"so\"\n\t}\n\n\tandroiddir = fmt.Sprintf(\"\/data\/local\/tmp\/testcshared-%d\", os.Getpid())\n\tlibgoname = \"libgo.\" + libSuffix\n\n\tccOut := goEnv(\"CC\")\n\tcc = []string{string(ccOut)}\n\n\tout := goEnv(\"GOGCCFLAGS\")\n\tquote := '\\000'\n\tstart := 0\n\tlastSpace := true\n\tbackslash := false\n\ts := string(out)\n\tfor i, c := range s {\n\t\tif quote == '\\000' && unicode.IsSpace(c) {\n\t\t\tif !lastSpace {\n\t\t\t\tcc = append(cc, s[start:i])\n\t\t\t\tlastSpace = true\n\t\t\t}\n\t\t} else {\n\t\t\tif lastSpace {\n\t\t\t\tstart = i\n\t\t\t\tlastSpace = false\n\t\t\t}\n\t\t\tif quote == '\\000' && !backslash && (c == '\"' || c == '\\'') {\n\t\t\t\tquote = c\n\t\t\t\tbackslash = false\n\t\t\t} else if !backslash && quote == c {\n\t\t\t\tquote = '\\000'\n\t\t\t} else if (quote == '\\000' || quote == '\"') && !backslash && c == '\\\\' {\n\t\t\t\tbackslash = true\n\t\t\t} else {\n\t\t\t\tbackslash = false\n\t\t\t}\n\t\t}\n\t}\n\tif !lastSpace {\n\t\tcc = append(cc, s[start:])\n\t}\n\n\tif GOOS == \"darwin\" {\n\t\t\/\/ For Darwin\/ARM.\n\t\t\/\/ TODO(crawshaw): can we do better?\n\t\tcc = append(cc, []string{\"-framework\", \"CoreFoundation\", \"-framework\", \"Foundation\"}...)\n\t}\n\tlibgodir := GOOS + \"_\" + GOARCH\n\tswitch GOOS {\n\tcase \"darwin\":\n\t\tif GOARCH ==
\"arm\" || GOARCH == \"arm64\" {\n\t\t\tlibgodir += \"_shared\"\n\t\t}\n\tcase \"dragonfly\", \"freebsd\", \"linux\", \"netbsd\", \"openbsd\", \"solaris\":\n\t\tlibgodir += \"_shared\"\n\t}\n\tcc = append(cc, \"-I\", filepath.Join(\"pkg\", libgodir))\n\n\t\/\/ Build an environment with GOPATH=$(pwd)\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(2)\n\t}\n\tgopathEnv = append(os.Environ(), \"GOPATH=\"+dir)\n\tldlibrarypath = \"LD_LIBRARY_PATH=\" + dir\n\n\tif GOOS == \"windows\" {\n\t\texeSuffix = \".exe\"\n\t}\n}\n\nfunc goEnv(key string) string {\n\tout, err := exec.Command(\"go\", \"env\", key).Output()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"go env %s failed:\\n%s\", key, err)\n\t\tfmt.Fprintf(os.Stderr, \"%s\", err.(*exec.ExitError).Stderr)\n\t\tos.Exit(2)\n\t}\n\treturn strings.TrimSpace(string(out))\n}\n\nfunc cmdToRun(name string) []string {\n\treturn []string{\".\/\" + name + exeSuffix}\n}\n\nfunc adbPush(t *testing.T, filename string) {\n\tif GOOS != \"android\" {\n\t\treturn\n\t}\n\targs := []string{\"adb\", \"push\", filename, fmt.Sprintf(\"%s\/%s\", androiddir, filename)}\n\tcmd := exec.Command(args[0], args[1:]...)\n\tif out, err := cmd.CombinedOutput(); err != nil {\n\t\tt.Fatalf(\"adb command failed: %v\\n%s\\n\", err, out)\n\t}\n}\n\nfunc adbRun(t *testing.T, adbargs ...string) string {\n\tif GOOS != \"android\" {\n\t\tt.Fatalf(\"trying to run adb command when operating system is not android.\")\n\t}\n\targs := []string{\"adb\", \"shell\"}\n\targs = append(args, adbargs...)\n\tcmd := exec.Command(args[0], args[1:]...)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"adb command failed: %v\\n%s\\n\", err, out)\n\t}\n\n\treturn strings.Replace(string(out), \"\\r\", \"\", -1)\n}\n\nfunc runwithenv(t *testing.T, env []string, args ...string) string {\n\tif GOOS == \"android\" {\n\t\treturn adbRun(t, args...)\n\t}\n\n\tcmd := exec.Command(args[0], args[1:]...)\n\tcmd.Env = env\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"command failed: %v\\n%v\\n%s\\n\", args, err, out)\n\t} else {\n\t\tt.Logf(\"run: %v\", args)\n\t}\n\n\treturn string(out)\n}\n\nfunc run(t *testing.T, args ...string) string {\n\tif GOOS == \"android\" {\n\t\treturn adbRun(t, args...)\n\t}\n\n\tcmd := exec.Command(args[0], args[1:]...)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"command failed: %v\\n%v\\n%s\\n\", args, err, out)\n\t} else {\n\t\tt.Logf(\"run: %v\", args)\n\t}\n\n\treturn string(out)\n}\n\nfunc runwithldlibrarypath(t *testing.T, args ...string) string {\n\treturn runwithenv(t, append(gopathEnv, ldlibrarypath), args...)\n}\n\nfunc rungocmd(t *testing.T, args ...string) string {\n\treturn runwithenv(t, gopathEnv, args...)\n}\n\nfunc createHeaders(t *testing.T) {\n\trungocmd(t,\n\t\t\"go\", \"install\",\n\t\t\"-buildmode=c-shared\", \"-installsuffix\",\n\t\t\"testcshared\", \"libgo\",\n\t)\n\n\trungocmd(t,\n\t\t\"go\", \"build\",\n\t\t\"-buildmode=c-shared\", \"-installsuffix\",\n\t\t\"testcshared\", \"-o\", libgoname,\n\t\tfilepath.Join(\"src\", \"libgo\", \"libgo.go\"),\n\t)\n\tadbPush(t, libgoname)\n\n\tif GOOS == \"linux\" || GOOS == \"android\" {\n\t\tf, err := elf.Open(libgoname)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"elf.Open failed: \", err)\n\t\t}\n\t\tdefer f.Close()\n\t\tif hasDynTag(t, f, elf.DT_TEXTREL) {\n\t\t\tt.Fatalf(\"%s has DT_TEXTREL flag\", libgoname)\n\t\t}\n\t}\n}\n\nfunc cleanupHeaders() {\n\tos.Remove(\"libgo.h\")\n}\n\nfunc setupAndroid(t 
*testing.T) {\n\tif GOOS != \"android\" {\n\t\treturn\n\t}\n\tadbRun(t, \"mkdir\", \"-p\", androiddir)\n}\n\nfunc cleanupAndroid(t *testing.T) {\n\tif GOOS != \"android\" {\n\t\treturn\n\t}\n\tadbRun(t, \"rm\", \"-rf\", androiddir)\n}\n\n\/\/ test0: exported symbols in shared lib are accessible.\nfunc TestExportedSymbols(t *testing.T) {\n\tcmd := \"testp\"\n\tbin := cmdToRun(cmd)\n\n\tsetupAndroid(t)\n\tdefer cleanupAndroid(t)\n\tcreateHeaders(t)\n\tdefer cleanupHeaders()\n\n\trun(t, append(cc, \"-I\", installdir, \"-o\", cmd, \"main0.c\", libgoname)...)\n\tadbPush(t, cmd)\n\n\tdefer os.Remove(libgoname)\n\tdefer os.Remove(\"testp\")\n\n\tout := runwithldlibrarypath(t, bin...)\n\tif strings.TrimSpace(out) != \"PASS\" {\n\t\tt.Error(out)\n\t}\n}\n\n\/\/ test1: shared library can be dynamically loaded and exported symbols are accessible.\nfunc TestExportedSymbolsWithDynamicLoad(t *testing.T) {\n\tcmd := \"testp\"\n\tbin := cmdToRun(cmd)\n\n\tsetupAndroid(t)\n\tdefer cleanupAndroid(t)\n\tcreateHeaders(t)\n\tdefer cleanupHeaders()\n\n\trun(t, append(cc, \"-o\", cmd, \"main1.c\", \"-ldl\")...)\n\tadbPush(t, cmd)\n\n\tdefer os.Remove(libgoname)\n\tdefer os.Remove(cmd)\n\n\tout := run(t, append(bin, \".\/\"+libgoname)...)\n\tif strings.TrimSpace(out) != \"PASS\" {\n\t\tt.Error(out)\n\t}\n}\n\n\/\/ test2: tests libgo2 which does not export any functions.\nfunc TestUnexportedSymbols(t *testing.T) {\n\tcmd := \"testp2\"\n\tlibname := \"libgo2.\" + libSuffix\n\tbin := cmdToRun(cmd)\n\n\tsetupAndroid(t)\n\tdefer cleanupAndroid(t)\n\n\trungocmd(t,\n\t\t\"go\", \"build\",\n\t\t\"-buildmode=c-shared\",\n\t\t\"-installsuffix\", \"testcshared\",\n\t\t\"-o\", libname, \"libgo2\",\n\t)\n\tadbPush(t, libname)\n\n\tlinkFlags := \"-Wl,--no-as-needed\"\n\tif GOOS == \"darwin\" {\n\t\tlinkFlags = \"\"\n\t}\n\n\trun(t, append(\n\t\tcc, \"-o\", cmd,\n\t\t\"main2.c\", linkFlags,\n\t\tlibname,\n\t)...)\n\tadbPush(t, cmd)\n\n\tdefer os.Remove(libname)\n\tdefer os.Remove(cmd)\n\n\tout := runwithldlibrarypath(t, bin...)\n\n\tif strings.TrimSpace(out) != \"PASS\" {\n\t\tt.Error(out)\n\t}\n}\n\n\/\/ test3: tests main.main is exported on android.\nfunc TestMainExportedOnAndroid(t *testing.T) {\n\tif GOOS != \"android\" {\n\t\treturn\n\t}\n\n\tcmd := \"testp3\"\n\tbin := cmdToRun(cmd)\n\n\tsetupAndroid(t)\n\tdefer cleanupAndroid(t)\n\tcreateHeaders(t)\n\tdefer cleanupHeaders()\n\n\trun(t, append(cc, \"-o\", cmd, \"main3.c\", \"-ldl\")...)\n\tadbPush(t, cmd)\n\n\tdefer os.Remove(libgoname)\n\tdefer os.Remove(cmd)\n\n\tout := run(t, append(bin, \".\/\"+libgoname)...)\n\tif strings.TrimSpace(out) != \"PASS\" {\n\t\tt.Error(out)\n\t}\n}\n\n\/\/ test4: test signal handlers\nfunc TestSignalHandlers(t *testing.T) {\n\tcmd := \"testp4\"\n\tlibname := \"libgo4.\" + libSuffix\n\tbin := cmdToRun(cmd)\n\n\tsetupAndroid(t)\n\tdefer cleanupAndroid(t)\n\n\trungocmd(t,\n\t\t\"go\", \"build\",\n\t\t\"-buildmode=c-shared\",\n\t\t\"-installsuffix\", \"testcshared\",\n\t\t\"-o\", libname, \"libgo4\",\n\t)\n\tadbPush(t, libname)\n\trun(t, append(\n\t\tcc, \"-pthread\", \"-o\", cmd,\n\t\t\"main4.c\", \"-ldl\",\n\t)...)\n\tadbPush(t, cmd)\n\n\tdefer os.Remove(libname)\n\tdefer os.Remove(cmd)\n\tdefer os.Remove(\"libgo4.h\")\n\n\tout := run(t, append(bin, \".\/\"+libname)...)\n\n\tif strings.TrimSpace(out) != \"PASS\" {\n\t\tt.Error(run(t, append(bin, libname, \"verbose\")...))\n\t}\n}\n\n\/\/ test5: test signal handlers with os\/signal.Notify\nfunc TestSignalHandlersWithNotify(t *testing.T) {\n\tcmd := \"testp5\"\n\tlibname := \"libgo5.\" + 
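\/* platform-specific extension set in init *\/ 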
libSuffix\n\tbin := cmdToRun(cmd)\n\n\tsetupAndroid(t)\n\tdefer cleanupAndroid(t)\n\n\trungocmd(t,\n\t\t\"go\", \"build\",\n\t\t\"-buildmode=c-shared\",\n\t\t\"-installsuffix\", \"testcshared\",\n\t\t\"-o\", libname, \"libgo5\",\n\t)\n\tadbPush(t, libname)\n\trun(t, append(\n\t\tcc, \"-pthread\", \"-o\", cmd,\n\t\t\"main5.c\", \"-ldl\",\n\t)...)\n\tadbPush(t, cmd)\n\n\tdefer os.Remove(libname)\n\tdefer os.Remove(cmd)\n\tdefer os.Remove(\"libgo5.h\")\n\n\tout := run(t, append(bin, \".\/\"+libname)...)\n\n\tif strings.TrimSpace(out) != \"PASS\" {\n\t\tt.Error(run(t, append(bin, libname, \"verbose\")...))\n\t}\n}\n\nfunc TestPIE(t *testing.T) {\n\tswitch GOOS {\n\tcase \"linux\", \"android\":\n\t\tbreak\n\tdefault:\n\t\tt.Logf(\"Skipping TestPIE on %s\", GOOS)\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tos.RemoveAll(\"pkg\")\n\t}()\n\n\tcreateHeaders(t)\n\tdefer cleanupHeaders()\n\n\tf, err := elf.Open(libgoname)\n\tif err != nil {\n\t\tt.Fatal(\"elf.Open failed: \", err)\n\t}\n\tdefer f.Close()\n\tif hasDynTag(t, f, elf.DT_TEXTREL) {\n\t\tt.Errorf(\"%s has DT_TEXTREL flag\", libgoname)\n\t}\n}\n\nfunc hasDynTag(t *testing.T, f *elf.File, tag elf.DynTag) bool {\n\tds := f.SectionByType(elf.SHT_DYNAMIC)\n\tif ds == nil {\n\t\tt.Error(\"no SHT_DYNAMIC section\")\n\t\treturn false\n\t}\n\td, err := ds.Data()\n\tif err != nil {\n\t\tt.Errorf(\"can't read SHT_DYNAMIC contents: %v\", err)\n\t\treturn false\n\t}\n\tfor len(d) > 0 {\n\t\tvar t elf.DynTag\n\t\tswitch f.Class {\n\t\tcase elf.ELFCLASS32:\n\t\t\tt = elf.DynTag(f.ByteOrder.Uint32(d[:4]))\n\t\t\td = d[8:]\n\t\tcase elf.ELFCLASS64:\n\t\t\tt = elf.DynTag(f.ByteOrder.Uint64(d[:8]))\n\t\t\td = d[16:]\n\t\t}\n\t\tif t == tag {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"fmt\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/iam\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"testing\"\n)\n\nfunc TestAccAWSPolicyAttachment_basic(t *testing.T) {\n\tvar out iam.GetPolicyOutput\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSPolicyAttachmentDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccAWSPolicyAttachConfig,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSPolicyAttachmentExists(\"aws_iam_policy_attachment.test-attachment\", 3, &out),\n\t\t\t\t\ttestAccCheckAWSPolicyAttachmentAttributes([]string{\"test-user\"}, []string{\"test-role\"}, []string{\"test-group\"}, &out),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccAWSPolicyAttachConfigUpdate,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSPolicyAttachmentExists(\"aws_iam_policy_attachment.test-attachment\", 6, &out),\n\t\t\t\t\ttestAccCheckAWSPolicyAttachmentAttributes([]string{\"test-user3\", \"test-user3\"}, []string{\"test-role2\", \"test-role3\"}, []string{\"test-group2\", \"test-group3\"}, &out),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\nfunc testAccCheckAWSPolicyAttachmentDestroy(s *terraform.State) error {\n\n\treturn nil\n}\n\nfunc testAccCheckAWSPolicyAttachmentExists(n string, c int, out *iam.ListEntitiesForPolicyOutput) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", 
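\/* resource address in Terraform state *\/ 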
n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No policy name is set\")\n\t\t}\n\n\t\tconn := testAccProvider.Meta().(*AWSClient).iamconn\n\t\tarn := rs.Primary.Attributes[\"policy_arn\"]\n\n\t\tresp, err := conn.GetPolicy(&iam.GetPolicyInput{\n\t\t\tPolicyARN: aws.String(arn),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error: Policy (%s) not found\", n)\n\t\t}\n\t\tif c != resp.Policy.AttachmentCount {\n\t\t\treturn fmt.Errorf(\"Error: Policy (%s) has wrong number of entities attached on initial creation\", n)\n\t\t}\n\t\tresp2, err := conn.ListEntitiesForPolicy(&iam.ListEntitiesForPolicyOutput{\n\t\t\tPolicyARN: aws.String(arn),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error: Failed to get entities for Policy (%s)\", arn)\n\t\t}\n\n\t\t*out = *resp2\n\t\treturn nil\n\t}\n}\nfunc testAccCheckAWSPolicyAttachmentAttributes(users []string, roles []string, groups []string, out *iam.ListEntitiesForPolicyOutput) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\tuc := len(u)\n\t\trc := len(r)\n\t\tgc := len(g)\n\n\t\tfor _, u := range users {\n\t\t\tfor _, pu := range out.PolicyUsers {\n\t\t\t\tif u == *pu.UserName {\n\t\t\t\t\tuc--\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor _, r := range roles {\n\t\t\tfor _, pr := range out.PolicyRoles {\n\t\t\t\tif r == *pu.RoleName {\n\t\t\t\t\trc--\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor _, g := range users {\n\t\t\tfor _, pg := range out.PolicyGroups {\n\t\t\t\tif g == *pu.GroupName {\n\t\t\t\t\tgc--\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif uc != 0 || rc != 0 || gc != 0 {\n\t\t\treturn fmt.Errorf(\"Error: Number of attached users, roles, or groups was incorrect:\\n expected %d users and found %d\\nexpected %d roles and found %d\\nexpected %d groups and found %d\", len(users), (len(users) - uc), len(roles), (len(roles) - rc), len(groups), (len(groups) - gc))\n\t\t}\n\t}\n}\n\nconst testAccAWSPolicyAttachConfig = `\nresource \"aws_iam_user\" \"user\" {\n name = \"test-user\"\n}\nresource \"aws_iam_role\" \"role\" {\n name = \"test-role\"\n}\nresource \"aws_iam_group\" \"group\" {\n name = \"test-group\"\n}\n\nresource \"aws_iam_policy\" \"policy\" {\n name = \"test-policy\"\n description = \"A test policy\"\n policy = <<EOF\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Action\": [\n \"iam:ChangePassword\"\n ],\n \"Resource\": \"*\",\n \"Effect\": \"Allow\"\n }\n ]\n}\nEOF\n}\n\nresource \"aws_iam_policy_attachment\" \"test-attach\" {\n name = \"test-attachment\"\n users = [\"${aws_iam_user.user.name}\"]\n roles = [\"${aws_iam_role.role.name}\"]\n groups = [\"${aws_iam_group.group.name}\"]\n policy_arn = \"${aws_iam_policy.policy.arn}\"\n}\n`\n\nconst testAccAWSPolicyAttachConfigUpdate = `\nresource \"aws_iam_user\" \"user\" {\n name = \"test-user\"\n}\nresource \"aws_iam_user\" \"user2\" {\n name = \"test-user2\"\n}\nresource \"aws_iam_user\" \"user3\" {\n name = \"test-user3\"\n}\nresource \"aws_iam_role\" \"role\" {\n name = \"test-role\"\n}\nresource \"aws_iam_role\" \"role2\" {\n name = \"test-role2\"\n}\nresource \"aws_iam_role\" \"role3\" {\n name = \"test-role3\"\n}\nresource \"aws_iam_group\" \"group\" {\n name = \"test-group\"\n}\nresource \"aws_iam_group\" \"group2\" {\n name = \"test-group2\"\n}\nresource \"aws_iam_group\" \"group3\" {\n name = \"test-group3\"\n}\n\nresource \"aws_iam_policy\" \"policy\" {\n name = \"test-policy\"\n description = \"A test policy\"\n policy = <<EOF\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Action\": [\n 
\"iam:ChangePassword\"\n ],\n \"Resource\": \"*\",\n \"Effect\": \"Allow\"\n }\n ]\n}\nEOF\n}\n\nresource \"aws_iam_policy_attachment\" \"test-attach\" {\n name = \"test-attachment\"\n users = [\n \"${aws_iam_user.user2.name}\",\n \"${aws_iam_user.user3.name}\"\n ]\n roles = [\n \"${aws_iam_role.role2.name}\",\n \"${aws_iam_role.role3.name}\"\n ]\n groups = [\n \"${aws_iam_group.group2.name}\",\n \"${aws_iam_group.group3.name}\"\n ]\n policy_arn = \"${aws_iam_policy.policy.arn}\"\n}\n`\n<commit_msg>test works<commit_after>package aws\n\nimport (\n\t\"fmt\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/iam\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"testing\"\n)\n\nfunc TestAccAWSPolicyAttachment_basic(t *testing.T) {\n\tvar out iam.ListEntitiesForPolicyOutput\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSPolicyAttachmentDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccAWSPolicyAttachConfig,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSPolicyAttachmentExists(\"aws_iam_policy_attachment.test-attachment\", 3, &out),\n\t\t\t\t\ttestAccCheckAWSPolicyAttachmentAttributes([]string{\"test-user\"}, []string{\"test-role\"}, []string{\"test-group\"}, &out),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccAWSPolicyAttachConfigUpdate,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSPolicyAttachmentExists(\"aws_iam_policy_attachment.test-attachment\", 6, &out),\n\t\t\t\t\ttestAccCheckAWSPolicyAttachmentAttributes([]string{\"test-user3\", \"test-user3\"}, []string{\"test-role2\", \"test-role3\"}, []string{\"test-group2\", \"test-group3\"}, &out),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\nfunc testAccCheckAWSPolicyAttachmentDestroy(s *terraform.State) error {\n\n\treturn nil\n}\n\nfunc testAccCheckAWSPolicyAttachmentExists(n string, c int64, out *iam.ListEntitiesForPolicyOutput) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No policy name is set\")\n\t\t}\n\n\t\tconn := testAccProvider.Meta().(*AWSClient).iamconn\n\t\tarn := rs.Primary.Attributes[\"policy_arn\"]\n\n\t\tresp, err := conn.GetPolicy(&iam.GetPolicyInput{\n\t\t\tPolicyARN: aws.String(arn),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error: Policy (%s) not found\", n)\n\t\t}\n\t\tif c != *resp.Policy.AttachmentCount {\n\t\t\treturn fmt.Errorf(\"Error: Policy (%s) has wrong number of entities attached on initial creation\", n)\n\t\t}\n\t\tresp2, err := conn.ListEntitiesForPolicy(&iam.ListEntitiesForPolicyInput{\n\t\t\tPolicyARN: aws.String(arn),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error: Failed to get entities for Policy (%s)\", arn)\n\t\t}\n\n\t\t*out = *resp2\n\t\treturn nil\n\t}\n}\nfunc testAccCheckAWSPolicyAttachmentAttributes(users []string, roles []string, groups []string, out *iam.ListEntitiesForPolicyOutput) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\tuc := len(users)\n\t\trc := len(roles)\n\t\tgc := len(groups)\n\n\t\tfor _, u := range users {\n\t\t\tfor _, pu := range out.PolicyUsers {\n\t\t\t\tif u == *pu.UserName 
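\/* expected user is attached; count it once *\/ 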
{\n\t\t\t\t\tuc--\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor _, r := range roles {\n\t\t\tfor _, pr := range out.PolicyRoles {\n\t\t\t\tif r == *pr.RoleName {\n\t\t\t\t\trc--\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor _, g := range groups {\n\t\t\tfor _, pg := range out.PolicyGroups {\n\t\t\t\tif g == *pg.GroupName {\n\t\t\t\t\tgc--\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif uc != 0 || rc != 0 || gc != 0 {\n\t\t\treturn fmt.Errorf(\"Error: Number of attached users, roles, or groups was incorrect:\\n expected %d users and found %d\\nexpected %d roles and found %d\\nexpected %d groups and found %d\", len(users), (len(users) - uc), len(roles), (len(roles) - rc), len(groups), (len(groups) - gc))\n\t\t}\n\t\treturn nil\n\t}\n}\n\nconst testAccAWSPolicyAttachConfig = `\nresource \"aws_iam_user\" \"user\" {\n    name = \"test-user\"\n}\nresource \"aws_iam_role\" \"role\" {\n    name = \"test-role\"\n}\nresource \"aws_iam_group\" \"group\" {\n    name = \"test-group\"\n}\n\nresource \"aws_iam_policy\" \"policy\" {\n    name = \"test-policy\"\n    description = \"A test policy\"\n    policy = <<EOF\n{\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n        {\n            \"Action\": [\n                \"iam:ChangePassword\"\n            ],\n            \"Resource\": \"*\",\n            \"Effect\": \"Allow\"\n        }\n    ]\n}\nEOF\n}\n\nresource \"aws_iam_policy_attachment\" \"test-attach\" {\n    name = \"test-attachment\"\n    users = [\"${aws_iam_user.user.name}\"]\n    roles = [\"${aws_iam_role.role.name}\"]\n    groups = [\"${aws_iam_group.group.name}\"]\n    policy_arn = \"${aws_iam_policy.policy.arn}\"\n}\n`\n\nconst testAccAWSPolicyAttachConfigUpdate = `\nresource \"aws_iam_user\" \"user\" {\n    name = \"test-user\"\n}\nresource \"aws_iam_user\" \"user2\" {\n    name = \"test-user2\"\n}\nresource \"aws_iam_user\" \"user3\" {\n    name = \"test-user3\"\n}\nresource \"aws_iam_role\" \"role\" {\n    name = \"test-role\"\n}\nresource \"aws_iam_role\" \"role2\" {\n    name = \"test-role2\"\n}\nresource \"aws_iam_role\" \"role3\" {\n    name = \"test-role3\"\n}\nresource \"aws_iam_group\" \"group\" {\n    name = \"test-group\"\n}\nresource \"aws_iam_group\" \"group2\" {\n    name = \"test-group2\"\n}\nresource \"aws_iam_group\" \"group3\" {\n    name = \"test-group3\"\n}\n\nresource \"aws_iam_policy\" \"policy\" {\n    name = \"test-policy\"\n    description = \"A test policy\"\n    policy = <<EOF\n{\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n        {\n            \"Action\": [\n                \"iam:ChangePassword\"\n            ],\n            \"Resource\": \"*\",\n            \"Effect\": \"Allow\"\n        }\n    ]\n}\nEOF\n}\n\nresource \"aws_iam_policy_attachment\" \"test-attach\" {\n    name = \"test-attachment\"\n    users = [\n        \"${aws_iam_user.user2.name}\",\n        \"${aws_iam_user.user3.name}\"\n    ]\n    roles = [\n        \"${aws_iam_role.role2.name}\",\n        \"${aws_iam_role.role3.name}\"\n    ]\n    groups = [\n        \"${aws_iam_group.group2.name}\",\n        \"${aws_iam_group.group3.name}\"\n    ]\n    policy_arn = \"${aws_iam_policy.policy.arn}\"\n}\n`\n<|endoftext|>"}
{"text":"<commit_before>\/\/ +build e2e\n\n\/*\n * Copyright 2020 The Knative Authors\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage e2e\n\nimport 
(\n\t\"strings\"\n\t\"testing\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/utils\/pointer\"\n\teventingduckv1beta1 \"knative.dev\/eventing\/pkg\/apis\/duck\/v1beta1\"\n\t\"knative.dev\/eventing\/pkg\/apis\/eventing\"\n\t\"knative.dev\/eventing\/pkg\/apis\/eventing\/v1beta1\"\n\t\"knative.dev\/eventing\/test\/e2e\/helpers\"\n\ttestlib \"knative.dev\/eventing\/test\/lib\"\n\t\"knative.dev\/eventing\/test\/lib\/resources\"\n)\n\n\/\/ ChannelBasedBrokerCreator creates a BrokerCreator that creates a broker based on the channel parameter.\nfunc ChannelBasedBrokerCreator(channel metav1.TypeMeta, brokerClass string) helpers.BrokerCreatorWithRetries {\n\treturn func(client *testlib.Client, numRetries int32) string {\n\t\tbrokerName := strings.ToLower(channel.Kind)\n\n\t\t\/\/ create a ConfigMap used by the broker.\n\t\tconfig := client.CreateBrokerConfigMapOrFail(\"config-\"+brokerName, &channel)\n\n\t\tbackoff := eventingduckv1beta1.BackoffPolicyLinear\n\n\t\t\/\/ create a new broker.\n\t\tclient.CreateBrokerV1Beta1OrFail(brokerName,\n\t\t\tresources.WithBrokerClassForBrokerV1Beta1(brokerClass),\n\t\t\tresources.WithConfigForBrokerV1Beta1(config),\n\t\t\tfunc(broker *v1beta1.Broker) {\n\t\t\t\tbroker.Spec.Delivery = &eventingduckv1beta1.DeliverySpec{\n\t\t\t\t\tRetry: &numRetries,\n\t\t\t\t\tBackoffPolicy: &backoff,\n\t\t\t\t\tBackoffDelay: pointer.StringPtr(\"PT1S\"),\n\t\t\t\t}\n\t\t\t},\n\t\t)\n\n\t\treturn brokerName\n\t}\n}\n\nfunc TestBrokerRedelivery(t *testing.T) {\n\tchannelTestRunner.RunTests(t, testlib.FeatureRedelivery, func(t *testing.T, component metav1.TypeMeta) {\n\n\t\tbrokerCreator := ChannelBasedBrokerCreator(component, eventing.MTChannelBrokerClassValue)\n\n\t\thelpers.BrokerRedelivery(t, brokerCreator)\n\t})\n}\n<commit_msg>Updating broker to be v1 in BrokerRedelivery test (#1511)<commit_after>\/\/ +build e2e\n\n\/*\n * Copyright 2020 The Knative Authors\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage e2e\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/utils\/pointer\"\n\teventingduckv1 \"knative.dev\/eventing\/pkg\/apis\/duck\/v1\"\n\t\"knative.dev\/eventing\/pkg\/apis\/eventing\"\n\tv1 \"knative.dev\/eventing\/pkg\/apis\/eventing\/v1\"\n\t\"knative.dev\/eventing\/test\/e2e\/helpers\"\n\ttestlib \"knative.dev\/eventing\/test\/lib\"\n\t\"knative.dev\/eventing\/test\/lib\/resources\"\n)\n\n\/\/ ChannelBasedBrokerCreator creates a BrokerCreator that creates a broker based on the channel parameter.\nfunc ChannelBasedBrokerCreator(channel metav1.TypeMeta, brokerClass string) helpers.BrokerCreatorWithRetries {\n\treturn func(client *testlib.Client, numRetries int32) string {\n\t\tbrokerName := strings.ToLower(channel.Kind)\n\n\t\t\/\/ create a ConfigMap used by the broker.\n\t\tconfig := client.CreateBrokerConfigMapOrFail(\"config-\"+brokerName, &channel)\n\n\t\tbackoff := eventingduckv1.BackoffPolicyLinear\n\n\t\t\/\/ create a new 
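v1 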
broker.\n\t\tclient.CreateBrokerV1OrFail(brokerName,\n\t\t\tresources.WithBrokerClassForBrokerV1(brokerClass),\n\t\t\tresources.WithConfigForBrokerV1(config),\n\t\t\tfunc(broker *v1.Broker) {\n\t\t\t\tbroker.Spec.Delivery = &eventingduckv1.DeliverySpec{\n\t\t\t\t\tRetry: &numRetries,\n\t\t\t\t\tBackoffPolicy: &backoff,\n\t\t\t\t\tBackoffDelay: pointer.StringPtr(\"PT1S\"),\n\t\t\t\t}\n\t\t\t},\n\t\t)\n\n\t\treturn brokerName\n\t}\n}\n\nfunc TestBrokerRedelivery(t *testing.T) {\n\tchannelTestRunner.RunTests(t, testlib.FeatureRedelivery, func(t *testing.T, component metav1.TypeMeta) {\n\n\t\tbrokerCreator := ChannelBasedBrokerCreator(component, eventing.MTChannelBrokerClassValue)\n\n\t\thelpers.BrokerRedelivery(t, brokerCreator)\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package multisig\n\nimport (\n\t\"encoding\/json\"\n\t\"math\/rand\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n\tcmn \"github.com\/tendermint\/tendermint\/libs\/common\"\n)\n\nfunc randCompactBitArray(bits int) (*CompactBitArray, []byte) {\n\tnumBytes := (bits + 7) \/ 8\n\tsrc := cmn.RandBytes((bits + 7) \/ 8)\n\tbA := NewCompactBitArray(bits)\n\n\tfor i := 0; i < numBytes-1; i++ {\n\t\tfor j := uint8(0); j < 8; j++ {\n\t\t\tbA.SetIndex(i*8+int(j), src[i]&(uint8(1)<<(8-j)) > 0)\n\t\t}\n\t}\n\t\/\/ Set remaining bits\n\tfor i := uint8(0); i < 8-uint8(bA.ExtraBitsStored); i++ {\n\t\tbA.SetIndex(numBytes*8+int(i), src[numBytes-1]&(uint8(1)<<(8-i)) > 0)\n\t}\n\treturn bA, src\n}\n\nfunc TestNewBitArrayNeverCrashesOnNegatives(t *testing.T) {\n\tbitList := []int{-127, -128, -1 << 31}\n\tfor _, bits := range bitList {\n\t\t_ = NewCompactBitArray(bits)\n\t}\n}\n\nfunc TestJSONMarshalUnmarshal(t *testing.T) {\n\n\tbA1 := NewCompactBitArray(0)\n\tbA2 := NewCompactBitArray(1)\n\n\tbA3 := NewCompactBitArray(1)\n\tbA3.SetIndex(0, true)\n\n\tbA4 := NewCompactBitArray(5)\n\tbA4.SetIndex(0, true)\n\tbA4.SetIndex(1, true)\n\n\tbA5 := NewCompactBitArray(9)\n\tbA5.SetIndex(0, true)\n\tbA5.SetIndex(1, true)\n\tbA5.SetIndex(8, true)\n\n\tbA6 := NewCompactBitArray(16)\n\tbA6.SetIndex(0, true)\n\tbA6.SetIndex(1, true)\n\tbA6.SetIndex(8, false)\n\tbA6.SetIndex(15, true)\n\n\ttestCases := []struct {\n\t\tbA *CompactBitArray\n\t\tmarshalledBA string\n\t}{\n\t\t{nil, `null`},\n\t\t{bA1, `null`},\n\t\t{bA2, `\"_\"`},\n\t\t{bA3, `\"x\"`},\n\t\t{bA4, `\"xx___\"`},\n\t\t{bA5, `\"xx______x\"`},\n\t\t{bA6, `\"xx_____________x\"`},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.bA.String(), func(t *testing.T) {\n\t\t\tbz, err := json.Marshal(tc.bA)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tassert.Equal(t, tc.marshalledBA, string(bz))\n\n\t\t\tvar unmarshalledBA *CompactBitArray\n\t\t\terr = json.Unmarshal(bz, &unmarshalledBA)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif tc.bA == nil {\n\t\t\t\trequire.Nil(t, unmarshalledBA)\n\t\t\t} else {\n\t\t\t\trequire.NotNil(t, unmarshalledBA)\n\t\t\t\tassert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems)\n\t\t\t\tif assert.EqualValues(t, tc.bA.String(), unmarshalledBA.String()) {\n\t\t\t\t\tassert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCompactMarshalUnmarshal(t *testing.T) {\n\tbA1 := NewCompactBitArray(0)\n\tbA2 := NewCompactBitArray(1)\n\n\tbA3 := NewCompactBitArray(1)\n\tbA3.SetIndex(0, true)\n\n\tbA4 := NewCompactBitArray(5)\n\tbA4.SetIndex(0, true)\n\tbA4.SetIndex(1, true)\n\n\tbA5 := NewCompactBitArray(9)\n\tbA5.SetIndex(0, true)\n\tbA5.SetIndex(1, true)\n\tbA5.SetIndex(8, 
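\/* first bit of the second byte *\/ 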
true)\n\n\tbA6 := NewCompactBitArray(16)\n\tbA6.SetIndex(0, true)\n\tbA6.SetIndex(1, true)\n\tbA6.SetIndex(8, false)\n\tbA6.SetIndex(15, true)\n\n\ttestCases := []struct {\n\t\tbA *CompactBitArray\n\t\tmarshalledBA []byte\n\t}{\n\t\t{nil, []byte(\"null\")},\n\t\t{bA1, []byte(\"null\")},\n\t\t{bA2, []byte{byte(1), byte(0)}},\n\t\t{bA3, []byte{byte(1), byte(128)}},\n\t\t{bA4, []byte{byte(5), byte(192)}},\n\t\t{bA5, []byte{byte(9), byte(192), byte(128)}},\n\t\t{bA6, []byte{byte(16), byte(192), byte(1)}},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.bA.String(), func(t *testing.T) {\n\t\t\tbz := tc.bA.CompactMarshal()\n\n\t\t\tassert.Equal(t, tc.marshalledBA, bz)\n\n\t\t\tunmarshalledBA, err := CompactUnmarshal(bz)\n\t\t\trequire.NoError(t, err)\n\t\t\tif tc.bA == nil {\n\t\t\t\trequire.Nil(t, unmarshalledBA)\n\t\t\t} else {\n\t\t\t\trequire.NotNil(t, unmarshalledBA)\n\t\t\t\tassert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems)\n\t\t\t\tif assert.EqualValues(t, tc.bA.String(), unmarshalledBA.String()) {\n\t\t\t\t\tassert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCompactBitArrayNumOfTrueBitsBefore(t *testing.T) {\n\ttestCases := []struct {\n\t\tmarshalledBA string\n\t\tbAIndex []int\n\t\ttrueValueIndex []int\n\t}{\n\t\t{`\"_____\"`, []int{0, 1, 2, 3, 4}, []int{0, 0, 0, 0, 0}},\n\t\t{`\"x\"`, []int{0}, []int{0}},\n\t\t{`\"_x\"`, []int{1}, []int{0}},\n\t\t{`\"x___xxxx\"`, []int{0, 4, 5, 6, 7}, []int{0, 1, 2, 3, 4}},\n\t\t{`\"__x_xx_x__x_x___\"`, []int{2, 4, 5, 7, 10, 12}, []int{0, 1, 2, 3, 4, 5}},\n\t\t{`\"______________xx\"`, []int{14, 15}, []int{0, 1}},\n\t}\n\tfor tcIndex, tc := range testCases {\n\t\tt.Run(tc.marshalledBA, func(t *testing.T) {\n\t\t\tvar bA *CompactBitArray\n\t\t\terr := json.Unmarshal([]byte(tc.marshalledBA), &bA)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tfor i := 0; i < len(tc.bAIndex); i++ {\n\t\t\t\trequire.Equal(t, tc.trueValueIndex[i], bA.NumOfTrueBitsBefore(tc.bAIndex[i]), \"tc %d, i %d\", tcIndex, i)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCompactBitArrayGetSetIndex(t *testing.T) {\n\tr := rand.New(rand.NewSource(100))\n\tnumTests := 10\n\tnumBitsPerArr := 100\n\tfor i := 0; i < numTests; i++ {\n\t\tbits := r.Intn(1000)\n\t\tbA, _ := randCompactBitArray(bits)\n\n\t\tfor j := 0; j < numBitsPerArr; j++ {\n\t\t\tcopy := bA.Copy()\n\t\t\tindex := r.Intn(bits)\n\t\t\tval := (r.Int63() % 2) == 0\n\t\t\tbA.SetIndex(index, val)\n\t\t\trequire.Equal(t, val, bA.GetIndex(index), \"bA.SetIndex(%d, %v) failed on bit array: %s\", index, val, copy)\n\t\t}\n\t}\n}\n<commit_msg>(squash this) Fix build errors<commit_after>package multisig\n\nimport (\n\t\"encoding\/json\"\n\t\"math\/rand\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n\tcmn \"github.com\/tendermint\/tendermint\/libs\/common\"\n)\n\nfunc randCompactBitArray(bits int) (*CompactBitArray, []byte) {\n\tnumBytes := (bits + 7) \/ 8\n\tsrc := cmn.RandBytes((bits + 7) \/ 8)\n\tbA := NewCompactBitArray(bits)\n\n\tfor i := 0; i < numBytes-1; i++ {\n\t\tfor j := uint8(0); j < 8; j++ {\n\t\t\tbA.SetIndex(i*8+int(j), src[i]&(uint8(1)<<(8-j)) > 0)\n\t\t}\n\t}\n\t\/\/ Set remaining bits\n\tfor i := uint8(0); i < 8-uint8(bA.ExtraBitsStored); i++ {\n\t\tbA.SetIndex(numBytes*8+int(i), src[numBytes-1]&(uint8(1)<<(8-i)) > 0)\n\t}\n\treturn bA, src\n}\n\nfunc TestNewBitArrayNeverCrashesOnNegatives(t *testing.T) {\n\tbitList := []int{-127, -128, -1 << 31}\n\tfor _, bits := range bitList {\n\t\t_ = 
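\/* must not panic on a negative size *\/ 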
NewCompactBitArray(bits)\n\t}\n}\n\nfunc TestJSONMarshalUnmarshal(t *testing.T) {\n\n\tbA1 := NewCompactBitArray(0)\n\tbA2 := NewCompactBitArray(1)\n\n\tbA3 := NewCompactBitArray(1)\n\tbA3.SetIndex(0, true)\n\n\tbA4 := NewCompactBitArray(5)\n\tbA4.SetIndex(0, true)\n\tbA4.SetIndex(1, true)\n\n\tbA5 := NewCompactBitArray(9)\n\tbA5.SetIndex(0, true)\n\tbA5.SetIndex(1, true)\n\tbA5.SetIndex(8, true)\n\n\tbA6 := NewCompactBitArray(16)\n\tbA6.SetIndex(0, true)\n\tbA6.SetIndex(1, true)\n\tbA6.SetIndex(8, false)\n\tbA6.SetIndex(15, true)\n\n\ttestCases := []struct {\n\t\tbA *CompactBitArray\n\t\tmarshalledBA string\n\t}{\n\t\t{nil, `null`},\n\t\t{bA1, `null`},\n\t\t{bA2, `\"_\"`},\n\t\t{bA3, `\"x\"`},\n\t\t{bA4, `\"xx___\"`},\n\t\t{bA5, `\"xx______x\"`},\n\t\t{bA6, `\"xx_____________x\"`},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.bA.String(), func(t *testing.T) {\n\t\t\tbz, err := json.Marshal(tc.bA)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tassert.Equal(t, tc.marshalledBA, string(bz))\n\n\t\t\tvar unmarshalledBA *CompactBitArray\n\t\t\terr = json.Unmarshal(bz, &unmarshalledBA)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif tc.bA == nil {\n\t\t\t\trequire.Nil(t, unmarshalledBA)\n\t\t\t} else {\n\t\t\t\trequire.NotNil(t, unmarshalledBA)\n\t\t\t\tassert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems)\n\t\t\t\tif assert.EqualValues(t, tc.bA.String(), unmarshalledBA.String()) {\n\t\t\t\t\tassert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCompactMarshalUnmarshal(t *testing.T) {\n\tbA1 := NewCompactBitArray(0)\n\tbA2 := NewCompactBitArray(1)\n\n\tbA3 := NewCompactBitArray(1)\n\tbA3.SetIndex(0, true)\n\n\tbA4 := NewCompactBitArray(5)\n\tbA4.SetIndex(0, true)\n\tbA4.SetIndex(1, true)\n\n\tbA5 := NewCompactBitArray(9)\n\tbA5.SetIndex(0, true)\n\tbA5.SetIndex(1, true)\n\tbA5.SetIndex(8, true)\n\n\tbA6 := NewCompactBitArray(16)\n\tbA6.SetIndex(0, true)\n\tbA6.SetIndex(1, true)\n\tbA6.SetIndex(8, false)\n\tbA6.SetIndex(15, true)\n\n\ttestCases := []struct {\n\t\tbA *CompactBitArray\n\t\tmarshalledBA []byte\n\t}{\n\t\t{nil, []byte(\"null\")},\n\t\t{bA1, []byte(\"null\")},\n\t\t{bA2, []byte{byte(1), byte(0)}},\n\t\t{bA3, []byte{byte(1), byte(128)}},\n\t\t{bA4, []byte{byte(5), byte(192)}},\n\t\t{bA5, []byte{byte(9), byte(192), byte(128)}},\n\t\t{bA6, []byte{byte(16), byte(192), byte(1)}},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.bA.String(), func(t *testing.T) {\n\t\t\tbz := tc.bA.CompactMarshal()\n\n\t\t\tassert.Equal(t, tc.marshalledBA, bz)\n\n\t\t\tunmarshalledBA, err := CompactUnmarshal(bz)\n\t\t\trequire.NoError(t, err)\n\t\t\tif tc.bA == nil {\n\t\t\t\trequire.Nil(t, unmarshalledBA)\n\t\t\t} else {\n\t\t\t\trequire.NotNil(t, unmarshalledBA)\n\t\t\t\tassert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems)\n\t\t\t\tif assert.EqualValues(t, tc.bA.String(), unmarshalledBA.String()) {\n\t\t\t\t\tassert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCompactBitArrayNumOfTrueBitsBefore(t *testing.T) {\n\ttestCases := []struct {\n\t\tmarshalledBA string\n\t\tbAIndex []int\n\t\ttrueValueIndex []int\n\t}{\n\t\t{`\"_____\"`, []int{0, 1, 2, 3, 4}, []int{0, 0, 0, 0, 0}},\n\t\t{`\"x\"`, []int{0}, []int{0}},\n\t\t{`\"_x\"`, []int{1}, []int{0}},\n\t\t{`\"x___xxxx\"`, []int{0, 4, 5, 6, 7}, []int{0, 1, 2, 3, 4}},\n\t\t{`\"__x_xx_x__x_x___\"`, []int{2, 4, 5, 7, 10, 12}, []int{0, 1, 2, 3, 4, 5}},\n\t\t{`\"______________xx\"`, []int{14, 15}, []int{0, 1}},\n\t}\n\tfor tcIndex, tc := range testCases 
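\/* tcIndex is echoed in failure messages *\/ 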
{\n\t\tt.Run(tc.marshalledBA, func(t *testing.T) {\n\t\t\tvar bA *CompactBitArray\n\t\t\terr := json.Unmarshal([]byte(tc.marshalledBA), &bA)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tfor i := 0; i < len(tc.bAIndex); i++ {\n\t\t\t\trequire.Equal(t, tc.trueValueIndex[i], bA.NumTrueBitsBefore(tc.bAIndex[i]), \"tc %d, i %d\", tcIndex, i)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCompactBitArrayGetSetIndex(t *testing.T) {\n\tr := rand.New(rand.NewSource(100))\n\tnumTests := 10\n\tnumBitsPerArr := 100\n\tfor i := 0; i < numTests; i++ {\n\t\tbits := r.Intn(1000)\n\t\tbA, _ := randCompactBitArray(bits)\n\n\t\tfor j := 0; j < numBitsPerArr; j++ {\n\t\t\tcopy := bA.Copy()\n\t\t\tindex := r.Intn(bits)\n\t\t\tval := (r.Int63() % 2) == 0\n\t\t\tbA.SetIndex(index, val)\n\t\t\trequire.Equal(t, val, bA.GetIndex(index), \"bA.SetIndex(%d, %v) failed on bit array: %s\", index, val, copy)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package rafttest\n\nimport (\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/Godeps\/_workspace\/src\/golang.org\/x\/net\/context\"\n\t\"github.com\/coreos\/etcd\/raft\"\n\t\"github.com\/coreos\/etcd\/raft\/raftpb\"\n)\n\ntype node struct {\n\traft.Node\n\tid uint64\n\tiface iface\n\tstopc chan struct{}\n\tpausec chan bool\n\n\t\/\/ stable\n\tstorage *raft.MemoryStorage\n\tstate raftpb.HardState\n}\n\nfunc startNode(id uint64, peers []raft.Peer, iface iface) *node {\n\tst := raft.NewMemoryStorage()\n\trn := raft.StartNode(id, peers, 10, 1, st)\n\tn := &node{\n\t\tNode: rn,\n\t\tid: id,\n\t\tstorage: st,\n\t\tiface: iface,\n\t\tpausec: make(chan bool),\n\t}\n\tn.start()\n\treturn n\n}\n\nfunc (n *node) start() {\n\tn.stopc = make(chan struct{})\n\tticker := time.Tick(5 * time.Millisecond)\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker:\n\t\t\t\tn.Tick()\n\t\t\tcase rd := <-n.Ready():\n\t\t\t\tif !raft.IsEmptyHardState(rd.HardState) {\n\t\t\t\t\tn.state = rd.HardState\n\t\t\t\t\tn.storage.SetHardState(n.state)\n\t\t\t\t}\n\t\t\t\tn.storage.Append(rd.Entries)\n\t\t\t\t\/\/ TODO: make send async, more like real world...\n\t\t\t\tfor _, m := range rd.Messages {\n\t\t\t\t\tn.iface.send(m)\n\t\t\t\t}\n\t\t\t\tn.Advance()\n\t\t\tcase m := <-n.iface.recv():\n\t\t\t\tn.Step(context.TODO(), m)\n\t\t\tcase <-n.stopc:\n\t\t\t\tn.Stop()\n\t\t\t\traftLogger.Infof(\"raft.%d: stop\", n.id)\n\t\t\t\tn.Node = nil\n\t\t\t\tclose(n.stopc)\n\t\t\t\treturn\n\t\t\tcase p := <-n.pausec:\n\t\t\t\trecvms := make([]raftpb.Message, 0)\n\t\t\t\tfor p {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase m := <-n.iface.recv():\n\t\t\t\t\t\trecvms = append(recvms, m)\n\t\t\t\t\tcase p = <-n.pausec:\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\/\/ step all pending messages\n\t\t\t\tfor _, m := range recvms {\n\t\t\t\t\tn.Step(context.TODO(), m)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ stop stops the node. stop a stopped node might panic.\n\/\/ All in memory state of node is discarded.\n\/\/ All stable MUST be unchanged.\nfunc (n *node) stop() {\n\tn.iface.disconnect()\n\tn.stopc <- struct{}{}\n\t\/\/ wait for the shutdown\n\t<-n.stopc\n}\n\n\/\/ restart restarts the node. 
restart a started node\n\/\/ blocks and might affect the future stop operation.\nfunc (n *node) restart() {\n\t\/\/ wait for the shutdown\n\t<-n.stopc\n\tn.Node = raft.RestartNode(n.id, 10, 1, n.storage, 0)\n\tn.start()\n\tn.iface.connect()\n}\n\n\/\/ pause pauses the node.\n\/\/ The paused node buffers the received messages and replies\n\/\/ all of them when it resumes.\nfunc (n *node) pause() {\n\tn.pausec <- true\n}\n\n\/\/ resume resumes the paused node.\nfunc (n *node) resume() {\n\tn.pausec <- false\n}\n<commit_msg>rafttest: fix build error<commit_after>package rafttest\n\nimport (\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/Godeps\/_workspace\/src\/golang.org\/x\/net\/context\"\n\t\"github.com\/coreos\/etcd\/raft\"\n\t\"github.com\/coreos\/etcd\/raft\/raftpb\"\n)\n\ntype node struct {\n\traft.Node\n\tid uint64\n\tiface iface\n\tstopc chan struct{}\n\tpausec chan bool\n\n\t\/\/ stable\n\tstorage *raft.MemoryStorage\n\tstate raftpb.HardState\n}\n\nfunc startNode(id uint64, peers []raft.Peer, iface iface) *node {\n\tst := raft.NewMemoryStorage()\n\trn := raft.StartNode(id, peers, 10, 1, st)\n\tn := &node{\n\t\tNode: rn,\n\t\tid: id,\n\t\tstorage: st,\n\t\tiface: iface,\n\t\tpausec: make(chan bool),\n\t}\n\tn.start()\n\treturn n\n}\n\nfunc (n *node) start() {\n\tn.stopc = make(chan struct{})\n\tticker := time.Tick(5 * time.Millisecond)\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker:\n\t\t\t\tn.Tick()\n\t\t\tcase rd := <-n.Ready():\n\t\t\t\tif !raft.IsEmptyHardState(rd.HardState) {\n\t\t\t\t\tn.state = rd.HardState\n\t\t\t\t\tn.storage.SetHardState(n.state)\n\t\t\t\t}\n\t\t\t\tn.storage.Append(rd.Entries)\n\t\t\t\t\/\/ TODO: make send async, more like real world...\n\t\t\t\tfor _, m := range rd.Messages {\n\t\t\t\t\tn.iface.send(m)\n\t\t\t\t}\n\t\t\t\tn.Advance()\n\t\t\tcase m := <-n.iface.recv():\n\t\t\t\tn.Step(context.TODO(), m)\n\t\t\tcase <-n.stopc:\n\t\t\t\tn.Stop()\n\t\t\t\tlog.Printf(\"raft.%d: stop\", n.id)\n\t\t\t\tn.Node = nil\n\t\t\t\tclose(n.stopc)\n\t\t\t\treturn\n\t\t\tcase p := <-n.pausec:\n\t\t\t\trecvms := make([]raftpb.Message, 0)\n\t\t\t\tfor p {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase m := <-n.iface.recv():\n\t\t\t\t\t\trecvms = append(recvms, m)\n\t\t\t\t\tcase p = <-n.pausec:\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\/\/ step all pending messages\n\t\t\t\tfor _, m := range recvms {\n\t\t\t\t\tn.Step(context.TODO(), m)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ stop stops the node. stop a stopped node might panic.\n\/\/ All in memory state of node is discarded.\n\/\/ All stable MUST be unchanged.\nfunc (n *node) stop() {\n\tn.iface.disconnect()\n\tn.stopc <- struct{}{}\n\t\/\/ wait for the shutdown\n\t<-n.stopc\n}\n\n\/\/ restart restarts the node. 
restart a started node\n\/\/ blocks and might affect the future stop operation.\nfunc (n *node) restart() {\n\t\/\/ wait for the shutdown\n\t<-n.stopc\n\tn.Node = raft.RestartNode(n.id, 10, 1, n.storage, 0)\n\tn.start()\n\tn.iface.connect()\n}\n\n\/\/ pause pauses the node.\n\/\/ The paused node buffers the received messages and replies\n\/\/ all of them when it resumes.\nfunc (n *node) pause() {\n\tn.pausec <- true\n}\n\n\/\/ resume resumes the paused node.\nfunc (n *node) resume() {\n\tn.pausec <- false\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/gin-gonic\/gin\"\n)\n\n\/\/ Version of IronFunctions\nvar Version = \"0.0.45\"\n\nfunc handleVersion(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\"version\": Version})\n}\n<commit_msg>functions: 0.0.46 release [skip ci]<commit_after>package server\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/gin-gonic\/gin\"\n)\n\n\/\/ Version of IronFunctions\nvar Version = \"0.0.46\"\n\nfunc handleVersion(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\"version\": Version})\n}\n<|endoftext|>"} {"text":"<commit_before>package connections\n\nimport (\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com\/sirupsen\/logrus\/hooks\/test\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc Test_ConnectionGroupManager_Add_GeneratesValidIDs(t *testing.T) {\n\tconst groupLimit = 10\n\tconst connsLimit = 100\n\n\tlogger, hook := test.NewNullLogger()\n\tdefer hook.Reset()\n\n\tm := &ConnectionGroupManager{\n\t\tgroups: map[int]*ConnectionGroup{},\n\t\tgroupsMutex: &sync.RWMutex{},\n\t\tgroupLimit: groupLimit,\n\t\tconnsLimit: connsLimit,\n\t\tconnsCount: 0,\n\t\tlogger: logger,\n\t}\n\n\tfor i := 0; i < groupLimit; i++ {\n\t\tid, err := m.Add(&ConnectionGroup{\n\t\t\tlimit: connsLimit \/ groupLimit,\n\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\tgame: nil,\n\t\t\tbroadcast: nil,\n\t\t\tlogger: logger,\n\t\t\tchs: nil,\n\t\t\tchsMux: nil,\n\t\t\tstop: nil,\n\t\t\tstopper: nil,\n\t\t})\n\t\trequire.Equal(t, m.connsCount, (1+i)*(connsLimit\/groupLimit), \"unexpected conns count\")\n\t\trequire.Equal(t, i+firstGroupId, id, \"unexpected group id\")\n\t\trequire.Nil(t, err, \"unexpected error\")\n\t}\n}\n\nfunc Test_ConnectionGroupManager_Add_GetErrGroupLimitReached(t *testing.T) {\n\tconst groupLimit = 2\n\tconst connsLimit = 100\n\n\tlogger, hook := test.NewNullLogger()\n\tdefer hook.Reset()\n\n\tm := &ConnectionGroupManager{\n\t\tgroups: map[int]*ConnectionGroup{\n\t\t\t1: {\n\t\t\t\tlimit: connsLimit \/ groupLimit,\n\t\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\t\tgame: nil,\n\t\t\t\tbroadcast: nil,\n\t\t\t\tlogger: logger,\n\t\t\t\tchs: nil,\n\t\t\t\tchsMux: nil,\n\t\t\t\tstop: nil,\n\t\t\t\tstopper: nil,\n\t\t\t},\n\t\t\t2: {\n\t\t\t\tlimit: connsLimit \/ groupLimit,\n\t\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\t\tgame: nil,\n\t\t\t\tbroadcast: nil,\n\t\t\t\tlogger: logger,\n\t\t\t\tchs: nil,\n\t\t\t\tchsMux: nil,\n\t\t\t\tstop: nil,\n\t\t\t\tstopper: nil,\n\t\t\t},\n\t\t},\n\t\tgroupsMutex: &sync.RWMutex{},\n\t\tgroupLimit: groupLimit,\n\t\tconnsLimit: connsLimit,\n\t\tlogger: logger,\n\t}\n\n\tid, err := m.Add(&ConnectionGroup{\n\t\tlimit: 1,\n\t\tcounterMux: &sync.RWMutex{},\n\t\tgame: nil,\n\t\tbroadcast: nil,\n\t\tlogger: logger,\n\t\tchs: nil,\n\t\tchsMux: nil,\n\t\tstop: nil,\n\t\tstopper: nil,\n\t})\n\n\trequire.Zero(t, id)\n\trequire.Equal(t, ErrGroupLimitReached, err)\n}\n\nfunc Test_ConnectionGroupManager_Add_GetErrConnsLimitReached(t *testing.T) {\n\tconst groupLimit = 10\n\tconst connsLimit = 
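\/* manager-wide connection cap *\/ 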
100\n\n\tlogger, hook := test.NewNullLogger()\n\tdefer hook.Reset()\n\n\tm := &ConnectionGroupManager{\n\t\tgroups: map[int]*ConnectionGroup{\n\t\t\t1: {\n\t\t\t\tlimit: 50,\n\t\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\t\tgame: nil,\n\t\t\t\tbroadcast: nil,\n\t\t\t\tlogger: logger,\n\t\t\t\tchs: nil,\n\t\t\t\tchsMux: nil,\n\t\t\t\tstop: nil,\n\t\t\t\tstopper: nil,\n\t\t\t},\n\t\t\t2: {\n\t\t\t\tlimit: 50,\n\t\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\t\tgame: nil,\n\t\t\t\tbroadcast: nil,\n\t\t\t\tlogger: logger,\n\t\t\t\tchs: nil,\n\t\t\t\tchsMux: nil,\n\t\t\t\tstop: nil,\n\t\t\t\tstopper: nil,\n\t\t\t},\n\t\t},\n\t\tgroupsMutex: &sync.RWMutex{},\n\t\tgroupLimit: groupLimit,\n\t\tconnsLimit: connsLimit,\n\t\tconnsCount: connsLimit,\n\t\tlogger: logger,\n\t}\n\n\tid, err := m.Add(&ConnectionGroup{\n\t\tlimit: 10,\n\t\tcounterMux: &sync.RWMutex{},\n\t\tgame: nil,\n\t\tbroadcast: nil,\n\t\tlogger: logger,\n\t\tchs: nil,\n\t\tchsMux: nil,\n\t\tstop: nil,\n\t\tstopper: nil,\n\t})\n\n\trequire.Zero(t, id)\n\trequire.Equal(t, ErrConnsLimitReached, err)\n}\n\nfunc Test_ConnectionGroupManager_Add_AddOneGroupGetValidID(t *testing.T) {\n\tconst groupLimit = 10\n\tconst connsLimit = 100\n\n\tlogger, hook := test.NewNullLogger()\n\tdefer hook.Reset()\n\n\tm := &ConnectionGroupManager{\n\t\tgroups: map[int]*ConnectionGroup{\n\t\t\t1: {\n\t\t\t\tlimit: 10,\n\t\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\t\tgame: nil,\n\t\t\t\tbroadcast: nil,\n\t\t\t\tlogger: logger,\n\t\t\t\tchs: nil,\n\t\t\t\tchsMux: nil,\n\t\t\t\tstop: nil,\n\t\t\t\tstopper: nil,\n\t\t\t},\n\t\t\t2: {\n\t\t\t\tlimit: 10,\n\t\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\t\tgame: nil,\n\t\t\t\tbroadcast: nil,\n\t\t\t\tlogger: logger,\n\t\t\t\tchs: nil,\n\t\t\t\tchsMux: nil,\n\t\t\t\tstop: nil,\n\t\t\t\tstopper: nil,\n\t\t\t},\n\t\t},\n\t\tgroupsMutex: &sync.RWMutex{},\n\t\tgroupLimit: groupLimit,\n\t\tconnsLimit: connsLimit,\n\t\tconnsCount: 20,\n\t\tlogger: logger,\n\t}\n\n\tid, err := m.Add(&ConnectionGroup{\n\t\tlimit: 10,\n\t\tcounterMux: &sync.RWMutex{},\n\t\tgame: nil,\n\t\tbroadcast: nil,\n\t\tlogger: logger,\n\t\tchs: nil,\n\t\tchsMux: nil,\n\t\tstop: nil,\n\t\tstopper: nil,\n\t})\n\n\trequire.Equal(t, 3, id)\n\trequire.Equal(t, 30, m.connsCount)\n\trequire.Nil(t, err)\n}\n\nfunc Test_ConnectionGroupManager_Add_InsertOneGroupGetValidID(t *testing.T) {\n\tconst groupLimit = 10\n\tconst connsLimit = 100\n\n\tlogger, hook := test.NewNullLogger()\n\tdefer hook.Reset()\n\n\tm := &ConnectionGroupManager{\n\t\tgroups: map[int]*ConnectionGroup{\n\t\t\t1: {\n\t\t\t\tlimit: 10,\n\t\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\t\tgame: nil,\n\t\t\t\tbroadcast: nil,\n\t\t\t\tlogger: logger,\n\t\t\t\tchs: nil,\n\t\t\t\tchsMux: nil,\n\t\t\t\tstop: nil,\n\t\t\t\tstopper: nil,\n\t\t\t},\n\t\t\t3: {\n\t\t\t\tlimit: 10,\n\t\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\t\tgame: nil,\n\t\t\t\tbroadcast: nil,\n\t\t\t\tlogger: logger,\n\t\t\t\tchs: nil,\n\t\t\t\tchsMux: nil,\n\t\t\t\tstop: nil,\n\t\t\t\tstopper: nil,\n\t\t\t},\n\t\t},\n\t\tgroupsMutex: &sync.RWMutex{},\n\t\tgroupLimit: groupLimit,\n\t\tconnsLimit: connsLimit,\n\t\tconnsCount: 20,\n\t\tlogger: logger,\n\t}\n\n\tid, err := m.Add(&ConnectionGroup{\n\t\tlimit: 10,\n\t\tcounterMux: &sync.RWMutex{},\n\t\tgame: nil,\n\t\tbroadcast: nil,\n\t\tlogger: logger,\n\t\tchs: nil,\n\t\tchsMux: nil,\n\t\tstop: nil,\n\t\tstopper: nil,\n\t})\n\n\trequire.Equal(t, 2, id)\n\trequire.Equal(t, 30, m.connsCount)\n\trequire.Nil(t, err)\n}\n\nfunc Test_ConnectionGroupManager_Delete_DeleteNotFoundGroup(t *testing.T) {\n\tconst 
groupLimit = 10\n\tconst connsLimit = 100\n\n\tlogger, hook := test.NewNullLogger()\n\tdefer hook.Reset()\n\n\tm := &ConnectionGroupManager{\n\t\tgroups: map[int]*ConnectionGroup{},\n\t\tgroupsMutex: &sync.RWMutex{},\n\t\tgroupLimit: groupLimit,\n\t\tconnsLimit: connsLimit,\n\t\tconnsCount: 0,\n\t\tlogger: logger,\n\t}\n\n\terr := m.Delete(&ConnectionGroup{\n\t\tlimit: 10,\n\t\tcounterMux: &sync.RWMutex{},\n\t\tgame: nil,\n\t\tbroadcast: nil,\n\t\tlogger: logger,\n\t\tchs: nil,\n\t\tchsMux: nil,\n\t\tstop: nil,\n\t\tstopper: nil,\n\t})\n\n\trequire.Equal(t, ErrDeleteNotFoundGroup, err)\n}\n\nfunc Test_ConnectionGroupManager_Delete_MethodDeletesGroupSuccessfully(t *testing.T) {\n\tconst (\n\t\tgroupLimit = 10\n\t\tconnsLimit = 100\n\t\tgroupID = 2\n\t)\n\n\tlogger, hook := test.NewNullLogger()\n\tdefer hook.Reset()\n\n\tgroup := &ConnectionGroup{\n\t\tlimit: 10,\n\t\tcounterMux: &sync.RWMutex{},\n\t\tgame: nil,\n\t\tbroadcast: nil,\n\t\tlogger: logger,\n\t\tchs: nil,\n\t\tchsMux: nil,\n\t\tstop: nil,\n\t\tstopper: nil,\n\t}\n\n\tm := &ConnectionGroupManager{\n\t\tgroups: map[int]*ConnectionGroup{\n\t\t\tgroupID: group,\n\t\t},\n\t\tgroupsMutex: &sync.RWMutex{},\n\t\tgroupLimit: groupLimit,\n\t\tconnsLimit: connsLimit,\n\t\tconnsCount: 10,\n\t\tlogger: logger,\n\t}\n\n\terr := m.Delete(group)\n\n\trequire.Nil(t, err)\n\trequire.Zero(t, m.connsCount)\n\trequire.Empty(t, m.groups)\n}\n<commit_msg>Create test for connection group manager for case of returning valid group count<commit_after>package connections\n\nimport (\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com\/sirupsen\/logrus\/hooks\/test\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc Test_ConnectionGroupManager_Add_GeneratesValidIDs(t *testing.T) {\n\tconst groupLimit = 10\n\tconst connsLimit = 100\n\n\tlogger, hook := test.NewNullLogger()\n\tdefer hook.Reset()\n\n\tm := &ConnectionGroupManager{\n\t\tgroups: map[int]*ConnectionGroup{},\n\t\tgroupsMutex: &sync.RWMutex{},\n\t\tgroupLimit: groupLimit,\n\t\tconnsLimit: connsLimit,\n\t\tconnsCount: 0,\n\t\tlogger: logger,\n\t}\n\n\tfor i := 0; i < groupLimit; i++ {\n\t\tid, err := m.Add(&ConnectionGroup{\n\t\t\tlimit: connsLimit \/ groupLimit,\n\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\tgame: nil,\n\t\t\tbroadcast: nil,\n\t\t\tlogger: logger,\n\t\t\tchs: nil,\n\t\t\tchsMux: nil,\n\t\t\tstop: nil,\n\t\t\tstopper: nil,\n\t\t})\n\t\trequire.Equal(t, m.connsCount, (1+i)*(connsLimit\/groupLimit), \"unexpected conns count\")\n\t\trequire.Equal(t, i+firstGroupId, id, \"unexpected group id\")\n\t\trequire.Nil(t, err, \"unexpected error\")\n\t}\n}\n\nfunc Test_ConnectionGroupManager_Add_GetErrGroupLimitReached(t *testing.T) {\n\tconst groupLimit = 2\n\tconst connsLimit = 100\n\n\tlogger, hook := test.NewNullLogger()\n\tdefer hook.Reset()\n\n\tm := &ConnectionGroupManager{\n\t\tgroups: map[int]*ConnectionGroup{\n\t\t\t1: {\n\t\t\t\tlimit: connsLimit \/ groupLimit,\n\t\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\t\tgame: nil,\n\t\t\t\tbroadcast: nil,\n\t\t\t\tlogger: logger,\n\t\t\t\tchs: nil,\n\t\t\t\tchsMux: nil,\n\t\t\t\tstop: nil,\n\t\t\t\tstopper: nil,\n\t\t\t},\n\t\t\t2: {\n\t\t\t\tlimit: connsLimit \/ groupLimit,\n\t\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\t\tgame: nil,\n\t\t\t\tbroadcast: nil,\n\t\t\t\tlogger: logger,\n\t\t\t\tchs: nil,\n\t\t\t\tchsMux: nil,\n\t\t\t\tstop: nil,\n\t\t\t\tstopper: nil,\n\t\t\t},\n\t\t},\n\t\tgroupsMutex: &sync.RWMutex{},\n\t\tgroupLimit: groupLimit,\n\t\tconnsLimit: connsLimit,\n\t\tlogger: logger,\n\t}\n\n\tid, err := m.Add(&ConnectionGroup{\n\t\tlimit: 
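\/* smallest possible group; still rejected *\/ 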
1,\n\t\tcounterMux: &sync.RWMutex{},\n\t\tgame: nil,\n\t\tbroadcast: nil,\n\t\tlogger: logger,\n\t\tchs: nil,\n\t\tchsMux: nil,\n\t\tstop: nil,\n\t\tstopper: nil,\n\t})\n\n\trequire.Zero(t, id)\n\trequire.Equal(t, ErrGroupLimitReached, err)\n}\n\nfunc Test_ConnectionGroupManager_Add_GetErrConnsLimitReached(t *testing.T) {\n\tconst groupLimit = 10\n\tconst connsLimit = 100\n\n\tlogger, hook := test.NewNullLogger()\n\tdefer hook.Reset()\n\n\tm := &ConnectionGroupManager{\n\t\tgroups: map[int]*ConnectionGroup{\n\t\t\t1: {\n\t\t\t\tlimit: 50,\n\t\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\t\tgame: nil,\n\t\t\t\tbroadcast: nil,\n\t\t\t\tlogger: logger,\n\t\t\t\tchs: nil,\n\t\t\t\tchsMux: nil,\n\t\t\t\tstop: nil,\n\t\t\t\tstopper: nil,\n\t\t\t},\n\t\t\t2: {\n\t\t\t\tlimit: 50,\n\t\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\t\tgame: nil,\n\t\t\t\tbroadcast: nil,\n\t\t\t\tlogger: logger,\n\t\t\t\tchs: nil,\n\t\t\t\tchsMux: nil,\n\t\t\t\tstop: nil,\n\t\t\t\tstopper: nil,\n\t\t\t},\n\t\t},\n\t\tgroupsMutex: &sync.RWMutex{},\n\t\tgroupLimit: groupLimit,\n\t\tconnsLimit: connsLimit,\n\t\tconnsCount: connsLimit,\n\t\tlogger: logger,\n\t}\n\n\tid, err := m.Add(&ConnectionGroup{\n\t\tlimit: 10,\n\t\tcounterMux: &sync.RWMutex{},\n\t\tgame: nil,\n\t\tbroadcast: nil,\n\t\tlogger: logger,\n\t\tchs: nil,\n\t\tchsMux: nil,\n\t\tstop: nil,\n\t\tstopper: nil,\n\t})\n\n\trequire.Zero(t, id)\n\trequire.Equal(t, ErrConnsLimitReached, err)\n}\n\nfunc Test_ConnectionGroupManager_Add_AddOneGroupGetValidID(t *testing.T) {\n\tconst groupLimit = 10\n\tconst connsLimit = 100\n\n\tlogger, hook := test.NewNullLogger()\n\tdefer hook.Reset()\n\n\tm := &ConnectionGroupManager{\n\t\tgroups: map[int]*ConnectionGroup{\n\t\t\t1: {\n\t\t\t\tlimit: 10,\n\t\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\t\tgame: nil,\n\t\t\t\tbroadcast: nil,\n\t\t\t\tlogger: logger,\n\t\t\t\tchs: nil,\n\t\t\t\tchsMux: nil,\n\t\t\t\tstop: nil,\n\t\t\t\tstopper: nil,\n\t\t\t},\n\t\t\t2: {\n\t\t\t\tlimit: 10,\n\t\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\t\tgame: nil,\n\t\t\t\tbroadcast: nil,\n\t\t\t\tlogger: logger,\n\t\t\t\tchs: nil,\n\t\t\t\tchsMux: nil,\n\t\t\t\tstop: nil,\n\t\t\t\tstopper: nil,\n\t\t\t},\n\t\t},\n\t\tgroupsMutex: &sync.RWMutex{},\n\t\tgroupLimit: groupLimit,\n\t\tconnsLimit: connsLimit,\n\t\tconnsCount: 20,\n\t\tlogger: logger,\n\t}\n\n\tid, err := m.Add(&ConnectionGroup{\n\t\tlimit: 10,\n\t\tcounterMux: &sync.RWMutex{},\n\t\tgame: nil,\n\t\tbroadcast: nil,\n\t\tlogger: logger,\n\t\tchs: nil,\n\t\tchsMux: nil,\n\t\tstop: nil,\n\t\tstopper: nil,\n\t})\n\n\trequire.Equal(t, 3, id)\n\trequire.Equal(t, 30, m.connsCount)\n\trequire.Nil(t, err)\n}\n\nfunc Test_ConnectionGroupManager_Add_InsertOneGroupGetValidID(t *testing.T) {\n\tconst groupLimit = 10\n\tconst connsLimit = 100\n\n\tlogger, hook := test.NewNullLogger()\n\tdefer hook.Reset()\n\n\tm := &ConnectionGroupManager{\n\t\tgroups: map[int]*ConnectionGroup{\n\t\t\t1: {\n\t\t\t\tlimit: 10,\n\t\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\t\tgame: nil,\n\t\t\t\tbroadcast: nil,\n\t\t\t\tlogger: logger,\n\t\t\t\tchs: nil,\n\t\t\t\tchsMux: nil,\n\t\t\t\tstop: nil,\n\t\t\t\tstopper: nil,\n\t\t\t},\n\t\t\t3: {\n\t\t\t\tlimit: 10,\n\t\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\t\tgame: nil,\n\t\t\t\tbroadcast: nil,\n\t\t\t\tlogger: logger,\n\t\t\t\tchs: nil,\n\t\t\t\tchsMux: nil,\n\t\t\t\tstop: nil,\n\t\t\t\tstopper: nil,\n\t\t\t},\n\t\t},\n\t\tgroupsMutex: &sync.RWMutex{},\n\t\tgroupLimit: groupLimit,\n\t\tconnsLimit: connsLimit,\n\t\tconnsCount: 20,\n\t\tlogger: logger,\n\t}\n\n\tid, err := 
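\/* third group in the manager; expect id 3 *\/ 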
m.Add(&ConnectionGroup{\n\t\tlimit: 10,\n\t\tcounterMux: &sync.RWMutex{},\n\t\tgame: nil,\n\t\tbroadcast: nil,\n\t\tlogger: logger,\n\t\tchs: nil,\n\t\tchsMux: nil,\n\t\tstop: nil,\n\t\tstopper: nil,\n\t})\n\n\trequire.Equal(t, 2, id)\n\trequire.Equal(t, 30, m.connsCount)\n\trequire.Nil(t, err)\n}\n\nfunc Test_ConnectionGroupManager_Delete_DeleteNotFoundGroup(t *testing.T) {\n\tconst groupLimit = 10\n\tconst connsLimit = 100\n\n\tlogger, hook := test.NewNullLogger()\n\tdefer hook.Reset()\n\n\tm := &ConnectionGroupManager{\n\t\tgroups: map[int]*ConnectionGroup{},\n\t\tgroupsMutex: &sync.RWMutex{},\n\t\tgroupLimit: groupLimit,\n\t\tconnsLimit: connsLimit,\n\t\tconnsCount: 0,\n\t\tlogger: logger,\n\t}\n\n\terr := m.Delete(&ConnectionGroup{\n\t\tlimit: 10,\n\t\tcounterMux: &sync.RWMutex{},\n\t\tgame: nil,\n\t\tbroadcast: nil,\n\t\tlogger: logger,\n\t\tchs: nil,\n\t\tchsMux: nil,\n\t\tstop: nil,\n\t\tstopper: nil,\n\t})\n\n\trequire.Equal(t, ErrDeleteNotFoundGroup, err)\n}\n\nfunc Test_ConnectionGroupManager_Delete_MethodDeletesGroupSuccessfully(t *testing.T) {\n\tconst (\n\t\tgroupLimit = 10\n\t\tconnsLimit = 100\n\t\tgroupID = 2\n\t)\n\n\tlogger, hook := test.NewNullLogger()\n\tdefer hook.Reset()\n\n\tgroup := &ConnectionGroup{\n\t\tlimit: 10,\n\t\tcounterMux: &sync.RWMutex{},\n\t\tgame: nil,\n\t\tbroadcast: nil,\n\t\tlogger: logger,\n\t\tchs: nil,\n\t\tchsMux: nil,\n\t\tstop: nil,\n\t\tstopper: nil,\n\t}\n\n\tm := &ConnectionGroupManager{\n\t\tgroups: map[int]*ConnectionGroup{\n\t\t\tgroupID: group,\n\t\t},\n\t\tgroupsMutex: &sync.RWMutex{},\n\t\tgroupLimit: groupLimit,\n\t\tconnsLimit: connsLimit,\n\t\tconnsCount: 10,\n\t\tlogger: logger,\n\t}\n\n\terr := m.Delete(group)\n\n\trequire.Nil(t, err)\n\trequire.Zero(t, m.connsCount)\n\trequire.Empty(t, m.groups)\n}\n\nfunc Test_ConnectionGroupManager_GroupCount_ReturnsValidGroupCount(t *testing.T) {\n\tconst groupLimit = 10\n\tconst connsLimit = 100\n\n\tlogger, hook := test.NewNullLogger()\n\tdefer hook.Reset()\n\n\tm := &ConnectionGroupManager{\n\t\tgroups: map[int]*ConnectionGroup{\n\t\t\t1: {\n\t\t\t\tlimit: 10,\n\t\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\t\tgame: nil,\n\t\t\t\tbroadcast: nil,\n\t\t\t\tlogger: logger,\n\t\t\t\tchs: nil,\n\t\t\t\tchsMux: nil,\n\t\t\t\tstop: nil,\n\t\t\t\tstopper: nil,\n\t\t\t},\n\t\t\t3: {\n\t\t\t\tlimit: 10,\n\t\t\t\tcounterMux: &sync.RWMutex{},\n\t\t\t\tgame: nil,\n\t\t\t\tbroadcast: nil,\n\t\t\t\tlogger: logger,\n\t\t\t\tchs: nil,\n\t\t\t\tchsMux: nil,\n\t\t\t\tstop: nil,\n\t\t\t\tstopper: nil,\n\t\t\t},\n\t\t},\n\t\tgroupsMutex: &sync.RWMutex{},\n\t\tgroupLimit: groupLimit,\n\t\tconnsLimit: connsLimit,\n\t\tconnsCount: 20,\n\t\tlogger: logger,\n\t}\n\n\tactualCount := m.GroupCount()\n\n\trequire.Equal(t, 2, actualCount)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 tsuru authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"github.com\/bmizerany\/pat\"\n\t\"github.com\/globocom\/config\"\n\t\"github.com\/globocom\/tsuru\/api\/app\"\n\t\"github.com\/globocom\/tsuru\/api\/auth\"\n\t\"github.com\/globocom\/tsuru\/api\/service\/consumption\"\n\t\"github.com\/globocom\/tsuru\/api\/service\/provision\"\n\t\"github.com\/globocom\/tsuru\/db\"\n\t\"github.com\/globocom\/tsuru\/log\"\n\tstdlog \"log\"\n\t\"log\/syslog\"\n\t\"net\/http\"\n)\n\nfunc fatal(err error) {\n\tfmt.Fprintln(os.Stderr, err)\n\tlog.Fatal(err)\n}\n\nfunc main() {\n\tlogger, err := syslog.NewLogger(syslog.LOG_INFO, stdlog.LstdFlags)\n\tif err != nil {\n\t\tstdlog.Fatal(err)\n\t}\n\tlog.SetLogger(logger)\n\tconfigFile := flag.String(\"config\", \"\/etc\/tsuru\/tsuru.conf\", \"tsuru config file\")\n\tdry := flag.Bool(\"dry\", false, \"dry-run: does not start the server (for testing purpose)\")\n\tflag.Parse()\n\terr = config.ReadConfigFile(*configFile)\n\tif err != nil {\n\t\tfatal(err)\n\t}\n\tconnString, err := config.GetString(\"database:url\")\n\tif err != nil {\n\t\tfatal(err)\n\t}\n\tdbName, err := config.GetString(\"database:name\")\n\tif err != nil {\n\t\tfatal(err)\n\t}\n\tdb.Session, err = db.Open(connString, dbName)\n\tif err != nil {\n\t\tfatal(err)\n\t}\n\tdefer db.Session.Close()\n\n\tm := pat.New()\n\n\tm.Get(\"\/services\/instances\", AuthorizationRequiredHandler(consumption.ServicesInstancesHandler))\n\tm.Post(\"\/services\/instances\", AuthorizationRequiredHandler(consumption.CreateInstanceHandler))\n\tm.Put(\"\/services\/instances\/:instance\/:app\", AuthorizationRequiredHandler(app.BindHandler))\n\tm.Del(\"\/services\/instances\/:instance\/:app\", AuthorizationRequiredHandler(app.UnbindHandler))\n\tm.Del(\"\/services\/c\/instances\/:name\", AuthorizationRequiredHandler(consumption.RemoveServiceInstanceHandler))\n\tm.Get(\"\/services\/instances\/:instance\/status\", AuthorizationRequiredHandler(consumption.ServiceInstanceStatusHandler))\n\n\tm.Get(\"\/services\", AuthorizationRequiredHandler(provision.ServicesHandler))\n\tm.Post(\"\/services\", AuthorizationRequiredHandler(provision.CreateHandler))\n\tm.Put(\"\/services\", AuthorizationRequiredHandler(provision.UpdateHandler))\n\tm.Del(\"\/services\/:name\", AuthorizationRequiredHandler(provision.DeleteHandler))\n\tm.Get(\"\/services\/:name\", AuthorizationRequiredHandler(consumption.ServiceInfoHandler))\n\tm.Get(\"\/services\/c\/:name\/doc\", AuthorizationRequiredHandler(consumption.Doc))\n\tm.Get(\"\/services\/:name\/doc\", AuthorizationRequiredHandler(provision.GetDocHandler))\n\tm.Put(\"\/services\/:name\/doc\", AuthorizationRequiredHandler(provision.AddDocHandler))\n\tm.Put(\"\/services\/:service\/:team\", AuthorizationRequiredHandler(provision.GrantAccessToTeamHandler))\n\tm.Del(\"\/services\/:service\/:team\", AuthorizationRequiredHandler(provision.RevokeAccessFromTeamHandler))\n\n\tm.Del(\"\/apps\/:name\", AuthorizationRequiredHandler(app.AppDelete))\n\tm.Get(\"\/apps\/:name\/repository\/clone\", Handler(app.CloneRepositoryHandler))\n\tm.Get(\"\/apps\/:name\/avaliable\", Handler(app.AppIsAvaliableHandler))\n\tm.Get(\"\/apps\/:name\", AuthorizationRequiredHandler(app.AppInfo))\n\tm.Post(\"\/apps\/:name\/run\", AuthorizationRequiredHandler(app.RunCommand))\n\tm.Get(\"\/apps\/:name\/restart\", AuthorizationRequiredHandler(app.RestartHandler))\n\tm.Get(\"\/apps\/:name\/env\", 
AuthorizationRequiredHandler(app.GetEnv))\n\tm.Post(\"\/apps\/:name\/env\", AuthorizationRequiredHandler(app.SetEnv))\n\tm.Del(\"\/apps\/:name\/env\", AuthorizationRequiredHandler(app.UnsetEnv))\n\tm.Get(\"\/apps\", AuthorizationRequiredHandler(app.AppList))\n\tm.Post(\"\/apps\", AuthorizationRequiredHandler(app.CreateAppHandler))\n\tm.Put(\"\/apps\/:app\/:team\", AuthorizationRequiredHandler(app.GrantAccessToTeamHandler))\n\tm.Del(\"\/apps\/:app\/:team\", AuthorizationRequiredHandler(app.RevokeAccessFromTeamHandler))\n\tm.Get(\"\/apps\/:name\/log\", AuthorizationRequiredHandler(app.AppLog))\n\tm.Post(\"\/apps\/:name\/log\", Handler(app.AddLogHandler))\n\n\tm.Post(\"\/users\", Handler(auth.CreateUser))\n\tm.Post(\"\/users\/:email\/tokens\", Handler(auth.Login))\n\tm.Del(\"\/users\", AuthorizationRequiredHandler(auth.RemoveUser))\n\tm.Post(\"\/users\/keys\", AuthorizationRequiredHandler(auth.AddKeyToUser))\n\tm.Del(\"\/users\/keys\", AuthorizationRequiredHandler(auth.RemoveKeyFromUser))\n\n\tm.Get(\"\/teams\", AuthorizationRequiredHandler(auth.ListTeams))\n\tm.Post(\"\/teams\", AuthorizationRequiredHandler(auth.CreateTeam))\n\tm.Del(\"\/teams\/:name\", AuthorizationRequiredHandler(auth.RemoveTeam))\n\tm.Put(\"\/teams\/:team\/:user\", AuthorizationRequiredHandler(auth.AddUserToTeam))\n\tm.Del(\"\/teams\/:team\/:user\", AuthorizationRequiredHandler(auth.RemoveUserFromTeam))\n\n\tif !*dry {\n\t\tlisten, err := config.GetString(\"listen\")\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\tfatal(http.ListenAndServe(listen, m))\n\t}\n}\n<commit_msg>api\/webserver: fix compilation<commit_after>\/\/ Copyright 2012 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/bmizerany\/pat\"\n\t\"github.com\/globocom\/config\"\n\t\"github.com\/globocom\/tsuru\/api\/app\"\n\t\"github.com\/globocom\/tsuru\/api\/auth\"\n\t\"github.com\/globocom\/tsuru\/api\/service\/consumption\"\n\t\"github.com\/globocom\/tsuru\/api\/service\/provision\"\n\t\"github.com\/globocom\/tsuru\/db\"\n\t\"github.com\/globocom\/tsuru\/log\"\n\tstdlog \"log\"\n\t\"log\/syslog\"\n\t\"net\/http\"\n\t\"os\"\n)\n\nfunc fatal(err error) {\n\tfmt.Fprintln(os.Stderr, err)\n\tlog.Fatal(err)\n}\n\nfunc main() {\n\tlogger, err := syslog.NewLogger(syslog.LOG_INFO, stdlog.LstdFlags)\n\tif err != nil {\n\t\tstdlog.Fatal(err)\n\t}\n\tlog.SetLogger(logger)\n\tconfigFile := flag.String(\"config\", \"\/etc\/tsuru\/tsuru.conf\", \"tsuru config file\")\n\tdry := flag.Bool(\"dry\", false, \"dry-run: does not start the server (for testing purpose)\")\n\tflag.Parse()\n\terr = config.ReadConfigFile(*configFile)\n\tif err != nil {\n\t\tfatal(err)\n\t}\n\tconnString, err := config.GetString(\"database:url\")\n\tif err != nil {\n\t\tfatal(err)\n\t}\n\tdbName, err := config.GetString(\"database:name\")\n\tif err != nil {\n\t\tfatal(err)\n\t}\n\tdb.Session, err = db.Open(connString, dbName)\n\tif err != nil {\n\t\tfatal(err)\n\t}\n\tdefer db.Session.Close()\n\n\tm := pat.New()\n\n\tm.Get(\"\/services\/instances\", AuthorizationRequiredHandler(consumption.ServicesInstancesHandler))\n\tm.Post(\"\/services\/instances\", AuthorizationRequiredHandler(consumption.CreateInstanceHandler))\n\tm.Put(\"\/services\/instances\/:instance\/:app\", AuthorizationRequiredHandler(app.BindHandler))\n\tm.Del(\"\/services\/instances\/:instance\/:app\", 
AuthorizationRequiredHandler(app.UnbindHandler))\n\tm.Del(\"\/services\/c\/instances\/:name\", AuthorizationRequiredHandler(consumption.RemoveServiceInstanceHandler))\n\tm.Get(\"\/services\/instances\/:instance\/status\", AuthorizationRequiredHandler(consumption.ServiceInstanceStatusHandler))\n\n\tm.Get(\"\/services\", AuthorizationRequiredHandler(provision.ServicesHandler))\n\tm.Post(\"\/services\", AuthorizationRequiredHandler(provision.CreateHandler))\n\tm.Put(\"\/services\", AuthorizationRequiredHandler(provision.UpdateHandler))\n\tm.Del(\"\/services\/:name\", AuthorizationRequiredHandler(provision.DeleteHandler))\n\tm.Get(\"\/services\/:name\", AuthorizationRequiredHandler(consumption.ServiceInfoHandler))\n\tm.Get(\"\/services\/c\/:name\/doc\", AuthorizationRequiredHandler(consumption.Doc))\n\tm.Get(\"\/services\/:name\/doc\", AuthorizationRequiredHandler(provision.GetDocHandler))\n\tm.Put(\"\/services\/:name\/doc\", AuthorizationRequiredHandler(provision.AddDocHandler))\n\tm.Put(\"\/services\/:service\/:team\", AuthorizationRequiredHandler(provision.GrantAccessToTeamHandler))\n\tm.Del(\"\/services\/:service\/:team\", AuthorizationRequiredHandler(provision.RevokeAccessFromTeamHandler))\n\n\tm.Del(\"\/apps\/:name\", AuthorizationRequiredHandler(app.AppDelete))\n\tm.Get(\"\/apps\/:name\/repository\/clone\", Handler(app.CloneRepositoryHandler))\n\tm.Get(\"\/apps\/:name\/avaliable\", Handler(app.AppIsAvaliableHandler))\n\tm.Get(\"\/apps\/:name\", AuthorizationRequiredHandler(app.AppInfo))\n\tm.Post(\"\/apps\/:name\/run\", AuthorizationRequiredHandler(app.RunCommand))\n\tm.Get(\"\/apps\/:name\/restart\", AuthorizationRequiredHandler(app.RestartHandler))\n\tm.Get(\"\/apps\/:name\/env\", AuthorizationRequiredHandler(app.GetEnv))\n\tm.Post(\"\/apps\/:name\/env\", AuthorizationRequiredHandler(app.SetEnv))\n\tm.Del(\"\/apps\/:name\/env\", AuthorizationRequiredHandler(app.UnsetEnv))\n\tm.Get(\"\/apps\", AuthorizationRequiredHandler(app.AppList))\n\tm.Post(\"\/apps\", AuthorizationRequiredHandler(app.CreateAppHandler))\n\tm.Put(\"\/apps\/:app\/:team\", AuthorizationRequiredHandler(app.GrantAccessToTeamHandler))\n\tm.Del(\"\/apps\/:app\/:team\", AuthorizationRequiredHandler(app.RevokeAccessFromTeamHandler))\n\tm.Get(\"\/apps\/:name\/log\", AuthorizationRequiredHandler(app.AppLog))\n\tm.Post(\"\/apps\/:name\/log\", Handler(app.AddLogHandler))\n\n\tm.Post(\"\/users\", Handler(auth.CreateUser))\n\tm.Post(\"\/users\/:email\/tokens\", Handler(auth.Login))\n\tm.Del(\"\/users\", AuthorizationRequiredHandler(auth.RemoveUser))\n\tm.Post(\"\/users\/keys\", AuthorizationRequiredHandler(auth.AddKeyToUser))\n\tm.Del(\"\/users\/keys\", AuthorizationRequiredHandler(auth.RemoveKeyFromUser))\n\n\tm.Get(\"\/teams\", AuthorizationRequiredHandler(auth.ListTeams))\n\tm.Post(\"\/teams\", AuthorizationRequiredHandler(auth.CreateTeam))\n\tm.Del(\"\/teams\/:name\", AuthorizationRequiredHandler(auth.RemoveTeam))\n\tm.Put(\"\/teams\/:team\/:user\", AuthorizationRequiredHandler(auth.AddUserToTeam))\n\tm.Del(\"\/teams\/:team\/:user\", AuthorizationRequiredHandler(auth.RemoveUserFromTeam))\n\n\tif !*dry {\n\t\tlisten, err := config.GetString(\"listen\")\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\tfatal(http.ListenAndServe(listen, m))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport 
(\n\t\"flag\"\n\t\"github.com\/bmizerany\/pat\"\n\t\"github.com\/timeredbull\/tsuru\/api\/app\"\n\t\"github.com\/timeredbull\/tsuru\/api\/auth\"\n\t\"github.com\/timeredbull\/tsuru\/api\/service\"\n\t\"github.com\/timeredbull\/tsuru\/config\"\n\t\"github.com\/timeredbull\/tsuru\/db\"\n\t\"github.com\/timeredbull\/tsuru\/log\"\n\t\"github.com\/timeredbull\/tsuru\/repository\"\n\tstdlog \"log\"\n\t\"log\/syslog\"\n\t\"net\/http\"\n)\n\nfunc main() {\n\tvar err error\n\tlog.Target, err = syslog.NewLogger(syslog.LOG_INFO, stdlog.LstdFlags)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tconfigFile := flag.String(\"config\", \"\/etc\/tsuru\/tsuru.conf\", \"tsuru config file\")\n\tdry := flag.Bool(\"dry\", false, \"dry-run: does not start the server (for testing purpose)\")\n\tflag.Parse()\n\terr = config.ReadConfigFile(*configFile)\n\tif err != nil {\n\t\tlog.Panic(err.Error())\n\t}\n\tconnString, err := config.GetString(\"database:url\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdbName, err := config.GetString(\"database:name\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdb.Session, err = db.Open(connString, dbName)\n\tif err != nil {\n\t\tlog.Panic(err.Error())\n\t}\n\tdefer db.Session.Close()\n\n\trepository.RunAgent()\n\tm := pat.New()\n\n\tm.Post(\"\/services\", AuthorizationRequiredHandler(service.CreateHandler))\n\tm.Get(\"\/services\", AuthorizationRequiredHandler(service.ServicesHandler))\n\tm.Del(\"\/services\/:name\", AuthorizationRequiredHandler(service.DeleteHandler))\n\t\/\/ m.Post(\"\/services\/bind\", AuthorizationRequiredHandler(service.BindHandler))\n\t\/\/ m.Post(\"\/services\/unbind\", AuthorizationRequiredHandler(service.UnbindHandler))\n\tm.Put(\"\/services\/:service\/:team\", AuthorizationRequiredHandler(service.GrantAccessToTeamHandler))\n\tm.Del(\"\/services\/:service\/:team\", AuthorizationRequiredHandler(service.RevokeAccessFromTeamHandler))\n\n\tm.Del(\"\/apps\/:name\", AuthorizationRequiredHandler(app.AppDelete))\n\tm.Get(\"\/apps\/:name\/repository\/clone\", Handler(app.CloneRepositoryHandler))\n\tm.Get(\"\/apps\/:name\", AuthorizationRequiredHandler(app.AppInfo))\n\tm.Post(\"\/apps\/:name\/run\", AuthorizationRequiredHandler(app.RunCommand))\n\tm.Get(\"\/apps\/:name\/env\", AuthorizationRequiredHandler(app.GetEnv))\n\tm.Post(\"\/apps\/:name\/env\", AuthorizationRequiredHandler(app.SetEnv))\n\tm.Del(\"\/apps\/:name\/env\", AuthorizationRequiredHandler(app.UnsetEnv))\n\tm.Get(\"\/apps\", AuthorizationRequiredHandler(app.AppList))\n\tm.Post(\"\/apps\", AuthorizationRequiredHandler(app.CreateAppHandler))\n\tm.Put(\"\/apps\/:app\/:team\", AuthorizationRequiredHandler(app.GrantAccessToTeamHandler))\n\tm.Del(\"\/apps\/:app\/:team\", AuthorizationRequiredHandler(app.RevokeAccessFromTeamHandler))\n\tm.Get(\"\/apps\/:name\/log\", AuthorizationRequiredHandler(app.AppLog))\n\n\tm.Post(\"\/users\", Handler(auth.CreateUser))\n\tm.Post(\"\/users\/:email\/tokens\", Handler(auth.Login))\n\tm.Post(\"\/users\/keys\", AuthorizationRequiredHandler(auth.AddKeyToUser))\n\tm.Del(\"\/users\/keys\", AuthorizationRequiredHandler(auth.RemoveKeyFromUser))\n\n\tm.Post(\"\/teams\", AuthorizationRequiredHandler(auth.CreateTeam))\n\tm.Put(\"\/teams\/:team\/:user\", AuthorizationRequiredHandler(auth.AddUserToTeam))\n\tm.Del(\"\/teams\/:team\/:user\", AuthorizationRequiredHandler(auth.RemoveUserFromTeam))\n\n\tlisten, err := config.GetString(\"listen\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif !*dry {\n\t\tlog.Fatal(http.ListenAndServe(listen, m))\n\t}\n}\n<commit_msg>api\/webserver: register 
ListServiceInstances handler<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"github.com\/bmizerany\/pat\"\n\t\"github.com\/timeredbull\/tsuru\/api\/app\"\n\t\"github.com\/timeredbull\/tsuru\/api\/auth\"\n\t\"github.com\/timeredbull\/tsuru\/api\/service\"\n\t\"github.com\/timeredbull\/tsuru\/config\"\n\t\"github.com\/timeredbull\/tsuru\/db\"\n\t\"github.com\/timeredbull\/tsuru\/log\"\n\t\"github.com\/timeredbull\/tsuru\/repository\"\n\tstdlog \"log\"\n\t\"log\/syslog\"\n\t\"net\/http\"\n)\n\nfunc main() {\n\tvar err error\n\tlog.Target, err = syslog.NewLogger(syslog.LOG_INFO, stdlog.LstdFlags)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tconfigFile := flag.String(\"config\", \"\/etc\/tsuru\/tsuru.conf\", \"tsuru config file\")\n\tdry := flag.Bool(\"dry\", false, \"dry-run: does not start the server (for testing purpose)\")\n\tflag.Parse()\n\terr = config.ReadConfigFile(*configFile)\n\tif err != nil {\n\t\tlog.Panic(err.Error())\n\t}\n\tconnString, err := config.GetString(\"database:url\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdbName, err := config.GetString(\"database:name\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdb.Session, err = db.Open(connString, dbName)\n\tif err != nil {\n\t\tlog.Panic(err.Error())\n\t}\n\tdefer db.Session.Close()\n\n\trepository.RunAgent()\n\tm := pat.New()\n\n\tm.Get(\"\/services\/instances\", AuthorizationRequiredHandler(service.ListServiceInstances))\n\n\tm.Post(\"\/services\", AuthorizationRequiredHandler(service.CreateHandler))\n\tm.Get(\"\/services\", AuthorizationRequiredHandler(service.ServicesHandler))\n\tm.Del(\"\/services\/:name\", AuthorizationRequiredHandler(service.DeleteHandler))\n\t\/\/ m.Post(\"\/services\/bind\", AuthorizationRequiredHandler(service.BindHandler))\n\t\/\/ m.Post(\"\/services\/unbind\", AuthorizationRequiredHandler(service.UnbindHandler))\n\tm.Put(\"\/services\/:service\/:team\", AuthorizationRequiredHandler(service.GrantAccessToTeamHandler))\n\tm.Del(\"\/services\/:service\/:team\", AuthorizationRequiredHandler(service.RevokeAccessFromTeamHandler))\n\n\tm.Del(\"\/apps\/:name\", AuthorizationRequiredHandler(app.AppDelete))\n\tm.Get(\"\/apps\/:name\/repository\/clone\", Handler(app.CloneRepositoryHandler))\n\tm.Get(\"\/apps\/:name\", AuthorizationRequiredHandler(app.AppInfo))\n\tm.Post(\"\/apps\/:name\/run\", AuthorizationRequiredHandler(app.RunCommand))\n\tm.Get(\"\/apps\/:name\/env\", AuthorizationRequiredHandler(app.GetEnv))\n\tm.Post(\"\/apps\/:name\/env\", AuthorizationRequiredHandler(app.SetEnv))\n\tm.Del(\"\/apps\/:name\/env\", AuthorizationRequiredHandler(app.UnsetEnv))\n\tm.Get(\"\/apps\", AuthorizationRequiredHandler(app.AppList))\n\tm.Post(\"\/apps\", AuthorizationRequiredHandler(app.CreateAppHandler))\n\tm.Put(\"\/apps\/:app\/:team\", AuthorizationRequiredHandler(app.GrantAccessToTeamHandler))\n\tm.Del(\"\/apps\/:app\/:team\", AuthorizationRequiredHandler(app.RevokeAccessFromTeamHandler))\n\tm.Get(\"\/apps\/:name\/log\", AuthorizationRequiredHandler(app.AppLog))\n\n\tm.Post(\"\/users\", Handler(auth.CreateUser))\n\tm.Post(\"\/users\/:email\/tokens\", Handler(auth.Login))\n\tm.Post(\"\/users\/keys\", AuthorizationRequiredHandler(auth.AddKeyToUser))\n\tm.Del(\"\/users\/keys\", AuthorizationRequiredHandler(auth.RemoveKeyFromUser))\n\n\tm.Post(\"\/teams\", AuthorizationRequiredHandler(auth.CreateTeam))\n\tm.Put(\"\/teams\/:team\/:user\", AuthorizationRequiredHandler(auth.AddUserToTeam))\n\tm.Del(\"\/teams\/:team\/:user\", AuthorizationRequiredHandler(auth.RemoveUserFromTeam))\n\n\tlisten, err := 
config.GetString(\"listen\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif !*dry {\n\t\tlog.Fatal(http.ListenAndServe(listen, m))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build ignore\n\npackage main\n\nimport (\n\t\".\"\n\t\"flag\"\n\t\"github.com\/bmizerany\/pat\"\n\t\"github.com\/timeredbull\/tsuru\/api\/app\"\n\t\"github.com\/timeredbull\/tsuru\/api\/auth\"\n\t\"github.com\/timeredbull\/tsuru\/api\/service\"\n\t\"github.com\/timeredbull\/tsuru\/config\"\n\t\"github.com\/timeredbull\/tsuru\/db\"\n\t\"github.com\/timeredbull\/tsuru\/log\"\n\t\"github.com\/timeredbull\/tsuru\/repository\"\n\tstdlog \"log\"\n\t\"log\/syslog\"\n\t\"net\/http\"\n)\n\nfunc main() {\n\tvar err error\n\tlog.Target, err = syslog.NewLogger(syslog.LOG_INFO, stdlog.LstdFlags)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tconfigFile := flag.String(\"config\", \"\/etc\/tsuru\/tsuru.conf\", \"tsuru config file\")\n\tdry := flag.Bool(\"dry\", false, \"dry-run: does not start the server (for testing purpose)\")\n\tflag.Parse()\n\terr = config.ReadConfigFile(*configFile)\n\tif err != nil {\n\t\tlog.Panic(err.Error())\n\t}\n\tconnString, err := config.GetString(\"database:url\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdbName, err := config.GetString(\"database:name\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdb.Session, err = db.Open(connString, dbName)\n\tif err != nil {\n\t\tlog.Panic(err.Error())\n\t}\n\tdefer db.Session.Close()\n\n\trepository.RunAgent()\n\tm := pat.New()\n\n\tm.Post(\"\/services\", webserver.AuthorizationRequiredHandler(service.CreateHandler))\n\tm.Get(\"\/services\", webserver.AuthorizationRequiredHandler(service.ServicesHandler))\n\tm.Get(\"\/services\/types\", webserver.Handler(service.ServiceTypesHandler))\n\tm.Del(\"\/services\/:name\", webserver.AuthorizationRequiredHandler(service.DeleteHandler))\n\tm.Post(\"\/services\/bind\", webserver.AuthorizationRequiredHandler(service.BindHandler))\n\tm.Post(\"\/services\/unbind\", webserver.AuthorizationRequiredHandler(service.UnbindHandler))\n\tm.Put(\"\/services\/:service\/:team\", webserver.AuthorizationRequiredHandler(service.GrantAccessToTeamHandler))\n\tm.Del(\"\/services\/:service\/:team\", webserver.AuthorizationRequiredHandler(service.RevokeAccessFromTeamHandler))\n\n\tm.Del(\"\/apps\/:name\", webserver.AuthorizationRequiredHandler(app.AppDelete))\n\tm.Get(\"\/apps\/:name\/repository\/clone\", webserver.Handler(app.CloneRepositoryHandler))\n\tm.Get(\"\/apps\/:name\", webserver.AuthorizationRequiredHandler(app.AppInfo))\n\tm.Post(\"\/apps\/:name\/run\", webserver.AuthorizationRequiredHandler(app.RunCommand))\n\tm.Get(\"\/apps\/:name\/get-env\", webserver.AuthorizationRequiredHandler(app.GetEnv))\n\tm.Get(\"\/apps\", webserver.AuthorizationRequiredHandler(app.AppList))\n\tm.Post(\"\/apps\", webserver.AuthorizationRequiredHandler(app.CreateAppHandler))\n\tm.Put(\"\/apps\/:app\/:team\", webserver.AuthorizationRequiredHandler(app.GrantAccessToTeamHandler))\n\tm.Del(\"\/apps\/:app\/:team\", webserver.AuthorizationRequiredHandler(app.RevokeAccessFromTeamHandler))\n\n\tm.Post(\"\/users\", webserver.Handler(auth.CreateUser))\n\tm.Post(\"\/users\/:email\/tokens\", webserver.Handler(auth.Login))\n\tm.Post(\"\/users\/keys\", webserver.AuthorizationRequiredHandler(auth.AddKeyToUser))\n\tm.Del(\"\/users\/keys\", webserver.AuthorizationRequiredHandler(auth.RemoveKeyFromUser))\n\n\tm.Post(\"\/teams\", webserver.AuthorizationRequiredHandler(auth.CreateTeam))\n\tm.Put(\"\/teams\/:team\/:user\", 
webserver.AuthorizationRequiredHandler(auth.AddUserToTeam))\n\tm.Del(\"\/teams\/:team\/:user\", webserver.AuthorizationRequiredHandler(auth.RemoveUserFromTeam))\n\n\tlisten, err := config.GetString(\"listen\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif !*dry {\n\t\tlog.Fatal(http.ListenAndServe(listen, m))\n\t}\n}\n<commit_msg>api\/webserver: changed GetEnv url to GET \/app\/:name\/env<commit_after>\/\/ +build ignore\n\npackage main\n\nimport (\n\t\".\"\n\t\"flag\"\n\t\"github.com\/bmizerany\/pat\"\n\t\"github.com\/timeredbull\/tsuru\/api\/app\"\n\t\"github.com\/timeredbull\/tsuru\/api\/auth\"\n\t\"github.com\/timeredbull\/tsuru\/api\/service\"\n\t\"github.com\/timeredbull\/tsuru\/config\"\n\t\"github.com\/timeredbull\/tsuru\/db\"\n\t\"github.com\/timeredbull\/tsuru\/log\"\n\t\"github.com\/timeredbull\/tsuru\/repository\"\n\tstdlog \"log\"\n\t\"log\/syslog\"\n\t\"net\/http\"\n)\n\nfunc main() {\n\tvar err error\n\tlog.Target, err = syslog.NewLogger(syslog.LOG_INFO, stdlog.LstdFlags)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tconfigFile := flag.String(\"config\", \"\/etc\/tsuru\/tsuru.conf\", \"tsuru config file\")\n\tdry := flag.Bool(\"dry\", false, \"dry-run: does not start the server (for testing purpose)\")\n\tflag.Parse()\n\terr = config.ReadConfigFile(*configFile)\n\tif err != nil {\n\t\tlog.Panic(err.Error())\n\t}\n\tconnString, err := config.GetString(\"database:url\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdbName, err := config.GetString(\"database:name\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdb.Session, err = db.Open(connString, dbName)\n\tif err != nil {\n\t\tlog.Panic(err.Error())\n\t}\n\tdefer db.Session.Close()\n\n\trepository.RunAgent()\n\tm := pat.New()\n\n\tm.Post(\"\/services\", webserver.AuthorizationRequiredHandler(service.CreateHandler))\n\tm.Get(\"\/services\", webserver.AuthorizationRequiredHandler(service.ServicesHandler))\n\tm.Get(\"\/services\/types\", webserver.Handler(service.ServiceTypesHandler))\n\tm.Del(\"\/services\/:name\", webserver.AuthorizationRequiredHandler(service.DeleteHandler))\n\tm.Post(\"\/services\/bind\", webserver.AuthorizationRequiredHandler(service.BindHandler))\n\tm.Post(\"\/services\/unbind\", webserver.AuthorizationRequiredHandler(service.UnbindHandler))\n\tm.Put(\"\/services\/:service\/:team\", webserver.AuthorizationRequiredHandler(service.GrantAccessToTeamHandler))\n\tm.Del(\"\/services\/:service\/:team\", webserver.AuthorizationRequiredHandler(service.RevokeAccessFromTeamHandler))\n\n\tm.Del(\"\/apps\/:name\", webserver.AuthorizationRequiredHandler(app.AppDelete))\n\tm.Get(\"\/apps\/:name\/repository\/clone\", webserver.Handler(app.CloneRepositoryHandler))\n\tm.Get(\"\/apps\/:name\", webserver.AuthorizationRequiredHandler(app.AppInfo))\n\tm.Post(\"\/apps\/:name\/run\", webserver.AuthorizationRequiredHandler(app.RunCommand))\n\tm.Get(\"\/apps\/:name\/env\", webserver.AuthorizationRequiredHandler(app.GetEnv))\n\tm.Get(\"\/apps\", webserver.AuthorizationRequiredHandler(app.AppList))\n\tm.Post(\"\/apps\", webserver.AuthorizationRequiredHandler(app.CreateAppHandler))\n\tm.Put(\"\/apps\/:app\/:team\", webserver.AuthorizationRequiredHandler(app.GrantAccessToTeamHandler))\n\tm.Del(\"\/apps\/:app\/:team\", webserver.AuthorizationRequiredHandler(app.RevokeAccessFromTeamHandler))\n\n\tm.Post(\"\/users\", webserver.Handler(auth.CreateUser))\n\tm.Post(\"\/users\/:email\/tokens\", webserver.Handler(auth.Login))\n\tm.Post(\"\/users\/keys\", webserver.AuthorizationRequiredHandler(auth.AddKeyToUser))\n\tm.Del(\"\/users\/keys\", 
webserver.AuthorizationRequiredHandler(auth.RemoveKeyFromUser))\n\n\tm.Post(\"\/teams\", webserver.AuthorizationRequiredHandler(auth.CreateTeam))\n\tm.Put(\"\/teams\/:team\/:user\", webserver.AuthorizationRequiredHandler(auth.AddUserToTeam))\n\tm.Del(\"\/teams\/:team\/:user\", webserver.AuthorizationRequiredHandler(auth.RemoveUserFromTeam))\n\n\tlisten, err := config.GetString(\"listen\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif !*dry {\n\t\tlog.Fatal(http.ListenAndServe(listen, m))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 SteelSeries ApS. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This package implements a basic LISP interpreter for embedding in a Go program for scripting.\n\/\/ This file contains the built-in primitive functions.\n\npackage golisp\n\nimport (\n    \"errors\"\n    \"fmt\"\n)\n\nfunc RegisterSpecialFormPrimitives() {\n    MakePrimitiveFunction(\"cond\", -1, CondImpl)\n    MakePrimitiveFunction(\"case\", -1, CaseImpl)\n    MakePrimitiveFunction(\"if\", -1, IfImpl)\n    MakePrimitiveFunction(\"lambda\", -1, LambdaImpl)\n    MakePrimitiveFunction(\"define\", -1, DefineImpl)\n    MakePrimitiveFunction(\"defun\", -1, DefunImpl)\n    MakePrimitiveFunction(\"defmacro\", -1, DefmacroImpl)\n    MakePrimitiveFunction(\"let\", -1, LetImpl)\n    MakePrimitiveFunction(\"begin\", -1, BeginImpl)\n    MakePrimitiveFunction(\"do\", -1, DoImpl)\n    MakePrimitiveFunction(\"apply\", 2, ApplyImpl)\n    MakePrimitiveFunction(\"eval\", 1, EvalImpl)\n    MakePrimitiveFunction(\"->\", -1, ChainImpl)\n    MakePrimitiveFunction(\"=>\", -1, TapImpl)\n}\n\nfunc CondImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    var condition *Data\n    for c := args; NotNilP(c); c = Cdr(c) {\n        clause := Car(c)\n        if !PairP(clause) {\n            err = errors.New(\"Cond expects a sequence of clauses that are lists\")\n            return\n        }\n        condition, err = Eval(Car(clause), env)\n        if err != nil {\n            return\n        }\n        if BooleanValue(condition) || StringValue(Car(clause)) == \"else\" {\n            for e := Cdr(clause); NotNilP(e); e = Cdr(e) {\n                result, err = Eval(Car(e), env)\n                if err != nil {\n                    return\n                }\n            }\n            return\n        }\n    }\n    return\n}\n\nfunc evalList(l *Data, env *SymbolTableFrame) (result *Data, err error) {\n    for sexpr := l; NotNilP(sexpr); sexpr = Cdr(sexpr) {\n        result, err = Eval(Car(sexpr), env)\n        if err != nil {\n            return\n        }\n    }\n    return\n}\n\nfunc CaseImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    var keyValue *Data\n    var targetValue *Data\n\n    keyValue, err = Eval(Car(args), env)\n    if err != nil {\n        return\n    }\n\n    for clauseCell := Cdr(args); NotNilP(clauseCell); clauseCell = Cdr(clauseCell) {\n        clause := Car(clauseCell)\n        if PairP(clause) {\n            if IsEqual(Car(clause), SymbolWithName(\"else\")) {\n                return evalList(Cdr(clause), env)\n            } else {\n                targetValue, err = Eval(Car(clause), env)\n                if IsEqual(targetValue, keyValue) {\n                    return evalList(Cdr(clause), env)\n                }\n            }\n        } else {\n            err = errors.New(\"Case requires non-atomic clauses\")\n            return\n        }\n    }\n\n    return\n}\n\nfunc IfImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    if Length(args) < 2 || Length(args) > 3 {\n        err = errors.New(fmt.Sprintf(\"IF requires 2 or 3 arguments. Received %d.\", Length(args)))\n
        return\n    }\n\n    c, err := Eval(Car(args), env)\n    if err != nil {\n        return\n    }\n    condition := BooleanValue(c)\n    thenClause := Second(args)\n    elseClause := Third(args)\n\n    if condition {\n        return Eval(thenClause, env)\n    } else {\n        return Eval(elseClause, env)\n    }\n}\n\nfunc LambdaImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    params := Car(args)\n    body := Cdr(args)\n    return FunctionWithNameParamsBodyAndParent(\"anonymous\", params, body, env), nil\n}\n\nfunc DefineImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    var value *Data\n    thing := Car(args)\n    if SymbolP(thing) {\n        value, err = Eval(Cadr(args), env)\n        if err != nil {\n            return\n        }\n    } else if PairP(thing) {\n        name := Car(thing)\n        params := Cdr(thing)\n        thing = name\n        if !SymbolP(name) {\n            err = errors.New(\"Function name has to be a symbol\")\n            return\n        }\n        body := Cdr(args)\n        value = FunctionWithNameParamsBodyAndParent(StringValue(name), params, body, env)\n    } else {\n        err = errors.New(\"Invalid definition\")\n        return\n    }\n    env.BindLocallyTo(thing, value)\n    return value, nil\n}\n\nfunc DefunImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    var value *Data\n    thing := Car(args)\n    if PairP(thing) {\n        name := Car(thing)\n        params := Cdr(thing)\n        thing = name\n        if !SymbolP(name) {\n            err = errors.New(\"Function name has to be a symbol\")\n            return\n        }\n        body := Cdr(args)\n        value = FunctionWithNameParamsBodyAndParent(StringValue(name), params, body, env)\n    } else {\n        err = errors.New(\"Invalid function definition\")\n        return\n    }\n    env.BindLocallyTo(thing, value)\n    return value, nil\n}\n\nfunc DefmacroImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    var value *Data\n    thing := Car(args)\n    if PairP(thing) {\n        name := Car(thing)\n        params := Cdr(thing)\n        thing = name\n        if !SymbolP(name) {\n            err = errors.New(\"Macro name has to be a symbol\")\n            return\n        }\n        body := Cadr(args)\n        value = MacroWithNameParamsBodyAndParent(StringValue(name), params, body, env)\n    } else {\n        err = errors.New(\"Invalid macro definition\")\n        return\n    }\n    env.BindLocallyTo(thing, value)\n    return value, nil\n}\n\nfunc bindLetLocals(bindingForms *Data, env *SymbolTableFrame) (err error) {\n    var name *Data\n    var value *Data\n\n    for cell := bindingForms; NotNilP(cell); cell = Cdr(cell) {\n        bindingPair := Car(cell)\n        if !PairP(bindingPair) {\n            err = errors.New(\"Let requires a list of bindings (which are pairs) as its first argument\")\n            return\n        }\n        name = Car(bindingPair)\n        if !SymbolP(name) {\n            err = errors.New(\"First part of a let binding pair must be a symbol\")\n            return\n        }\n        value, err = Eval(Cadr(bindingPair), env)\n        if err != nil {\n            return\n        }\n        env.BindLocallyTo(name, value)\n    }\n    return\n}\n\nfunc LetImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    if Length(args) < 1 {\n        err = errors.New(\"Let requires at least a list of bindings\")\n        return\n    }\n\n    if !PairP(Car(args)) {\n        err = errors.New(\"Let requires a list of bindings as its first argument\")\n        return\n    }\n\n    localFrame := NewSymbolTableFrameBelow(env)\n    bindLetLocals(Car(args), localFrame)\n\n    for cell := Cdr(args); NotNilP(cell); cell = Cdr(cell) {\n        sexpr := Car(cell)\n        result, err = Eval(sexpr, localFrame)\n        if err != nil {\n            return\n        }\n    }\n\n    return\n}\n\nfunc BeginImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    for cell := args; NotNilP(cell); cell = Cdr(cell) {\n        sexpr := Car(cell)\n        result, err = Eval(sexpr, env)\n        if err != nil {\n            return\n        }\n    }\n
    return\n}\n\nfunc rebindDoLocals(bindingForms *Data, env *SymbolTableFrame) (err error) {\n    var name *Data\n    var value *Data\n\n    for cell := bindingForms; NotNilP(cell); cell = Cdr(cell) {\n        bindingTuple := Car(cell)\n        name = First(bindingTuple)\n        if NotNilP(Third(bindingTuple)) {\n            value, err = Eval(Third(bindingTuple), env)\n            if err != nil {\n                return\n            }\n            env.BindLocallyTo(name, value)\n        }\n    }\n    return\n}\n\nfunc DoImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    if Length(args) < 2 {\n        err = errors.New(\"Do requires at least a list of bindings and a test clause\")\n        return\n    }\n\n    bindings := Car(args)\n    if !PairP(bindings) {\n        err = errors.New(\"Do requires a list of bindings as its first argument\")\n        return\n    }\n\n    testClause := Cadr(args)\n    if !PairP(testClause) {\n        err = errors.New(\"Do requires a list as its second argument\")\n        return\n    }\n\n    localFrame := NewSymbolTableFrameBelow(env)\n    bindLetLocals(bindings, localFrame)\n\n    body := Cddr(args)\n\n    var shouldExit *Data\n\n    for true {\n        shouldExit, err = Eval(Car(testClause), localFrame)\n        if err != nil {\n            return\n        }\n\n        if BooleanValue(shouldExit) {\n            for cell := Cdr(testClause); NotNilP(cell); cell = Cdr(cell) {\n                sexpr := Car(cell)\n                result, err = Eval(sexpr, localFrame)\n                if err != nil {\n                    return\n                }\n            }\n            return\n        }\n\n        for cell := body; NotNilP(cell); cell = Cdr(cell) {\n            sexpr := Car(cell)\n            result, err = Eval(sexpr, localFrame)\n            if err != nil {\n                return\n            }\n        }\n\n        rebindDoLocals(bindings, localFrame)\n    }\n    return\n}\n\nfunc ApplyImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    f, err := Eval(Car(args), env)\n    if err != nil {\n        return\n    }\n\n    vals, err := Eval(Cadr(args), env)\n    if err != nil {\n        return\n    }\n\n    return Apply(f, vals, env)\n}\n\nfunc EvalImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    val, err := Eval(Car(args), env)\n    if err != nil {\n        return\n    }\n\n    return Eval(val, env)\n}\n\nfunc ChainImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    if Length(args) == 0 {\n        err = errors.New(\"-> requires at least an initial value.\")\n        return\n    }\n\n    var value *Data\n\n    value, err = Eval(Car(args), env)\n    if err != nil {\n        return\n    }\n\n    for cell := Cdr(args); NotNilP(cell); cell = Cdr(cell) {\n        sexpr := Car(cell)\n        var newExpr *Data\n        if ListP(sexpr) {\n            newExpr = Cons(Car(sexpr), Cons(value, Cdr(sexpr)))\n        } else {\n            newExpr = Cons(sexpr, Cons(value, nil))\n        }\n        value, err = Eval(newExpr, env)\n        if err != nil {\n            return\n        }\n    }\n    result = value\n    return\n}\n\nfunc TapImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    if Length(args) == 0 {\n        err = errors.New(\"tap requires at least an initial value.\")\n        return\n    }\n\n    var value *Data\n\n    value, err = Eval(Car(args), env)\n    if err != nil {\n        return\n    }\n    result = value\n\n    for cell := Cdr(args); NotNilP(cell); cell = Cdr(cell) {\n        sexpr := Car(cell)\n        var newExpr *Data\n        if ListP(sexpr) {\n            newExpr = Cons(Car(sexpr), Cons(value, Cdr(sexpr)))\n        } else {\n            newExpr = Cons(sexpr, Cons(value, nil))\n        }\n        _, err = Eval(newExpr, env)\n        if err != nil {\n            return\n        }\n    }\n    return\n}\n<commit_msg>Removed defun<commit_after>
\/\/ Copyright 2014 SteelSeries ApS. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This package implements a basic LISP interpreter for embedding in a Go program for scripting.\n\/\/ This file contains the built-in primitive functions.\n\npackage golisp\n\nimport (\n    \"errors\"\n    \"fmt\"\n)\n\nfunc RegisterSpecialFormPrimitives() {\n    MakePrimitiveFunction(\"cond\", -1, CondImpl)\n    MakePrimitiveFunction(\"case\", -1, CaseImpl)\n    MakePrimitiveFunction(\"if\", -1, IfImpl)\n    MakePrimitiveFunction(\"lambda\", -1, LambdaImpl)\n    MakePrimitiveFunction(\"define\", -1, DefineImpl)\n    MakePrimitiveFunction(\"defmacro\", -1, DefmacroImpl)\n    MakePrimitiveFunction(\"let\", -1, LetImpl)\n    MakePrimitiveFunction(\"begin\", -1, BeginImpl)\n    MakePrimitiveFunction(\"do\", -1, DoImpl)\n    MakePrimitiveFunction(\"apply\", 2, ApplyImpl)\n    MakePrimitiveFunction(\"eval\", 1, EvalImpl)\n    MakePrimitiveFunction(\"->\", -1, ChainImpl)\n    MakePrimitiveFunction(\"=>\", -1, TapImpl)\n}\n\nfunc CondImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    var condition *Data\n    for c := args; NotNilP(c); c = Cdr(c) {\n        clause := Car(c)\n        if !PairP(clause) {\n            err = errors.New(\"Cond expects a sequence of clauses that are lists\")\n            return\n        }\n        condition, err = Eval(Car(clause), env)\n        if err != nil {\n            return\n        }\n        if BooleanValue(condition) || StringValue(Car(clause)) == \"else\" {\n            for e := Cdr(clause); NotNilP(e); e = Cdr(e) {\n                result, err = Eval(Car(e), env)\n                if err != nil {\n                    return\n                }\n            }\n            return\n        }\n    }\n    return\n}\n\nfunc evalList(l *Data, env *SymbolTableFrame) (result *Data, err error) {\n    for sexpr := l; NotNilP(sexpr); sexpr = Cdr(sexpr) {\n        result, err = Eval(Car(sexpr), env)\n        if err != nil {\n            return\n        }\n    }\n    return\n}\n\nfunc CaseImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    var keyValue *Data\n    var targetValue *Data\n\n    keyValue, err = Eval(Car(args), env)\n    if err != nil {\n        return\n    }\n\n    for clauseCell := Cdr(args); NotNilP(clauseCell); clauseCell = Cdr(clauseCell) {\n        clause := Car(clauseCell)\n        if PairP(clause) {\n            if IsEqual(Car(clause), SymbolWithName(\"else\")) {\n                return evalList(Cdr(clause), env)\n            } else {\n                targetValue, err = Eval(Car(clause), env)\n                if IsEqual(targetValue, keyValue) {\n                    return evalList(Cdr(clause), env)\n                }\n            }\n        } else {\n            err = errors.New(\"Case requires non-atomic clauses\")\n            return\n        }\n    }\n\n    return\n}\n\nfunc IfImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    if Length(args) < 2 || Length(args) > 3 {\n        err = errors.New(fmt.Sprintf(\"IF requires 2 or 3 arguments. Received %d.\", Length(args)))\n
        return\n    }\n\n    c, err := Eval(Car(args), env)\n    if err != nil {\n        return\n    }\n    condition := BooleanValue(c)\n    thenClause := Second(args)\n    elseClause := Third(args)\n\n    if condition {\n        return Eval(thenClause, env)\n    } else {\n        return Eval(elseClause, env)\n    }\n}\n\nfunc LambdaImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    params := Car(args)\n    body := Cdr(args)\n    return FunctionWithNameParamsBodyAndParent(\"anonymous\", params, body, env), nil\n}\n\nfunc DefineImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    var value *Data\n    thing := Car(args)\n    if SymbolP(thing) {\n        value, err = Eval(Cadr(args), env)\n        if err != nil {\n            return\n        }\n    } else if PairP(thing) {\n        name := Car(thing)\n        params := Cdr(thing)\n        thing = name\n        if !SymbolP(name) {\n            err = errors.New(\"Function name has to be a symbol\")\n            return\n        }\n        body := Cdr(args)\n        value = FunctionWithNameParamsBodyAndParent(StringValue(name), params, body, env)\n    } else {\n        err = errors.New(\"Invalid definition\")\n        return\n    }\n    env.BindLocallyTo(thing, value)\n    return value, nil\n}\n\nfunc DefmacroImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    var value *Data\n    thing := Car(args)\n    if PairP(thing) {\n        name := Car(thing)\n        params := Cdr(thing)\n        thing = name\n        if !SymbolP(name) {\n            err = errors.New(\"Macro name has to be a symbol\")\n            return\n        }\n        body := Cadr(args)\n        value = MacroWithNameParamsBodyAndParent(StringValue(name), params, body, env)\n    } else {\n        err = errors.New(\"Invalid macro definition\")\n        return\n    }\n    env.BindLocallyTo(thing, value)\n    return value, nil\n}\n\nfunc bindLetLocals(bindingForms *Data, env *SymbolTableFrame) (err error) {\n    var name *Data\n    var value *Data\n\n    for cell := bindingForms; NotNilP(cell); cell = Cdr(cell) {\n        bindingPair := Car(cell)\n        if !PairP(bindingPair) {\n            err = errors.New(\"Let requires a list of bindings (which are pairs) as its first argument\")\n            return\n        }\n        name = Car(bindingPair)\n        if !SymbolP(name) {\n            err = errors.New(\"First part of a let binding pair must be a symbol\")\n            return\n        }\n        value, err = Eval(Cadr(bindingPair), env)\n        if err != nil {\n            return\n        }\n        env.BindLocallyTo(name, value)\n    }\n    return\n}\n\nfunc LetImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    if Length(args) < 1 {\n        err = errors.New(\"Let requires at least a list of bindings\")\n        return\n    }\n\n    if !PairP(Car(args)) {\n        err = errors.New(\"Let requires a list of bindings as its first argument\")\n        return\n    }\n\n    localFrame := NewSymbolTableFrameBelow(env)\n    bindLetLocals(Car(args), localFrame)\n\n    for cell := Cdr(args); NotNilP(cell); cell = Cdr(cell) {\n        sexpr := Car(cell)\n        result, err = Eval(sexpr, localFrame)\n        if err != nil {\n            return\n        }\n    }\n\n    return\n}\n\nfunc BeginImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    for cell := args; NotNilP(cell); cell = Cdr(cell) {\n        sexpr := Car(cell)\n        result, err = Eval(sexpr, env)\n        if err != nil {\n            return\n        }\n    }\n    return\n}\n\nfunc rebindDoLocals(bindingForms *Data, env *SymbolTableFrame) (err error) {\n    var name *Data\n    var value *Data\n\n    for cell := bindingForms; NotNilP(cell); cell = Cdr(cell) {\n        bindingTuple := Car(cell)\n        name = First(bindingTuple)\n        if NotNilP(Third(bindingTuple)) {\n            value, err = Eval(Third(bindingTuple), env)\n            if err != nil {\n                return\n            }\n            env.BindLocallyTo(name, value)\n        }\n    }\n    return\n}\n\nfunc DoImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    if Length(args) < 2 {\n
        err = errors.New(\"Do requires at least a list of bindings and a test clause\")\n        return\n    }\n\n    bindings := Car(args)\n    if !PairP(bindings) {\n        err = errors.New(\"Do requires a list of bindings as its first argument\")\n        return\n    }\n\n    testClause := Cadr(args)\n    if !PairP(testClause) {\n        err = errors.New(\"Do requires a list as its second argument\")\n        return\n    }\n\n    localFrame := NewSymbolTableFrameBelow(env)\n    bindLetLocals(bindings, localFrame)\n\n    body := Cddr(args)\n\n    var shouldExit *Data\n\n    for true {\n        shouldExit, err = Eval(Car(testClause), localFrame)\n        if err != nil {\n            return\n        }\n\n        if BooleanValue(shouldExit) {\n            for cell := Cdr(testClause); NotNilP(cell); cell = Cdr(cell) {\n                sexpr := Car(cell)\n                result, err = Eval(sexpr, localFrame)\n                if err != nil {\n                    return\n                }\n            }\n            return\n        }\n\n        for cell := body; NotNilP(cell); cell = Cdr(cell) {\n            sexpr := Car(cell)\n            result, err = Eval(sexpr, localFrame)\n            if err != nil {\n                return\n            }\n        }\n\n        rebindDoLocals(bindings, localFrame)\n    }\n    return\n}\n\nfunc ApplyImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    f, err := Eval(Car(args), env)\n    if err != nil {\n        return\n    }\n\n    vals, err := Eval(Cadr(args), env)\n    if err != nil {\n        return\n    }\n\n    return Apply(f, vals, env)\n}\n\nfunc EvalImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    val, err := Eval(Car(args), env)\n    if err != nil {\n        return\n    }\n\n    return Eval(val, env)\n}\n\nfunc ChainImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    if Length(args) == 0 {\n        err = errors.New(\"-> requires at least an initial value.\")\n        return\n    }\n\n    var value *Data\n\n    value, err = Eval(Car(args), env)\n    if err != nil {\n        return\n    }\n\n    for cell := Cdr(args); NotNilP(cell); cell = Cdr(cell) {\n        sexpr := Car(cell)\n        var newExpr *Data\n        if ListP(sexpr) {\n            newExpr = Cons(Car(sexpr), Cons(value, Cdr(sexpr)))\n        } else {\n            newExpr = Cons(sexpr, Cons(value, nil))\n        }\n        value, err = Eval(newExpr, env)\n        if err != nil {\n            return\n        }\n    }\n    result = value\n    return\n}\n\nfunc TapImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n    if Length(args) == 0 {\n        err = errors.New(\"tap requires at least an initial value.\")\n        return\n    }\n\n    var value *Data\n\n    value, err = Eval(Car(args), env)\n    if err != nil {\n        return\n    }\n    result = value\n\n    for cell := Cdr(args); NotNilP(cell); cell = Cdr(cell) {\n        sexpr := Car(cell)\n        var newExpr *Data\n        if ListP(sexpr) {\n            newExpr = Cons(Car(sexpr), Cons(value, Cdr(sexpr)))\n        } else {\n            newExpr = Cons(sexpr, Cons(value, nil))\n        }\n        _, err = Eval(newExpr, env)\n        if err != nil {\n            return\n        }\n    }\n    return\n}\n<|endoftext|>"} {"text":"<commit_before>package nsmvpp\n\nimport (\n\tgovppapi \"git.fd.io\/govpp.git\/api\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\ntype operation interface {\n\tapply(apiCh govppapi.Channel) error\n\trollback() operation\n}\n\nfunc rollback(tx []operation, pos int, apiCh govppapi.Channel) error {\n\tlogrus.Infof(\"Rolling back operations...\")\n\tvar err error\n\tfor i := pos - 1; pos >= 0; pos-- {\n\t\terr = tx[i].rollback().apply(apiCh)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"error while rolling back, (I will continue rollback operations): %v\", err)\n\t\t}\n\t}\n
\tlogrus.Info(\"Done. I did my best to roll things back\")\n\treturn err\n}\n\nfunc perform(tx []operation, apiCh govppapi.Channel) (int, error) {\n\tlogrus.Infof(\"Programming dataplane...\")\n\tfor i := range tx {\n\t\terr := tx[i].apply(apiCh)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"error performing operation %v\", err)\n\t\t\treturn i, err\n\t\t}\n\t}\n\tlogrus.Infof(\"Transaction completed!\")\n\treturn len(tx), nil\n}\n<commit_msg>Fix for-loop index (#407)<commit_after>package nsmvpp\n\nimport (\n\tgovppapi \"git.fd.io\/govpp.git\/api\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\ntype operation interface {\n\tapply(apiCh govppapi.Channel) error\n\trollback() operation\n}\n\nfunc rollback(tx []operation, pos int, apiCh govppapi.Channel) error {\n\tlogrus.Infof(\"Rolling back operations...\")\n\tvar err error\n\tfor i := pos - 1; i >= 0; i-- {\n\t\terr = tx[i].rollback().apply(apiCh)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"error while rolling back, (I will continue rollback operations): %v\", err)\n\t\t}\n\t}\n\tlogrus.Info(\"Done. I did my best to roll things back\")\n\treturn err\n}\n\nfunc perform(tx []operation, apiCh govppapi.Channel) (int, error) {\n\tlogrus.Infof(\"Programming dataplane...\")\n\tfor i := range tx {\n\t\terr := tx[i].apply(apiCh)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"error performing operation %v\", err)\n\t\t\treturn i, err\n\t\t}\n\t}\n\tlogrus.Infof(\"Transaction completed!\")\n\treturn len(tx), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package benchlist\n\nimport (\n\t\"container\/list\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/ava-labs\/avalanchego\/ids\"\n\t\"github.com\/ava-labs\/avalanchego\/snow\"\n\t\"github.com\/ava-labs\/avalanchego\/snow\/validators\"\n\t\"github.com\/ava-labs\/avalanchego\/utils\/constants\"\n\t\"github.com\/ava-labs\/avalanchego\/utils\/timer\"\n\n\tsafemath \"github.com\/ava-labs\/avalanchego\/utils\/math\"\n)\n\n\/\/ QueryBenchlist ...\ntype QueryBenchlist interface {\n\t\/\/ RegisterQuery registers a sent query and returns whether the query is subject to benchlist\n\tRegisterQuery(validatorID ids.ShortID, requestID uint32, msgType constants.MsgType) bool\n\t\/\/ RegisterResponse registers the response to a query message\n\tRegisterResponse(validatorID ids.ShortID, requestID uint32)\n\t\/\/ QueryFailed registers that a query did not receive a response within our synchrony bound\n\tQueryFailed(validatorID ids.ShortID, requestID uint32)\n}\n\n\/\/ If a peer consistently does not respond to queries, it will\n\/\/ increase latencies on the network whenever that peer is polled.\n\/\/ If we cannot terminate the poll early, then the poll will wait\n\/\/ the full timeout before finalizing the poll and making progress.\n\/\/ This can increase network latencies to an undesirable level.\n\n\/\/ Therefore, a benchlist is used as a heuristic to immediately fail\n\/\/ queries to nodes that are consistently not responding.\n\ntype queryBenchlist struct {\n\tvdrs validators.Set\n\t\/\/ Validator ID --> Request ID --> non-empty iff\n\t\/\/ there is an outstanding request to this validator\n\t\/\/ with the corresponding requestID\n\tpendingQueries map[[20]byte]map[uint32]pendingQuery\n\t\/\/ Map of consecutive query failures\n\tconsecutiveFailures map[[20]byte]int\n\n\t\/\/ Maintain benchlist\n\tbenchlistTimes map[[20]byte]time.Time\n\tbenchlistOrder *list.List\n\tbenchlistSet ids.ShortSet\n\n\tthreshold int\n\tduration time.Duration\n\tmaxPortion float64\n\n\tclock timer.Clock\n\n\tmetrics *metrics\n\tctx *snow.Context\n\n\tlock 
sync.Mutex\n}\n\ntype pendingQuery struct {\n\tregistered time.Time\n\tmsgType constants.MsgType\n}\n\n\/\/ NewQueryBenchlist ...\nfunc NewQueryBenchlist(validators validators.Set, ctx *snow.Context, threshold int, duration time.Duration, maxPortion float64, summaryEnabled bool, namespace string) QueryBenchlist {\n\tmetrics := &metrics{}\n\tmetrics.Initialize(ctx, namespace, summaryEnabled)\n\n\treturn &queryBenchlist{\n\t\tpendingQueries: make(map[[20]byte]map[uint32]pendingQuery),\n\t\tconsecutiveFailures: make(map[[20]byte]int),\n\t\tbenchlistTimes: make(map[[20]byte]time.Time),\n\t\tbenchlistOrder: list.New(),\n\t\tbenchlistSet: ids.ShortSet{},\n\t\tvdrs: validators,\n\t\tthreshold: threshold,\n\t\tduration: duration,\n\t\tmaxPortion: maxPortion,\n\t\tctx: ctx,\n\t\tmetrics: metrics,\n\t}\n}\n\n\/\/ RegisterQuery attempts to register a query from [validatorID] and returns true\n\/\/ if that request should be made (not subject to benchlisting)\nfunc (b *queryBenchlist) RegisterQuery(validatorID ids.ShortID, requestID uint32, msgType constants.MsgType) bool {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\tkey := validatorID.Key()\n\tif benched := b.benched(validatorID); benched {\n\t\treturn false\n\t}\n\n\tvalidatorRequests, ok := b.pendingQueries[key]\n\tif !ok {\n\t\tvalidatorRequests = make(map[uint32]pendingQuery)\n\t\tb.pendingQueries[key] = validatorRequests\n\t}\n\n\tvalidatorRequests[requestID] = pendingQuery{\n\t\tregistered: b.clock.Time(),\n\t\tmsgType: msgType,\n\t}\n\treturn true\n}\n\n\/\/ RegisterResponse removes the query from pending\nfunc (b *queryBenchlist) RegisterResponse(validatorID ids.ShortID, requestID uint32) {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\tif ok := b.removeQuery(validatorID, requestID); !ok {\n\t\treturn\n\t}\n\n\t\/\/ Reset consecutive failures on success\n\tdelete(b.consecutiveFailures, validatorID.Key())\n}\n\n\/\/ QueryFailed notes a failure and benchlists [validatorID] if necessary\nfunc (b *queryBenchlist) QueryFailed(validatorID ids.ShortID, requestID uint32) {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\tif ok := b.removeQuery(validatorID, requestID); !ok {\n\t\treturn\n\t}\n\n\tkey := validatorID.Key()\n\t\/\/ Add a failure and benches [validatorID] if it has\n\t\/\/ passed the threshold\n\tb.consecutiveFailures[key]++\n\tif b.consecutiveFailures[key] >= b.threshold {\n\t\tb.bench(validatorID)\n\t}\n}\n\nfunc (b *queryBenchlist) bench(validatorID ids.ShortID) {\n\tif b.benchlistSet.Contains(validatorID) {\n\t\treturn\n\t}\n\n\tkey := validatorID.Key()\n\n\t\/\/ Goal:\n\t\/\/ Random end time in the range:\n\t\/\/ [max(lastEndTime,(currentTime + (duration\/2)): currentTime + duration]\n\t\/\/ This maintains the invariant that validators in benchlistOrder are\n\t\/\/ ordered by the time that they should be unbenched\n\tcurrTime := b.clock.Time()\n\tminEndTime := currTime.Add(b.duration \/ 2)\n\tif elem := b.benchlistOrder.Back(); elem != nil {\n\t\tlastValidator := elem.Value.(ids.ShortID)\n\t\tlastEndTime := b.benchlistTimes[lastValidator.Key()]\n\t\tif lastEndTime.After(minEndTime) {\n\t\t\tminEndTime = lastEndTime\n\t\t}\n\t}\n\tmaxEndTime := currTime.Add(b.duration)\n\t\/\/ Since maxEndTime is at least [duration] in the future and every element\n\t\/\/ added to benchlist was added in the past with an end time at most [duration]\n\t\/\/ in the future, this should never produce a negative duration.\n\tdiff := maxEndTime.Sub(minEndTime)\n\trandomizedEndTime := minEndTime.Add(time.Duration(rand.Float64() * float64(diff))) \/\/ #nosec 
G404\n\n\t\/\/ Add to benchlist times with randomized delay\n\tb.benchlistTimes[key] = randomizedEndTime\n\tb.benchlistOrder.PushBack(validatorID)\n\tb.benchlistSet.Add(validatorID)\n\tdelete(b.consecutiveFailures, key)\n\tb.ctx.Log.Debug(\n\t\t\"benching validator %s after %d consecutive failed queries for %s\",\n\t\tvalidatorID,\n\t\tb.threshold,\n\t\trandomizedEndTime.Sub(currTime),\n\t)\n\n\t\/\/ Note: there could be a memory leak if a large number of\n\t\/\/ validators were added, sampled, benched, and never sampled\n\t\/\/ again. Due to the minimum staking amount and durations this\n\t\/\/ is not a realistic concern.\n\tb.cleanup()\n}\n\n\/\/ benched checks if [validatorID] is currently benched\n\/\/ and calls cleanup if its benching period has elapsed\nfunc (b *queryBenchlist) benched(validatorID ids.ShortID) bool {\n\tkey := validatorID.Key()\n\n\tend, ok := b.benchlistTimes[key]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tif b.clock.Time().Before(end) {\n\t\treturn true\n\t}\n\n\t\/\/ If a benched item has expired, cleanup the benchlist\n\tb.cleanup()\n\treturn false\n}\n\n\/\/ cleanup ensures that we have not benched too much stake\n\/\/ and removes anything from the benchlist whose time has expired\nfunc (b *queryBenchlist) cleanup() {\n\tcurrentWeight, err := b.vdrs.SubsetWeight(b.benchlistSet)\n\tif err != nil {\n\t\t\/\/ Add log for this, should never happen\n\t\tb.ctx.Log.Error(\"failed to calculate subset weight due to: %w... Resetting benchlist\", err)\n\t\tb.reset()\n\t\treturn\n\t}\n\n\tbenchLen := b.benchlistSet.Len()\n\tupdatedWeight := currentWeight\n\ttotalWeight := b.vdrs.Weight()\n\tmaxBenchlistWeight := uint64(float64(totalWeight) * b.maxPortion)\n\n\t\/\/ Iterate over elements of the benchlist in order of expiration\n\tfor e := b.benchlistOrder.Front(); e != nil; e = e.Next() {\n\t\tvalidatorID := e.Value.(ids.ShortID)\n\t\tkey := validatorID.Key()\n\t\tend := b.benchlistTimes[key]\n\t\t\/\/ Remove elements with the next expiration until the next item has not\n\t\t\/\/ expired and the bench has less than the maximum weight\n\t\t\/\/ Note: this creates an edge case where benchlisting a validator\n\t\t\/\/ with a sufficient stake may clear the benchlist\n\t\tif b.clock.Time().Before(end) && currentWeight < maxBenchlistWeight {\n\t\t\tbreak\n\t\t}\n\n\t\tremoveWeight, ok := b.vdrs.GetWeight(validatorID)\n\t\tif ok {\n\t\t\tnewWeight, err := safemath.Sub64(currentWeight, removeWeight)\n\t\t\tif err != nil {\n\t\t\t\tb.ctx.Log.Error(\"failed to calculate new subset weight due to: %w... Resetting benchlist\", err)\n\t\t\t\tb.reset()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tupdatedWeight = newWeight\n\t\t}\n\n\t\tb.benchlistOrder.Remove(e)\n\t\tdelete(b.benchlistTimes, key)\n\t\tb.benchlistSet.Remove(validatorID)\n\t}\n\n\tupdatedBenchLen := b.benchlistSet.Len()\n\tb.ctx.Log.Debug(\"benchlist weight: (%d\/%d) -> (%d\/%d). 
Benched Validators: %d -> %d\",\n\t\tcurrentWeight,\n\t\ttotalWeight,\n\t\tupdatedWeight,\n\t\ttotalWeight,\n\t\tbenchLen,\n\t\tupdatedBenchLen,\n\t)\n\tb.metrics.weightBenched.Set(float64(updatedWeight))\n\tb.metrics.numBenched.Set(float64(updatedBenchLen))\n}\n\nfunc (b *queryBenchlist) reset() {\n\tb.pendingQueries = make(map[[20]byte]map[uint32]pendingQuery)\n\tb.consecutiveFailures = make(map[[20]byte]int)\n\tb.benchlistTimes = make(map[[20]byte]time.Time)\n\tb.benchlistOrder.Init()\n\tb.benchlistSet.Clear()\n\tb.metrics.weightBenched.Set(0)\n\tb.metrics.numBenched.Set(0)\n}\n\n\/\/ removeQuery returns true if the query was present\nfunc (b *queryBenchlist) removeQuery(validatorID ids.ShortID, requestID uint32) bool {\n\tkey := validatorID.Key()\n\n\tvalidatorRequests, ok := b.pendingQueries[key]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tquery, ok := validatorRequests[requestID]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tdelete(validatorRequests, requestID)\n\tif len(validatorRequests) == 0 {\n\t\tdelete(b.pendingQueries, key)\n\t}\n\tb.metrics.observe(validatorID, query.msgType, b.clock.Time().Sub(query.registered))\n\treturn true\n}\n<commit_msg>Fix updated weight in benchlist<commit_after>package benchlist\n\nimport (\n\t\"container\/list\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/ava-labs\/avalanchego\/ids\"\n\t\"github.com\/ava-labs\/avalanchego\/snow\"\n\t\"github.com\/ava-labs\/avalanchego\/snow\/validators\"\n\t\"github.com\/ava-labs\/avalanchego\/utils\/constants\"\n\t\"github.com\/ava-labs\/avalanchego\/utils\/timer\"\n\n\tsafemath \"github.com\/ava-labs\/avalanchego\/utils\/math\"\n)\n\n\/\/ QueryBenchlist ...\ntype QueryBenchlist interface {\n\t\/\/ RegisterQuery registers a sent query and returns whether the query is subject to benchlist\n\tRegisterQuery(validatorID ids.ShortID, requestID uint32, msgType constants.MsgType) bool\n\t\/\/ RegisterResponse registers the response to a query message\n\tRegisterResponse(validatorID ids.ShortID, requestID uint32)\n\t\/\/ QueryFailed registers that a query did not receive a response within our synchrony bound\n\tQueryFailed(validatorID ids.ShortID, requestID uint32)\n}\n\n\/\/ If a peer consistently does not respond to queries, it will\n\/\/ increase latencies on the network whenever that peer is polled.\n\/\/ If we cannot terminate the poll early, then the poll will wait\n\/\/ the full timeout before finalizing the poll and making progress.\n\/\/ This can increase network latencies to an undesirable level.\n\n\/\/ Therefore, a benchlist is used as a heuristic to immediately fail\n\/\/ queries to nodes that are consistently not responding.\n\ntype queryBenchlist struct {\n\tvdrs validators.Set\n\t\/\/ Validator ID --> Request ID --> non-empty iff\n\t\/\/ there is an outstanding request to this validator\n\t\/\/ with the corresponding requestID\n\tpendingQueries map[[20]byte]map[uint32]pendingQuery\n\t\/\/ Map of consecutive query failures\n\tconsecutiveFailures map[[20]byte]int\n\n\t\/\/ Maintain benchlist\n\tbenchlistTimes map[[20]byte]time.Time\n\tbenchlistOrder *list.List\n\tbenchlistSet ids.ShortSet\n\n\tthreshold int\n\tduration time.Duration\n\tmaxPortion float64\n\n\tclock timer.Clock\n\n\tmetrics *metrics\n\tctx *snow.Context\n\n\tlock sync.Mutex\n}\n\ntype pendingQuery struct {\n\tregistered time.Time\n\tmsgType constants.MsgType\n}\n\n\/\/ NewQueryBenchlist ...\nfunc NewQueryBenchlist(validators validators.Set, ctx *snow.Context, threshold int, duration time.Duration, maxPortion float64, summaryEnabled bool, 
namespace string) QueryBenchlist {\n\tmetrics := &metrics{}\n\tmetrics.Initialize(ctx, namespace, summaryEnabled)\n\n\treturn &queryBenchlist{\n\t\tpendingQueries: make(map[[20]byte]map[uint32]pendingQuery),\n\t\tconsecutiveFailures: make(map[[20]byte]int),\n\t\tbenchlistTimes: make(map[[20]byte]time.Time),\n\t\tbenchlistOrder: list.New(),\n\t\tbenchlistSet: ids.ShortSet{},\n\t\tvdrs: validators,\n\t\tthreshold: threshold,\n\t\tduration: duration,\n\t\tmaxPortion: maxPortion,\n\t\tctx: ctx,\n\t\tmetrics: metrics,\n\t}\n}\n\n\/\/ RegisterQuery attempts to register a query from [validatorID] and returns true\n\/\/ if that request should be made (not subject to benchlisting)\nfunc (b *queryBenchlist) RegisterQuery(validatorID ids.ShortID, requestID uint32, msgType constants.MsgType) bool {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\tkey := validatorID.Key()\n\tif benched := b.benched(validatorID); benched {\n\t\treturn false\n\t}\n\n\tvalidatorRequests, ok := b.pendingQueries[key]\n\tif !ok {\n\t\tvalidatorRequests = make(map[uint32]pendingQuery)\n\t\tb.pendingQueries[key] = validatorRequests\n\t}\n\n\tvalidatorRequests[requestID] = pendingQuery{\n\t\tregistered: b.clock.Time(),\n\t\tmsgType: msgType,\n\t}\n\treturn true\n}\n\n\/\/ RegisterResponse removes the query from pending\nfunc (b *queryBenchlist) RegisterResponse(validatorID ids.ShortID, requestID uint32) {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\tif ok := b.removeQuery(validatorID, requestID); !ok {\n\t\treturn\n\t}\n\n\t\/\/ Reset consecutive failures on success\n\tdelete(b.consecutiveFailures, validatorID.Key())\n}\n\n\/\/ QueryFailed notes a failure and benchlists [validatorID] if necessary\nfunc (b *queryBenchlist) QueryFailed(validatorID ids.ShortID, requestID uint32) {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\tif ok := b.removeQuery(validatorID, requestID); !ok {\n\t\treturn\n\t}\n\n\tkey := validatorID.Key()\n\t\/\/ Adds a failure and benches [validatorID] if it has\n\t\/\/ passed the threshold\n\tb.consecutiveFailures[key]++\n\tif b.consecutiveFailures[key] >= b.threshold {\n\t\tb.bench(validatorID)\n\t}\n}\n\nfunc (b *queryBenchlist) bench(validatorID ids.ShortID) {\n\tif b.benchlistSet.Contains(validatorID) {\n\t\treturn\n\t}\n\n\tkey := validatorID.Key()\n\n\t\/\/ Goal:\n\t\/\/ Random end time in the range:\n\t\/\/ [max(lastEndTime, currentTime + (duration\/2)), currentTime + duration]\n\t\/\/ This maintains the invariant that validators in benchlistOrder are\n\t\/\/ ordered by the time that they should be unbenched\n\tcurrTime := b.clock.Time()\n\tminEndTime := currTime.Add(b.duration \/ 2)\n\tif elem := b.benchlistOrder.Back(); elem != nil {\n\t\tlastValidator := elem.Value.(ids.ShortID)\n\t\tlastEndTime := b.benchlistTimes[lastValidator.Key()]\n\t\tif lastEndTime.After(minEndTime) {\n\t\t\tminEndTime = lastEndTime\n\t\t}\n\t}\n\tmaxEndTime := currTime.Add(b.duration)\n\t\/\/ Since maxEndTime is at least [duration] in the future and every element\n\t\/\/ added to benchlist was added in the past with an end time at most [duration]\n\t\/\/ in the future, this should never produce a negative duration.\n\tdiff := maxEndTime.Sub(minEndTime)\n\trandomizedEndTime := minEndTime.Add(time.Duration(rand.Float64() * float64(diff))) \/\/ #nosec G404\n\n\t\/\/ Add to benchlist times with randomized delay\n\tb.benchlistTimes[key] = randomizedEndTime\n\tb.benchlistOrder.PushBack(validatorID)\n\tb.benchlistSet.Add(validatorID)\n\tdelete(b.consecutiveFailures, key)\n\tb.ctx.Log.Debug(\n\t\t\"benching validator %s after %d 
consecutive failed queries for %s\",\n\t\tvalidatorID,\n\t\tb.threshold,\n\t\trandomizedEndTime.Sub(currTime),\n\t)\n\n\t\/\/ Note: there could be a memory leak if a large number of\n\t\/\/ validators were added, sampled, benched, and never sampled\n\t\/\/ again. Due to the minimum staking amount and durations this\n\t\/\/ is not a realistic concern.\n\tb.cleanup()\n}\n\n\/\/ benched checks if [validatorID] is currently benched\n\/\/ and calls cleanup if its benching period has elapsed\nfunc (b *queryBenchlist) benched(validatorID ids.ShortID) bool {\n\tkey := validatorID.Key()\n\n\tend, ok := b.benchlistTimes[key]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tif b.clock.Time().Before(end) {\n\t\treturn true\n\t}\n\n\t\/\/ If a benched item has expired, cleanup the benchlist\n\tb.cleanup()\n\treturn false\n}\n\n\/\/ cleanup ensures that we have not benched too much stake\n\/\/ and removes anything from the benchlist whose time has expired\nfunc (b *queryBenchlist) cleanup() {\n\tcurrentWeight, err := b.vdrs.SubsetWeight(b.benchlistSet)\n\tif err != nil {\n\t\t\/\/ Add log for this, should never happen\n\t\tb.ctx.Log.Error(\"failed to calculate subset weight due to: %w... Resetting benchlist\", err)\n\t\tb.reset()\n\t\treturn\n\t}\n\n\tbenchLen := b.benchlistSet.Len()\n\tupdatedWeight := currentWeight\n\ttotalWeight := b.vdrs.Weight()\n\tmaxBenchlistWeight := uint64(float64(totalWeight) * b.maxPortion)\n\n\t\/\/ Iterate over elements of the benchlist in order of expiration\n\tfor e := b.benchlistOrder.Front(); e != nil; e = e.Next() {\n\t\tvalidatorID := e.Value.(ids.ShortID)\n\t\tkey := validatorID.Key()\n\t\tend := b.benchlistTimes[key]\n\t\t\/\/ Remove elements with the next expiration until the next item has not\n\t\t\/\/ expired and the bench has less than the maximum weight\n\t\t\/\/ Note: this creates an edge case where benchlisting a validator\n\t\t\/\/ with a sufficient stake may clear the benchlist\n\t\tif b.clock.Time().Before(end) && updatedWeight < maxBenchlistWeight {\n\t\t\tbreak\n\t\t}\n\n\t\tremoveWeight, ok := b.vdrs.GetWeight(validatorID)\n\t\tif ok {\n\t\t\tnewWeight, err := safemath.Sub64(updatedWeight, removeWeight)\n\t\t\tif err != nil {\n\t\t\t\tb.ctx.Log.Error(\"failed to calculate new subset weight due to: %w... Resetting benchlist\", err)\n\t\t\t\tb.reset()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tupdatedWeight = newWeight\n\t\t}\n\n\t\tb.benchlistOrder.Remove(e)\n\t\tdelete(b.benchlistTimes, key)\n\t\tb.benchlistSet.Remove(validatorID)\n\t}\n\n\tupdatedBenchLen := b.benchlistSet.Len()\n\tb.ctx.Log.Debug(\"benchlist weight: (%d\/%d) -> (%d\/%d). 
Benched Validators: %d -> %d\",\n\t\tcurrentWeight,\n\t\ttotalWeight,\n\t\tupdatedWeight,\n\t\ttotalWeight,\n\t\tbenchLen,\n\t\tupdatedBenchLen,\n\t)\n\tb.metrics.weightBenched.Set(float64(updatedWeight))\n\tb.metrics.numBenched.Set(float64(updatedBenchLen))\n}\n\nfunc (b *queryBenchlist) reset() {\n\tb.pendingQueries = make(map[[20]byte]map[uint32]pendingQuery)\n\tb.consecutiveFailures = make(map[[20]byte]int)\n\tb.benchlistTimes = make(map[[20]byte]time.Time)\n\tb.benchlistOrder.Init()\n\tb.benchlistSet.Clear()\n\tb.metrics.weightBenched.Set(0)\n\tb.metrics.numBenched.Set(0)\n}\n\n\/\/ removeQuery returns true if the query was present\nfunc (b *queryBenchlist) removeQuery(validatorID ids.ShortID, requestID uint32) bool {\n\tkey := validatorID.Key()\n\n\tvalidatorRequests, ok := b.pendingQueries[key]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tquery, ok := validatorRequests[requestID]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tdelete(validatorRequests, requestID)\n\tif len(validatorRequests) == 0 {\n\t\tdelete(b.pendingQueries, key)\n\t}\n\tb.metrics.observe(validatorID, query.msgType, b.clock.Time().Sub(query.registered))\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package gce\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"net\"\n\n\t\"v.io\/core\/veyron2\"\n\t\"v.io\/core\/veyron2\/context\"\n\t\"v.io\/core\/veyron2\/ipc\"\n\n\t\"v.io\/core\/veyron\/lib\/appcycle\"\n\t\"v.io\/core\/veyron\/lib\/flags\"\n\t\"v.io\/core\/veyron\/lib\/netstate\"\n\t\"v.io\/core\/veyron\/profiles\/internal\/gce\"\n\t_ \"v.io\/core\/veyron\/runtimes\/google\/ipc\/protocols\/tcp\"\n\t_ \"v.io\/core\/veyron\/runtimes\/google\/ipc\/protocols\/ws\"\n\t_ \"v.io\/core\/veyron\/runtimes\/google\/ipc\/protocols\/wsh\"\n\tgrt \"v.io\/core\/veyron\/runtimes\/google\/rt\"\n)\n\nvar (\n\tcommonFlags *flags.Flags\n)\n\nfunc init() {\n\tcommonFlags = flags.CreateAndRegister(flag.CommandLine, flags.Listen)\n\tveyron2.RegisterProfileInit(Init)\n}\n\nfunc Init(ctx *context.T) (veyron2.RuntimeX, *context.T, veyron2.Shutdown, error) {\n\tif !gce.RunningOnGCE() {\n\t\treturn nil, nil, nil, fmt.Errorf(\"GCE profile used on a non-GCE system\")\n\t}\n\n\truntime := &grt.RuntimeX{}\n\tctx, shutdown, err := runtime.Init(ctx, nil)\n\tif err != nil {\n\t\treturn nil, nil, shutdown, err\n\t}\n\tveyron2.GetLogger(ctx).VI(1).Infof(\"Initializing GCE profile.\")\n\n\tlf := commonFlags.ListenFlags()\n\tlistenSpec := ipc.ListenSpec{\n\t\tAddrs: ipc.ListenAddrs(lf.Addrs),\n\t\tProxy: lf.ListenProxy,\n\t}\n\n\tif ip, err := gce.ExternalIPAddress(); err != nil {\n\t\treturn nil, nil, shutdown, err\n\t} else {\n\t\tlistenSpec.AddressChooser = func(network string, addrs []ipc.Address) ([]ipc.Address, error) {\n\t\t\treturn []ipc.Address{&netstate.AddrIfc{&net.IPAddr{IP: ip}, \"gce-nat\", nil}}, nil\n\t\t}\n\t}\n\tctx = runtime.SetListenSpec(ctx, listenSpec)\n\n\tac := appcycle.New()\n\tctx = runtime.SetAppCycle(ctx, ac)\n\n\tprofileShutdown := func() {\n\t\tshutdown()\n\t\tac.Shutdown()\n\t}\n\n\treturn runtime, ctx, profileShutdown, nil\n}\n<commit_msg>veyron\/profiles\/gce: TBR: Add missing build tag.<commit_after>\/\/ +build linux\n\n\/\/ Package gce provides a profile for Google Compute Engine and should be\n\/\/ used by binaries that only ever expect to be run on GCE.\npackage gce\n\nimport 
(\n\t\"flag\"\n\t\"fmt\"\n\t\"net\"\n\n\t\"v.io\/core\/veyron2\"\n\t\"v.io\/core\/veyron2\/context\"\n\t\"v.io\/core\/veyron2\/ipc\"\n\n\t\"v.io\/core\/veyron\/lib\/appcycle\"\n\t\"v.io\/core\/veyron\/lib\/flags\"\n\t\"v.io\/core\/veyron\/lib\/netstate\"\n\t\"v.io\/core\/veyron\/profiles\/internal\/gce\"\n\t_ \"v.io\/core\/veyron\/runtimes\/google\/ipc\/protocols\/tcp\"\n\t_ \"v.io\/core\/veyron\/runtimes\/google\/ipc\/protocols\/ws\"\n\t_ \"v.io\/core\/veyron\/runtimes\/google\/ipc\/protocols\/wsh\"\n\tgrt \"v.io\/core\/veyron\/runtimes\/google\/rt\"\n)\n\nvar (\n\tcommonFlags *flags.Flags\n)\n\nfunc init() {\n\tcommonFlags = flags.CreateAndRegister(flag.CommandLine, flags.Listen)\n\tveyron2.RegisterProfileInit(Init)\n}\n\nfunc Init(ctx *context.T) (veyron2.RuntimeX, *context.T, veyron2.Shutdown, error) {\n\tif !gce.RunningOnGCE() {\n\t\treturn nil, nil, nil, fmt.Errorf(\"GCE profile used on a non-GCE system\")\n\t}\n\n\truntime := &grt.RuntimeX{}\n\tctx, shutdown, err := runtime.Init(ctx, nil)\n\tif err != nil {\n\t\treturn nil, nil, shutdown, err\n\t}\n\tveyron2.GetLogger(ctx).VI(1).Infof(\"Initializing GCE profile.\")\n\n\tlf := commonFlags.ListenFlags()\n\tlistenSpec := ipc.ListenSpec{\n\t\tAddrs: ipc.ListenAddrs(lf.Addrs),\n\t\tProxy: lf.ListenProxy,\n\t}\n\n\tif ip, err := gce.ExternalIPAddress(); err != nil {\n\t\treturn nil, nil, shutdown, err\n\t} else {\n\t\tlistenSpec.AddressChooser = func(network string, addrs []ipc.Address) ([]ipc.Address, error) {\n\t\t\treturn []ipc.Address{&netstate.AddrIfc{&net.IPAddr{IP: ip}, \"gce-nat\", nil}}, nil\n\t\t}\n\t}\n\tctx = runtime.SetListenSpec(ctx, listenSpec)\n\n\tac := appcycle.New()\n\tctx = runtime.SetAppCycle(ctx, ac)\n\n\tprofileShutdown := func() {\n\t\tshutdown()\n\t\tac.Shutdown()\n\t}\n\n\treturn runtime, ctx, profileShutdown, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package cassandra\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/gocql\/gocql\"\n\t\"github.com\/raintank\/met\"\n\t\"github.com\/raintank\/metrictank\/idx\"\n\t\"github.com\/raintank\/metrictank\/idx\/memory\"\n\t\"github.com\/raintank\/worldping-api\/pkg\/log\"\n\t\"github.com\/rakyll\/globalconf\"\n\t\"gopkg.in\/raintank\/schema.v1\"\n)\n\nconst keyspace_schema = `CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} AND durable_writes = true`\nconst table_schema = `CREATE TABLE IF NOT EXISTS %s.metric_def_idx (\n id text PRIMARY KEY,\n def blob,\n) WITH compaction = {'class': 'SizeTieredCompactionStrategy'}\n AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}`\n\nvar (\n\tidxCasOk met.Count\n\tidxCasFail met.Count\n\tidxCasAddDuration met.Timer\n\tidxCasDeleteDuration met.Timer\n\n\tEnabled bool\n\tkeyspace string\n\thosts string\n\tconsistency string\n\ttimeout time.Duration\n\tnumConns int\n\twriteQueueSize int\n\tprotoVer int\n\tmaxStale time.Duration\n\tpruneInterval time.Duration\n)\n\nfunc ConfigSetup() {\n\tcasIdx := flag.NewFlagSet(\"cassandra-idx\", flag.ExitOnError)\n\tcasIdx.BoolVar(&Enabled, \"enabled\", false, \"\")\n\tcasIdx.StringVar(&keyspace, \"keyspace\", \"metric\", \"Cassandra keyspace to store metricDefinitions in.\")\n\tcasIdx.StringVar(&hosts, \"hosts\", \"localhost:9042\", \"comma separated list of cassandra addresses in host:port form\")\n\tcasIdx.StringVar(&consistency, \"consistency\", \"one\", \"write consistency 
(any|one|two|three|quorum|all|local_quorum|each_quorum|local_one\")\n\tcasIdx.DurationVar(&timeout, \"timeout\", time.Second, \"cassandra request timeout\")\n\tcasIdx.IntVar(&numConns, \"num-conns\", 10, \"number of concurrent connections to cassandra\")\n\tcasIdx.IntVar(&writeQueueSize, \"write-queue-size\", 100000, \"Max number of metricDefs allowed to be unwritten to cassandra\")\n\tcasIdx.IntVar(&protoVer, \"protocol-version\", 4, \"cql protocol version to use\")\n\tcasIdx.DurationVar(&maxStale, \"max-stale\", 0, \"clear series from the index if they have not been seen for this much time.\")\n\tcasIdx.DurationVar(&pruneInterval, \"prune-interval\", time.Hour*3, \"Interval at which the index should be checked for stale series.\")\n\tglobalconf.Register(\"cassandra-idx\", casIdx)\n}\n\ntype writeReq struct {\n\tdef *schema.MetricDefinition\n\trecvTime time.Time\n}\n\n\/\/ Implements the the \"MetricIndex\" interface\ntype CasIdx struct {\n\tmemory.MemoryIdx\n\tcluster *gocql.ClusterConfig\n\tsession *gocql.Session\n\twriteQueue chan writeReq\n\tshutdown chan struct{}\n\twg sync.WaitGroup\n}\n\nfunc New() *CasIdx {\n\tcluster := gocql.NewCluster(strings.Split(hosts, \",\")...)\n\tcluster.Consistency = gocql.ParseConsistency(consistency)\n\tcluster.Timeout = timeout\n\tcluster.NumConns = numConns\n\tcluster.ProtoVersion = protoVer\n\n\treturn &CasIdx{\n\t\tMemoryIdx: *memory.New(),\n\t\tcluster: cluster,\n\t\twriteQueue: make(chan writeReq, writeQueueSize),\n\t\tshutdown: make(chan struct{}),\n\t}\n}\n\nfunc (c *CasIdx) Init(stats met.Backend) error {\n\tlog.Info(\"initializing cassandra-idx. Hosts=%s\", hosts)\n\tif err := c.MemoryIdx.Init(stats); err != nil {\n\t\treturn err\n\t}\n\n\tvar err error\n\ttmpSession, err := c.cluster.CreateSession()\n\tif err != nil {\n\t\tlog.Error(3, \"cassandra-idx failed to create cassandra session. %s\", err)\n\t\treturn err\n\t}\n\n\t\/\/ ensure the keyspace and table exist.\n\terr = tmpSession.Query(fmt.Sprintf(keyspace_schema, keyspace)).Exec()\n\tif err != nil {\n\t\tlog.Error(3, \"cassandra-idx failed to initialize cassandra keyspace. %s\", err)\n\t\treturn err\n\t}\n\terr = tmpSession.Query(fmt.Sprintf(table_schema, keyspace)).Exec()\n\tif err != nil {\n\t\tlog.Error(3, \"cassandra-idx failed to initialize cassandra table. %s\", err)\n\t\treturn err\n\t}\n\ttmpSession.Close()\n\tc.cluster.Keyspace = keyspace\n\tsession, err := c.cluster.CreateSession()\n\tif err != nil {\n\t\tlog.Error(3, \"cassandra-idx failed to create cassandra session. 
%s\", err)\n\t\treturn err\n\t}\n\n\tc.session = session\n\n\tidxCasOk = stats.NewCount(\"idx.cassandra.ok\")\n\tidxCasFail = stats.NewCount(\"idx.cassandra.fail\")\n\tidxCasAddDuration = stats.NewTimer(\"idx.cassandra.add_duration\", 0)\n\tidxCasDeleteDuration = stats.NewTimer(\"idx.cassandra.delete_duration\", 0)\n\n\tfor i := 0; i < numConns; i++ {\n\t\tc.wg.Add(1)\n\t\tgo c.processWriteQueue()\n\t}\n\t\/\/Rebuild the in-memory index.\n\n\tc.rebuildIndex()\n\tif maxStale > 0 {\n\t\tif pruneInterval == 0 {\n\t\t\treturn fmt.Errorf(\"pruneInterval must be greater then 0\")\n\t\t}\n\t\tgo c.prune()\n\t}\n\treturn nil\n}\n\nfunc (c *CasIdx) Stop() {\n\tlog.Info(\"cassandra-idx stopping\")\n\tc.MemoryIdx.Stop()\n\tclose(c.writeQueue)\n\tc.wg.Wait()\n\tc.session.Close()\n}\n\nfunc (c *CasIdx) Add(data *schema.MetricData) {\n\texisting, err := c.MemoryIdx.Get(data.Id)\n\tinMemory := true\n\tif err != nil {\n\t\tif err == idx.DefNotFound {\n\t\t\tinMemory = false\n\t\t} else {\n\t\t\tlog.Error(3, \"cassandra-idx Failed to query Memory Index for %s. %s\", data.Id, err)\n\t\t\treturn\n\t\t}\n\t}\n\tif inMemory {\n\t\tlog.Debug(\"cassandra-idx def already seen before. Just updating memory Index\")\n\t\texisting.LastUpdate = data.Time\n\t\tc.MemoryIdx.AddDef(&existing)\n\t\treturn\n\t}\n\tdef := schema.MetricDefinitionFromMetricData(data)\n\tc.MemoryIdx.AddDef(def)\n\tc.writeQueue <- writeReq{recvTime: time.Now(), def: def}\n}\n\nfunc (c *CasIdx) rebuildIndex() {\n\tlog.Info(\"cassandra-idx Rebuilding Memory Index from metricDefinitions in Cassandra\")\n\tpre := time.Now()\n\tdefs := make([]schema.MetricDefinition, 0)\n\titer := c.session.Query(\"SELECT def from metric_def_idx\").Iter()\n\n\tvar data []byte\n\tmdef := schema.MetricDefinition{}\n\tfor iter.Scan(&data) {\n\t\t_, err := mdef.UnmarshalMsg(data)\n\t\tif err != nil {\n\t\t\tlog.Error(3, \"cassandra-idx Bad definition in index. %s - %s\", data, err)\n\t\t\tcontinue\n\t\t}\n\t\tdefs = append(defs, mdef)\n\t}\n\tc.MemoryIdx.Load(defs)\n\tlog.Info(\"Rebuilding Memory Index Complete. Took %s\", time.Since(pre).String())\n}\n\nfunc (c *CasIdx) processWriteQueue() {\n\tlog.Info(\"cassandra-idx writeQueue handler started.\")\n\tdata := make([]byte, 0)\n\tvar success bool\n\tvar attempts int\n\tvar err error\n\tvar req writeReq\n\tfor req = range c.writeQueue {\n\t\tdata = data[:0]\n\t\tdata, err = req.def.MarshalMsg(data)\n\t\tif err != nil {\n\t\t\tlog.Error(3, \"Failed to marshal metricDef. %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tsuccess = false\n\t\tattempts = 0\n\t\tfor !success {\n\t\t\tif err := c.session.Query(`INSERT INTO metric_def_idx (id, def) VALUES (?, ?)`, req.def.Id, data).Exec(); err != nil {\n\t\t\t\tidxCasFail.Inc(1)\n\t\t\t\tif (attempts % 20) == 0 {\n\t\t\t\t\tlog.Warn(\"cassandra-idx Failed to write def to cassandra. it will be retried. %s\", err)\n\t\t\t\t}\n\t\t\t\tsleepTime := 100 * attempts\n\t\t\t\tif sleepTime > 2000 {\n\t\t\t\t\tsleepTime = 2000\n\t\t\t\t}\n\t\t\t\ttime.Sleep(time.Duration(sleepTime) * time.Millisecond)\n\t\t\t\tattempts++\n\t\t\t} else {\n\t\t\t\tsuccess = true\n\t\t\t\tidxCasAddDuration.Value(time.Since(req.recvTime))\n\t\t\t\tidxCasOk.Inc(1)\n\t\t\t\tlog.Debug(\"cassandra-idx metricDef saved to cassandra. 
%s\", req.def.Id)\n\t\t\t}\n\t\t}\n\t}\n\tlog.Info(\"cassandra-idx writeQueue handler ended.\")\n\tc.wg.Done()\n}\n\nfunc (c *CasIdx) Delete(orgId int, pattern string) error {\n\tids, err := c.MemoryIdx.DeleteWithReport(orgId, pattern)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, id := range ids {\n\t\terr := c.session.Query(\"DELETE FROM metric_def_idx where id=?\", id).Exec()\n\t\tif err != nil {\n\t\t\tlog.Error(3, \"cassandra-idx Failed to delete metricDef %s from cassandra. %s\", id, err)\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *CasIdx) Prune(orgId int, oldest time.Time) ([]schema.MetricDefinition, error) {\n\tpruned, err := c.MemoryIdx.Prune(orgId, oldest)\n\t\/\/ if an error was encountered then pruned is probably a partial list of metricDefs\n\t\/\/ deleted, so lets still try and delete these from Cassandra.\n\tfor _, def := range pruned {\n\t\terr := c.session.Query(\"DELETE FROM metric_def_idx where id=?\", def.Id).Exec()\n\t\tif err != nil {\n\t\t\tlog.Error(3, \"cassandra-idx Failed to delete metricDef %s from cassandra. %s\", def.Id, err)\n\t\t\tcontinue\n\t\t}\n\t}\n\treturn pruned, err\n}\n\nfunc (c *CasIdx) prune() {\n\tticker := time.NewTicker(pruneInterval)\n\tfor range ticker.C {\n\t\tstaleTs := time.Now().Add(maxStale * -1)\n\t\t_, err := c.Prune(-1, staleTs)\n\t\tif err != nil {\n\t\t\tlog.Error(3, \"cassandra-idx: prune error. %s\", err)\n\t\t}\n\t}\n}\n<commit_msg>better handling of errors deleting from cassandra.<commit_after>package cassandra\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/gocql\/gocql\"\n\t\"github.com\/raintank\/met\"\n\t\"github.com\/raintank\/metrictank\/idx\"\n\t\"github.com\/raintank\/metrictank\/idx\/memory\"\n\t\"github.com\/raintank\/worldping-api\/pkg\/log\"\n\t\"github.com\/rakyll\/globalconf\"\n\t\"gopkg.in\/raintank\/schema.v1\"\n)\n\nconst keyspace_schema = `CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} AND durable_writes = true`\nconst table_schema = `CREATE TABLE IF NOT EXISTS %s.metric_def_idx (\n id text PRIMARY KEY,\n def blob,\n) WITH compaction = {'class': 'SizeTieredCompactionStrategy'}\n AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}`\n\nvar (\n\tidxCasOk met.Count\n\tidxCasFail met.Count\n\tidxCasAddDuration met.Timer\n\tidxCasDeleteDuration met.Timer\n\n\tEnabled bool\n\tkeyspace string\n\thosts string\n\tconsistency string\n\ttimeout time.Duration\n\tnumConns int\n\twriteQueueSize int\n\tprotoVer int\n\tmaxStale time.Duration\n\tpruneInterval time.Duration\n)\n\nfunc ConfigSetup() {\n\tcasIdx := flag.NewFlagSet(\"cassandra-idx\", flag.ExitOnError)\n\tcasIdx.BoolVar(&Enabled, \"enabled\", false, \"\")\n\tcasIdx.StringVar(&keyspace, \"keyspace\", \"metric\", \"Cassandra keyspace to store metricDefinitions in.\")\n\tcasIdx.StringVar(&hosts, \"hosts\", \"localhost:9042\", \"comma separated list of cassandra addresses in host:port form\")\n\tcasIdx.StringVar(&consistency, \"consistency\", \"one\", \"write consistency (any|one|two|three|quorum|all|local_quorum|each_quorum|local_one\")\n\tcasIdx.DurationVar(&timeout, \"timeout\", time.Second, \"cassandra request timeout\")\n\tcasIdx.IntVar(&numConns, \"num-conns\", 10, \"number of concurrent connections to cassandra\")\n\tcasIdx.IntVar(&writeQueueSize, \"write-queue-size\", 100000, \"Max number of metricDefs allowed to be unwritten to cassandra\")\n\tcasIdx.IntVar(&protoVer, \"protocol-version\", 4, \"cql 
protocol version to use\")\n\tcasIdx.DurationVar(&maxStale, \"max-stale\", 0, \"clear series from the index if they have not been seen for this much time.\")\n\tcasIdx.DurationVar(&pruneInterval, \"prune-interval\", time.Hour*3, \"Interval at which the index should be checked for stale series.\")\n\tglobalconf.Register(\"cassandra-idx\", casIdx)\n}\n\ntype writeReq struct {\n\tdef *schema.MetricDefinition\n\trecvTime time.Time\n}\n\n\/\/ Implements the the \"MetricIndex\" interface\ntype CasIdx struct {\n\tmemory.MemoryIdx\n\tcluster *gocql.ClusterConfig\n\tsession *gocql.Session\n\twriteQueue chan writeReq\n\tshutdown chan struct{}\n\twg sync.WaitGroup\n}\n\nfunc New() *CasIdx {\n\tcluster := gocql.NewCluster(strings.Split(hosts, \",\")...)\n\tcluster.Consistency = gocql.ParseConsistency(consistency)\n\tcluster.Timeout = timeout\n\tcluster.NumConns = numConns\n\tcluster.ProtoVersion = protoVer\n\n\treturn &CasIdx{\n\t\tMemoryIdx: *memory.New(),\n\t\tcluster: cluster,\n\t\twriteQueue: make(chan writeReq, writeQueueSize),\n\t\tshutdown: make(chan struct{}),\n\t}\n}\n\nfunc (c *CasIdx) Init(stats met.Backend) error {\n\tlog.Info(\"initializing cassandra-idx. Hosts=%s\", hosts)\n\tif err := c.MemoryIdx.Init(stats); err != nil {\n\t\treturn err\n\t}\n\n\tvar err error\n\ttmpSession, err := c.cluster.CreateSession()\n\tif err != nil {\n\t\tlog.Error(3, \"cassandra-idx failed to create cassandra session. %s\", err)\n\t\treturn err\n\t}\n\n\t\/\/ ensure the keyspace and table exist.\n\terr = tmpSession.Query(fmt.Sprintf(keyspace_schema, keyspace)).Exec()\n\tif err != nil {\n\t\tlog.Error(3, \"cassandra-idx failed to initialize cassandra keyspace. %s\", err)\n\t\treturn err\n\t}\n\terr = tmpSession.Query(fmt.Sprintf(table_schema, keyspace)).Exec()\n\tif err != nil {\n\t\tlog.Error(3, \"cassandra-idx failed to initialize cassandra table. %s\", err)\n\t\treturn err\n\t}\n\ttmpSession.Close()\n\tc.cluster.Keyspace = keyspace\n\tsession, err := c.cluster.CreateSession()\n\tif err != nil {\n\t\tlog.Error(3, \"cassandra-idx failed to create cassandra session. %s\", err)\n\t\treturn err\n\t}\n\n\tc.session = session\n\n\tidxCasOk = stats.NewCount(\"idx.cassandra.ok\")\n\tidxCasFail = stats.NewCount(\"idx.cassandra.fail\")\n\tidxCasAddDuration = stats.NewTimer(\"idx.cassandra.add_duration\", 0)\n\tidxCasDeleteDuration = stats.NewTimer(\"idx.cassandra.delete_duration\", 0)\n\n\tfor i := 0; i < numConns; i++ {\n\t\tc.wg.Add(1)\n\t\tgo c.processWriteQueue()\n\t}\n\t\/\/Rebuild the in-memory index.\n\n\tc.rebuildIndex()\n\tif maxStale > 0 {\n\t\tif pruneInterval == 0 {\n\t\t\treturn fmt.Errorf(\"pruneInterval must be greater then 0\")\n\t\t}\n\t\tgo c.prune()\n\t}\n\treturn nil\n}\n\nfunc (c *CasIdx) Stop() {\n\tlog.Info(\"cassandra-idx stopping\")\n\tc.MemoryIdx.Stop()\n\tclose(c.writeQueue)\n\tc.wg.Wait()\n\tc.session.Close()\n}\n\nfunc (c *CasIdx) Add(data *schema.MetricData) {\n\texisting, err := c.MemoryIdx.Get(data.Id)\n\tinMemory := true\n\tif err != nil {\n\t\tif err == idx.DefNotFound {\n\t\t\tinMemory = false\n\t\t} else {\n\t\t\tlog.Error(3, \"cassandra-idx Failed to query Memory Index for %s. %s\", data.Id, err)\n\t\t\treturn\n\t\t}\n\t}\n\tif inMemory {\n\t\tlog.Debug(\"cassandra-idx def already seen before. 
Just updating memory Index\")\n\t\texisting.LastUpdate = data.Time\n\t\tc.MemoryIdx.AddDef(&existing)\n\t\treturn\n\t}\n\tdef := schema.MetricDefinitionFromMetricData(data)\n\tc.MemoryIdx.AddDef(def)\n\tc.writeQueue <- writeReq{recvTime: time.Now(), def: def}\n}\n\nfunc (c *CasIdx) rebuildIndex() {\n\tlog.Info(\"cassandra-idx Rebuilding Memory Index from metricDefinitions in Cassandra\")\n\tpre := time.Now()\n\tdefs := make([]schema.MetricDefinition, 0)\n\titer := c.session.Query(\"SELECT def from metric_def_idx\").Iter()\n\n\tvar data []byte\n\tmdef := schema.MetricDefinition{}\n\tfor iter.Scan(&data) {\n\t\t_, err := mdef.UnmarshalMsg(data)\n\t\tif err != nil {\n\t\t\tlog.Error(3, \"cassandra-idx Bad definition in index. %s - %s\", data, err)\n\t\t\tcontinue\n\t\t}\n\t\tdefs = append(defs, mdef)\n\t}\n\tc.MemoryIdx.Load(defs)\n\tlog.Info(\"Rebuilding Memory Index Complete. Took %s\", time.Since(pre).String())\n}\n\nfunc (c *CasIdx) processWriteQueue() {\n\tlog.Info(\"cassandra-idx writeQueue handler started.\")\n\tdata := make([]byte, 0)\n\tvar success bool\n\tvar attempts int\n\tvar err error\n\tvar req writeReq\n\tfor req = range c.writeQueue {\n\t\tdata = data[:0]\n\t\tdata, err = req.def.MarshalMsg(data)\n\t\tif err != nil {\n\t\t\tlog.Error(3, \"Failed to marshal metricDef. %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tsuccess = false\n\t\tattempts = 0\n\t\tfor !success {\n\t\t\tif err := c.session.Query(`INSERT INTO metric_def_idx (id, def) VALUES (?, ?)`, req.def.Id, data).Exec(); err != nil {\n\t\t\t\tidxCasFail.Inc(1)\n\t\t\t\tif (attempts % 20) == 0 {\n\t\t\t\t\tlog.Warn(\"cassandra-idx Failed to write def to cassandra. it will be retried. %s\", err)\n\t\t\t\t}\n\t\t\t\tsleepTime := 100 * attempts\n\t\t\t\tif sleepTime > 2000 {\n\t\t\t\t\tsleepTime = 2000\n\t\t\t\t}\n\t\t\t\ttime.Sleep(time.Duration(sleepTime) * time.Millisecond)\n\t\t\t\tattempts++\n\t\t\t} else {\n\t\t\t\tsuccess = true\n\t\t\t\tidxCasAddDuration.Value(time.Since(req.recvTime))\n\t\t\t\tidxCasOk.Inc(1)\n\t\t\t\tlog.Debug(\"cassandra-idx metricDef saved to cassandra. %s\", req.def.Id)\n\t\t\t}\n\t\t}\n\t}\n\tlog.Info(\"cassandra-idx writeQueue handler ended.\")\n\tc.wg.Done()\n}\n\nfunc (c *CasIdx) Delete(orgId int, pattern string) error {\n\tids, err := c.MemoryIdx.DeleteWithReport(orgId, pattern)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, id := range ids {\n\t\terr := c.session.Query(\"DELETE FROM metric_def_idx where id=?\", id).Exec()\n\t\tif err != nil {\n\t\t\tlog.Error(3, \"cassandra-idx Failed to delete metricDef %s from cassandra. %s\", id, err)\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *CasIdx) Prune(orgId int, oldest time.Time) ([]schema.MetricDefinition, error) {\n\tpruned, err := c.MemoryIdx.Prune(orgId, oldest)\n\t\/\/ if an error was encountered then pruned is probably a partial list of metricDefs\n\t\/\/ deleted, so lets still try and delete these from Cassandra.\n\tfor _, def := range pruned {\n\t\tattempts := 0\n\t\tdeleted := false\n\t\tfor !deleted && attempts < 5 {\n\t\t\tattempts++\n\t\t\tcErr := c.session.Query(\"DELETE FROM metric_def_idx where id=?\", def.Id).Exec()\n\t\t\tif cErr != nil {\n\t\t\t\tlog.Error(3, \"cassandra-idx Failed to delete metricDef %s from cassandra. 
%s\", def.Id, err)\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t} else {\n\t\t\t\tdeleted = true\n\t\t\t}\n\t\t}\n\t}\n\treturn pruned, err\n}\n\nfunc (c *CasIdx) prune() {\n\tticker := time.NewTicker(pruneInterval)\n\tfor range ticker.C {\n\t\tstaleTs := time.Now().Add(maxStale * -1)\n\t\t_, err := c.Prune(-1, staleTs)\n\t\tif err != nil {\n\t\t\tlog.Error(3, \"cassandra-idx: prune error. %s\", err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Walter Schulze\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage funcs\n\nimport (\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/Now returns a new now function.\nfunc Now() Int {\n\treturn &now{}\n}\n\ntype now struct{}\n\nfunc (this *now) Eval() (int64, error) {\n\treturn time.Now().UnixNano(), nil\n}\n\nfunc (this *now) Hash() uint64 {\n\treturn Hash(\"now\")\n}\n\nfunc (this *now) Compare(that Comparable) int {\n\tif this.Hash() != that.Hash() {\n\t\tif this.Hash() < that.Hash() {\n\t\t\treturn -1\n\t\t}\n\t\treturn 1\n\t}\n\tif _, ok := that.(*now); ok {\n\t\treturn 0\n\t}\n\treturn strings.Compare(this.String(), that.String())\n}\n\nfunc (this *now) String() string {\n\treturn \"now\"\n}\n\nfunc (this *now) HasVariable() bool { return true }\n\nfunc init() {\n\tRegister(\"now\", Now)\n}\n<commit_msg>now() rather than now<commit_after>\/\/ Copyright 2013 Walter Schulze\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage funcs\n\nimport (\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/Now returns a new now function.\nfunc Now() Int {\n\treturn &now{}\n}\n\ntype now struct{}\n\nfunc (this *now) Eval() (int64, error) {\n\treturn time.Now().UnixNano(), nil\n}\n\nfunc (this *now) Hash() uint64 {\n\treturn Hash(\"now\")\n}\n\nfunc (this *now) Compare(that Comparable) int {\n\tif this.Hash() != that.Hash() {\n\t\tif this.Hash() < that.Hash() {\n\t\t\treturn -1\n\t\t}\n\t\treturn 1\n\t}\n\tif _, ok := that.(*now); ok {\n\t\treturn 0\n\t}\n\treturn strings.Compare(this.String(), that.String())\n}\n\nfunc (this *now) String() string {\n\treturn \"now()\"\n}\n\nfunc (this *now) HasVariable() bool { return true }\n\nfunc init() {\n\tRegister(\"now\", Now)\n}\n<|endoftext|>"} {"text":"<commit_before>package framework\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\ntype Plugin struct {\n\tId string `json:\"id\"`\n\tName string `json:\"name\"`\n\tVersion string `json:\"version\"`\n\tDescription string 
`json:\"description\"`\n\tPermissions map[string][]string `json:\"permissions\"`\n\tAuthor Author `json:\"author\"`\n\tHomepage string `json:\"homepage\"`\n\tHideSidebar bool `json:\"hideSidebar\"`\n\tTiles []Tile `json:\"tiles\"`\n}\n\ntype Tile struct {\n\tName string `json:\"name\"`\n\tDescription string `json:\"description\"`\n\tView string `json:\"view\"`\n\tSize string `json:\"size\"`\n}\n\ntype Author struct {\n\tName string `json:\"name\"`\n\tEmail string `json:\"email\"`\n\tHomepage string `json:\"homepage\"`\n}\n\nfunc LoadPlugins(pluginPath string) View {\n\tfiles, err := ioutil.ReadDir(pluginPath)\n\tif err != nil {\n\t\treturn Error500\n\t}\n\n\toutput := []Plugin{}\n\tfor _, v := range files {\n\t\tif _, err := os.Stat(filepath.Join(pluginPath, v.Name(), \"package.json\")); err == nil && v.IsDir() {\n\t\t\tpackageJSON, err := os.Open(filepath.Join(pluginPath, v.Name(), \"package.json\"))\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar plugin Plugin\n\t\t\tdecoder := json.NewDecoder(packageJSON)\n\t\t\terr = decoder.Decode(&plugin)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif plugin.Id != v.Name() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\toutput = append(output, plugin)\n\n\t\t\tpackageJSON.Close()\n\t\t}\n\t}\n\tbytes, err := json.Marshal(output)\n\tif err != nil {\n\t\treturn Error500\n\t}\n\treturn &RawView{bytes, \"application\/json\"}\n}\n<commit_msg>Added Viewers to Plugin Package.json<commit_after>package framework\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\ntype Plugin struct {\n\tId string `json:\"id\"`\n\tName string `json:\"name\"`\n\tVersion string `json:\"version\"`\n\tDescription string `json:\"description\"`\n\tPermissions map[string][]string `json:\"permissions\"`\n\tAuthor Author `json:\"author\"`\n\tHomepage string `json:\"homepage\"`\n\tHideSidebar bool `json:\"hideSidebar\"`\n\tTiles map[string]Tile `json:\"tiles\"`\n\tViewers map[string]Viewer `json:\"viewers\"`\n}\n\ntype Viewer struct {\n\tType []string `json:\"type\"`\n\tView string `json:\"view\"`\n}\n\ntype Tile struct {\n\tName string `json:\"name\"`\n\tDescription string `json:\"description\"`\n\tView string `json:\"view\"`\n\tSize string `json:\"size\"`\n}\n\ntype Author struct {\n\tName string `json:\"name\"`\n\tEmail string `json:\"email\"`\n\tHomepage string `json:\"homepage\"`\n}\n\nfunc LoadPlugins(pluginPath string) View {\n\tfiles, err := ioutil.ReadDir(pluginPath)\n\tif err != nil {\n\t\treturn Error500\n\t}\n\n\toutput := []Plugin{}\n\tfor _, v := range files {\n\t\tif _, err := os.Stat(filepath.Join(pluginPath, v.Name(), \"package.json\")); err == nil && v.IsDir() {\n\t\t\tpackageJSON, err := os.Open(filepath.Join(pluginPath, v.Name(), \"package.json\"))\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar plugin Plugin\n\t\t\tdecoder := json.NewDecoder(packageJSON)\n\t\t\terr = decoder.Decode(&plugin)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif plugin.Id != v.Name() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\toutput = append(output, plugin)\n\n\t\t\tpackageJSON.Close()\n\t\t}\n\t}\n\tbytes, err := json.Marshal(output)\n\tif err != nil {\n\t\treturn Error500\n\t}\n\treturn &RawView{bytes, \"application\/json\"}\n}\n<|endoftext|>"} {"text":"<commit_before>package importer\r\n\r\nimport (\r\n\t\"fmt\"\r\n\t\"log\"\r\n\t\"strings\"\r\n\r\n\tpb \"gopkg.in\/cheggaaa\/pb.v1\"\r\n\t\/\/ 
\"io\"\r\n\t\/\/ \"io\/ioutil\"\r\n\t\/\/ \"os\"\r\n\t\"sync\"\r\n\r\n\t\"time\"\r\n\r\n\t\"github.com\/pkg\/errors\"\r\n\t\"github.com\/wadahiro\/gitss\/server\/config\"\r\n\t\"github.com\/wadahiro\/gitss\/server\/indexer\"\r\n\t\"github.com\/wadahiro\/gitss\/server\/repo\"\r\n\t\/\/ \"github.com\/wadahiro\/gitss\/server\/util\"\r\n\t\"bytes\"\r\n\r\n\t\"golang.org\/x\/net\/html\/charset\"\r\n\t\"golang.org\/x\/text\/transform\"\r\n)\r\n\r\ntype GitImporter struct {\r\n\tconfig *config.Config\r\n\tindexer indexer.Indexer\r\n\treader *repo.GitRepoReader\r\n\tdebug bool\r\n}\r\n\r\nfunc NewGitImporter(config *config.Config, indexer indexer.Indexer) *GitImporter {\r\n\tr := repo.NewGitRepoReader(config)\r\n\treturn &GitImporter{config: config, indexer: indexer, reader: r, debug: config.Debug}\r\n}\r\n\r\nfunc (g *GitImporter) Run(organization string, project string, url string) {\r\n\tlog.Printf(\"Clone from %s %s %s\\n\", organization, project, url)\r\n\r\n\trepo, err := g.reader.CloneGitRepo(organization, project, url)\r\n\tif err != nil {\r\n\t\tlog.Printf(\"Not found the repository: %s:%s\/%s %+v\\n\", organization, project, url, err)\r\n\t\treturn\r\n\t}\r\n\r\n\trepo.FetchAll()\r\n\r\n\tlog.Printf(\"Fetched all. %s %s %s \\n\", organization, project, url)\r\n\r\n\t\/\/ branches and tags in the git repository (include\/exclude filters are applied)\r\n\tbranchMap, tagMap, err := repo.GetLatestCommitIdsMap()\r\n\tif err != nil {\r\n\t\tlog.Printf(\"Failed to get latest commitIds of branch and tag. %+v\\n\", err)\r\n\t\treturn\r\n\t}\r\n\r\n\t\/\/ branches in the config file\r\n\tindexed := g.config.GetIndexed(organization, project, repo.Repository)\r\n\r\n\tlog.Printf(\"Start indexing for %s:%s\/%s branches: %v -> %v, tags: %v -> %v\\n\", organization, project, repo.Repository, indexed.Branches, branchMap, indexed.Tags, tagMap)\r\n\r\n\t\/\/ progress bar\r\n\tbar := pb.StartNew(0)\r\n\tbar.ShowPercent = true\r\n\r\n\tstart := time.Now()\r\n\r\n\terr = g.runIndexing(bar, repo, url, indexed, branchMap, tagMap)\r\n\tif err != nil {\r\n\t\tlog.Printf(\"Failed to index. 
%+v\", err)\r\n\t\treturn\r\n\t}\r\n\r\n\t\/\/ Remove index for removed branches\r\n\tremoveBranches := []string{}\r\n\tfor ref, _ := range indexed.Branches {\r\n\t\tfound := false\r\n\t\tfor branch := range branchMap {\r\n\t\t\tif ref == branch {\r\n\t\t\t\tfound = true\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t\tif !found {\r\n\t\t\tremoveBranches = append(removeBranches, ref)\r\n\t\t}\r\n\t}\r\n\r\n\tbar.Total = bar.Total + int64(len(removeBranches))\r\n\r\n\t\/\/ Remove index for removed tags\r\n\tremoveTags := []string{}\r\n\tfor ref, _ := range indexed.Tags {\r\n\t\tfound := false\r\n\t\tfor branch := range branchMap {\r\n\t\t\tif ref == branch {\r\n\t\t\t\tfound = true\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t\tif !found {\r\n\t\t\tremoveTags = append(removeTags, ref)\r\n\t\t}\r\n\t}\r\n\r\n\tbar.Total = bar.Total + int64(len(removeTags))\r\n\r\n\tif len(removeBranches) > 0 || len(removeTags) > 0 {\r\n\t\tlog.Printf(\"Start index deleting for %s:%s\/%s (%v) (%v)\\n\", organization, project, repo.Repository, removeBranches, removeTags)\r\n\t\tg.indexer.DeleteIndexByRefs(organization, project, repo.Repository, removeBranches, removeTags)\r\n\r\n\t\tbar.Add(len(removeBranches) + len(removeTags))\r\n\r\n\t\t\/\/ Save config after deleting index completed\r\n\t\tg.config.DeleteIndexed(organization, project, repo.Repository, removeBranches, removeTags)\r\n\t}\r\n\r\n\tend := time.Now()\r\n\ttime := (end.Sub(start)).Seconds()\r\n\r\n\tbar.FinishPrint(fmt.Sprintf(\"Indexing Complete! [%f seconds] for %s:%s\/%s\\n\", time, organization, project, repo.Repository))\r\n}\r\n\r\nfunc (g *GitImporter) runIndexing(bar *pb.ProgressBar, repo *repo.GitRepo, url string, indexed config.Indexed, branchMap config.BrancheIndexedMap, tagMap config.TagIndexedMap) error {\r\n\t\/\/ collect create file entries\r\n\tcreateBranches := make(map[string]string)\r\n\tupdateBranches := make(map[string][2]string)\r\n\tfor branch, latestCommitID := range branchMap {\r\n\t\tfound := false\r\n\t\tfor indexedBranch, prevCommitID := range indexed.Branches {\r\n\t\t\tif branch == indexedBranch {\r\n\t\t\t\tfound = true\r\n\t\t\t\tif latestCommitID == prevCommitID {\r\n\t\t\t\t\tlog.Printf(\"Already up-to-date. %s\", getLoggingTag(repo, branch, latestCommitID))\r\n\t\t\t\t} else {\r\n\t\t\t\t\tupdateBranches[branch] = [2]string{prevCommitID, latestCommitID}\r\n\t\t\t\t}\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t\tif !found {\r\n\t\t\tcreateBranches[branch] = latestCommitID\r\n\t\t}\r\n\t}\r\n\r\n\tcreateTags := make(map[string]string)\r\n\tupdateTags := make(map[string][2]string)\r\n\tfor tag, latestCommitID := range tagMap {\r\n\t\tfound := false\r\n\t\tfor indexedTag, prevCommitID := range indexed.Tags {\r\n\t\t\tif tag == indexedTag {\r\n\t\t\t\tfound = true\r\n\t\t\t\tif latestCommitID == prevCommitID {\r\n\t\t\t\t\tlog.Printf(\"Already up-to-date. 
%s\", getLoggingTag(repo, tag, latestCommitID))\r\n\t\t\t\t} else {\r\n\t\t\t\t\tupdateTags[tag] = [2]string{prevCommitID, latestCommitID}\r\n\t\t\t\t}\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t\tif !found {\r\n\t\t\tcreateTags[tag] = latestCommitID\r\n\t\t}\r\n\t}\r\n\r\n\tqueue := make(chan indexer.FileIndexOperation, 100)\r\n\r\n\t\/\/ process\r\n\tg.UpsertIndex(queue, bar, repo, createBranches, createTags, updateBranches, updateTags)\r\n\r\n\tcallBatch := func(operations []indexer.FileIndexOperation) {\r\n\t\terr := g.indexer.BatchFileIndex(operations)\r\n\t\tif err != nil {\r\n\t\t\terrors.Errorf(\"Batch indexed error: %+v\", err)\r\n\t\t} else {\r\n\t\t\t\/\/ fmt.Printf(\"Batch indexed %d files.\\n\", len(operations))\r\n\t\t}\r\n\t\tbar.Add(len(operations))\r\n\t}\r\n\r\n\t\/\/ batch\r\n\toperations := []indexer.FileIndexOperation{}\r\n\tvar opsSize int64 = 0\r\n\tvar batchLimitSize int64 = 1024 * 1024 \/\/ 1MB\r\n\r\n\t\/\/ fmt.Println(\"start queue reading\")\r\n\r\n\tfor op := range queue {\r\n\t\toperations = append(operations, op)\r\n\t\topsSize += op.FileIndex.Size\r\n\r\n\t\t\/\/ show progress\r\n\t\t\/\/ if len(operations)%80 == 0 {\r\n\t\t\/\/ \tfmt.Printf(\"\\n\")\r\n\t\t\/\/ }\r\n\t\t\/\/ fmt.Printf(\".\")\r\n\r\n\t\tif opsSize >= batchLimitSize {\r\n\t\t\t\/\/ fmt.Printf(\"\\n\")\r\n\r\n\t\t\tcallBatch(operations)\r\n\r\n\t\t\t\/\/ reset\r\n\t\t\toperations = nil\r\n\t\t\topsSize = 0\r\n\t\t}\r\n\t}\r\n\r\n\t\/\/ remains\r\n\tif len(operations) > 0 {\r\n\t\t\/\/ fmt.Printf(\"\\n\")\r\n\t\tcallBatch(operations)\r\n\t}\r\n\r\n\t\/\/ Save config after index completed\r\n\terr := g.config.UpdateIndexed(config.Indexed{Organization: repo.Organization, Project: repo.Project, Repository: repo.Repository, Branches: branchMap, Tags: tagMap})\r\n\r\n\tif err != nil {\r\n\t\treturn errors.Wrapf(err, \"Faild to update indexed.\")\r\n\t}\r\n\r\n\treturn nil\r\n}\r\n\r\nfunc (g *GitImporter) UpsertIndex(queue chan indexer.FileIndexOperation, bar *pb.ProgressBar, r *repo.GitRepo, branchMap map[string]string, tagMap map[string]string, updateBranchMap map[string][2]string, updateTagMap map[string][2]string) error {\r\n\taddFiles, err := r.GetFileEntriesMap(branchMap, tagMap)\r\n\tif err != nil {\r\n\t\treturn errors.Wrapf(err, \"Failed to get file entries. branches: %v tags: %v\", branchMap, tagMap)\r\n\t}\r\n\r\n\tupdateAddFiles, delFiles, err := r.GetDiffFileEntriesMap(updateBranchMap, updateTagMap)\r\n\tif err != nil {\r\n\t\treturn errors.Wrapf(err, \"Failed to get diff. branches: %v tags: %v\", branchMap, tagMap)\r\n\t}\r\n\r\n\tvar wg sync.WaitGroup\r\n\r\n\twg.Add(1)\r\n\tgo func() {\r\n\t\tdefer wg.Done()\r\n\t\tg.handleAddFiles(queue, bar, r, addFiles)\r\n\t\tg.handleAddFiles(queue, bar, r, updateAddFiles)\r\n\t\tg.handleDelFiles(queue, bar, r, delFiles)\r\n\t}()\r\n\r\n\tgo func() {\r\n\t\twg.Wait()\r\n\t\tclose(queue)\r\n\t}()\r\n\r\n\treturn nil\r\n}\r\n\r\nfunc (g *GitImporter) handleAddFiles(queue chan indexer.FileIndexOperation, bar *pb.ProgressBar, r *repo.GitRepo, addFiles map[string]repo.GitFile) {\r\n\tif len(addFiles) == 0 {\r\n\t\treturn\r\n\t}\r\n\r\n\tvar wg sync.WaitGroup\r\n\r\n\tfor blob, file := range addFiles {\r\n\t\t\/\/ check size\r\n\t\tif file.Size > g.config.SizeLimit {\r\n\t\t\tcontinue\r\n\t\t}\r\n\r\n\t\twg.Add(1)\r\n\t\tgo func(blob string, file repo.GitFile) {\r\n\t\t\tdefer wg.Done()\r\n\r\n\t\t\tfor path, loc := range file.Locations {\r\n\t\t\t\t\/\/ check contentType and retrive the file content\r\n\t\t\t\t\/\/ !! 
this will be heavy process !!\r\n\t\t\t\tcontentType, content, err := g.parseContent(r, blob)\r\n\t\t\t\tif err != nil {\r\n\t\t\t\t\tlog.Printf(\"Failed to parse file. [%s] - %s %+v\\n\", blob, path, err)\r\n\t\t\t\t\tcontinue\r\n\t\t\t\t\t\/\/ return errors.Wrapf(err, \"Failed to parse file. [%s] - %s\\n\", blob, path)\r\n\t\t\t\t}\r\n\r\n\t\t\t\t\/\/ @TODO Extract text from binary in the future?\r\n\t\t\t\tif !strings.HasPrefix(contentType, \"text\/\") && contentType != \"application\/octet-stream\" {\r\n\t\t\t\t\tcontinue\r\n\t\t\t\t}\r\n\r\n\t\t\t\ttext, encoding, err := readText(content)\r\n\t\t\t\tif err != nil {\r\n\t\t\t\t\ttext = string(content)\r\n\t\t\t\t\tencoding = \"utf8\"\r\n\t\t\t\t}\r\n\r\n\t\t\t\tfileIndex := indexer.FileIndex{\r\n\t\t\t\t\tMetadata: indexer.Metadata{\r\n\t\t\t\t\t\tBlob: blob,\r\n\t\t\t\t\t\tOrganization: r.Organization,\r\n\t\t\t\t\t\tProject: r.Project,\r\n\t\t\t\t\t\tRepository: r.Repository,\r\n\t\t\t\t\t\tBranches: loc.Branches,\r\n\t\t\t\t\t\tTags: loc.Tags,\r\n\t\t\t\t\t\tPath: path,\r\n\t\t\t\t\t\tExt: indexer.GetExt(path),\r\n\t\t\t\t\t\tEncoding: encoding,\r\n\t\t\t\t\t\tSize: file.Size,\r\n\t\t\t\t\t},\r\n\t\t\t\t\tContent: text,\r\n\t\t\t\t}\r\n\r\n\t\t\t\tbar.Total = bar.Total + 1\r\n\r\n\t\t\t\tqueue <- indexer.FileIndexOperation{Method: indexer.ADD, FileIndex: fileIndex}\r\n\t\t\t}\r\n\t\t}(blob, file)\r\n\t}\r\n\twg.Wait()\r\n}\r\n\r\n\/\/ How to detect encoding\r\n\/\/ http:\/\/qiita.com\/nobuhito\/items\/ff782f64e32f7ed95e43\r\nfunc readText(body []byte) (string, string, error) {\r\n\tvar f []byte\r\n\tencodings := []string{\"shift_jis\", \"utf8\"}\r\n\tvar enc string\r\n\tfor i := range encodings {\r\n\t\tenc = encodings[i]\r\n\t\tif enc != \"\" {\r\n\t\t\tee, _ := charset.Lookup(enc)\r\n\t\t\tif ee == nil {\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\t\tvar buf bytes.Buffer\r\n\t\t\tic := transform.NewWriter(&buf, ee.NewDecoder())\r\n\t\t\t_, err := ic.Write(body)\r\n\t\t\tif err != nil {\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\t\terr = ic.Close()\r\n\t\t\tif err != nil {\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\t\tf = buf.Bytes()\r\n\t\t\tbreak\r\n\t\t}\r\n\t}\r\n\treturn string(f), enc, nil\r\n}\r\n\r\nfunc (g *GitImporter) handleDelFiles(queue chan indexer.FileIndexOperation, bar *pb.ProgressBar, r *repo.GitRepo, delFiles map[string]repo.GitFile) {\r\n\tfor blob, file := range delFiles {\r\n\t\tfor path, loc := range file.Locations {\r\n\t\t\tfileIndex := indexer.FileIndex{\r\n\t\t\t\tMetadata: indexer.Metadata{\r\n\t\t\t\t\tBlob: blob,\r\n\t\t\t\t\tOrganization: r.Organization,\r\n\t\t\t\t\tProject: r.Project,\r\n\t\t\t\t\tRepository: r.Repository,\r\n\t\t\t\t\tBranches: loc.Branches,\r\n\t\t\t\t\tTags: loc.Tags,\r\n\t\t\t\t\tPath: path,\r\n\t\t\t\t},\r\n\t\t\t}\r\n\r\n\t\t\tbar.Total = bar.Total + 1\r\n\r\n\t\t\t\/\/ Delete index\r\n\t\t\tqueue <- indexer.FileIndexOperation{Method: indexer.DELETE, FileIndex: fileIndex}\r\n\t\t}\r\n\t}\r\n}\r\n\r\nfunc (g *GitImporter) parseContent(repo *repo.GitRepo, blob string) (string, []byte, error) {\r\n\tcontentType, content, err := repo.DetectBlobContentType(blob)\r\n\tif err != nil {\r\n\t\treturn \"\", nil, errors.Wrapf(err, \"Failed to read contentType. 
%s\", blob)\r\n\t}\r\n\treturn contentType, content, nil\r\n}\r\n\r\nfunc getLoggingTag(repo *repo.GitRepo, ref string, commitId string) string {\r\n\ttag := fmt.Sprintf(\"%s:%s\/%s (%s) [%s]\", repo.Organization, repo.Project, repo.Repository, ref, commitId)\r\n\treturn tag\r\n}\r\n<commit_msg>Add worker system for scanning git files<commit_after>package importer\r\n\r\nimport (\r\n\t\"fmt\"\r\n\t\"log\"\r\n\t\"strings\"\r\n\r\n\tpb \"gopkg.in\/cheggaaa\/pb.v1\"\r\n\t\/\/ \"io\"\r\n\t\/\/ \"io\/ioutil\"\r\n\t\/\/ \"os\"\r\n\t\"sync\"\r\n\r\n\t\"time\"\r\n\r\n\t\"github.com\/pkg\/errors\"\r\n\t\"github.com\/wadahiro\/gitss\/server\/config\"\r\n\t\"github.com\/wadahiro\/gitss\/server\/indexer\"\r\n\t\"github.com\/wadahiro\/gitss\/server\/repo\"\r\n\t\/\/ \"github.com\/wadahiro\/gitss\/server\/util\"\r\n\t\"bytes\"\r\n\r\n\t\"golang.org\/x\/net\/html\/charset\"\r\n\t\"golang.org\/x\/text\/transform\"\r\n)\r\n\r\ntype GitImporter struct {\r\n\tconfig *config.Config\r\n\tindexer indexer.Indexer\r\n\treader *repo.GitRepoReader\r\n\tdebug bool\r\n}\r\n\r\nfunc NewGitImporter(config *config.Config, indexer indexer.Indexer) *GitImporter {\r\n\tr := repo.NewGitRepoReader(config)\r\n\treturn &GitImporter{config: config, indexer: indexer, reader: r, debug: config.Debug}\r\n}\r\n\r\nfunc (g *GitImporter) Run(organization string, project string, url string) {\r\n\tlog.Printf(\"Clone from %s %s %s\\n\", organization, project, url)\r\n\r\n\trepo, err := g.reader.CloneGitRepo(organization, project, url)\r\n\tif err != nil {\r\n\t\tlog.Printf(\"Not found the repository: %s:%s\/%s %+v\\n\", organization, project, url, err)\r\n\t\treturn\r\n\t}\r\n\r\n\trepo.FetchAll()\r\n\r\n\tlog.Printf(\"Fetched all. %s %s %s \\n\", organization, project, url)\r\n\r\n\t\/\/ branches and tags in the git repository (include\/exclude filters are applied)\r\n\tbranchMap, tagMap, err := repo.GetLatestCommitIdsMap()\r\n\tif err != nil {\r\n\t\tlog.Printf(\"Failed to get latest commitIds of branch and tag. %+v\\n\", err)\r\n\t\treturn\r\n\t}\r\n\r\n\t\/\/ branches in the config file\r\n\tindexed := g.config.GetIndexed(organization, project, repo.Repository)\r\n\r\n\tlog.Printf(\"Start indexing for %s:%s\/%s branches: %v -> %v, tags: %v -> %v\\n\", organization, project, repo.Repository, indexed.Branches, branchMap, indexed.Tags, tagMap)\r\n\r\n\t\/\/ progress bar\r\n\tbar := pb.StartNew(0)\r\n\tbar.ShowPercent = true\r\n\r\n\tstart := time.Now()\r\n\r\n\terr = g.runIndexing(bar, repo, url, indexed, branchMap, tagMap)\r\n\tif err != nil {\r\n\t\tlog.Printf(\"Failed to index. 
%+v\", err)\r\n\t\treturn\r\n\t}\r\n\r\n\t\/\/ Remove index for removed branches\r\n\tremoveBranches := []string{}\r\n\tfor ref, _ := range indexed.Branches {\r\n\t\tfound := false\r\n\t\tfor branch := range branchMap {\r\n\t\t\tif ref == branch {\r\n\t\t\t\tfound = true\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t\tif !found {\r\n\t\t\tremoveBranches = append(removeBranches, ref)\r\n\t\t}\r\n\t}\r\n\r\n\tbar.Total = bar.Total + int64(len(removeBranches))\r\n\r\n\t\/\/ Remove index for removed tags\r\n\tremoveTags := []string{}\r\n\tfor ref, _ := range indexed.Tags {\r\n\t\tfound := false\r\n\t\tfor branch := range branchMap {\r\n\t\t\tif ref == branch {\r\n\t\t\t\tfound = true\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t\tif !found {\r\n\t\t\tremoveTags = append(removeTags, ref)\r\n\t\t}\r\n\t}\r\n\r\n\tbar.Total = bar.Total + int64(len(removeTags))\r\n\r\n\tif len(removeBranches) > 0 || len(removeTags) > 0 {\r\n\t\tlog.Printf(\"Start index deleting for %s:%s\/%s (%v) (%v)\\n\", organization, project, repo.Repository, removeBranches, removeTags)\r\n\t\tg.indexer.DeleteIndexByRefs(organization, project, repo.Repository, removeBranches, removeTags)\r\n\r\n\t\tbar.Add(len(removeBranches) + len(removeTags))\r\n\r\n\t\t\/\/ Save config after deleting index completed\r\n\t\tg.config.DeleteIndexed(organization, project, repo.Repository, removeBranches, removeTags)\r\n\t}\r\n\r\n\tend := time.Now()\r\n\ttime := (end.Sub(start)).Seconds()\r\n\r\n\tbar.FinishPrint(fmt.Sprintf(\"Indexing Complete! [%f seconds] for %s:%s\/%s\\n\", time, organization, project, repo.Repository))\r\n}\r\n\r\nfunc (g *GitImporter) runIndexing(bar *pb.ProgressBar, repo *repo.GitRepo, url string, indexed config.Indexed, branchMap config.BrancheIndexedMap, tagMap config.TagIndexedMap) error {\r\n\t\/\/ collect create file entries\r\n\tcreateBranches := make(map[string]string)\r\n\tupdateBranches := make(map[string][2]string)\r\n\tfor branch, latestCommitID := range branchMap {\r\n\t\tfound := false\r\n\t\tfor indexedBranch, prevCommitID := range indexed.Branches {\r\n\t\t\tif branch == indexedBranch {\r\n\t\t\t\tfound = true\r\n\t\t\t\tif latestCommitID == prevCommitID {\r\n\t\t\t\t\tlog.Printf(\"Already up-to-date. %s\", getLoggingTag(repo, branch, latestCommitID))\r\n\t\t\t\t} else {\r\n\t\t\t\t\tupdateBranches[branch] = [2]string{prevCommitID, latestCommitID}\r\n\t\t\t\t}\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t\tif !found {\r\n\t\t\tcreateBranches[branch] = latestCommitID\r\n\t\t}\r\n\t}\r\n\r\n\tcreateTags := make(map[string]string)\r\n\tupdateTags := make(map[string][2]string)\r\n\tfor tag, latestCommitID := range tagMap {\r\n\t\tfound := false\r\n\t\tfor indexedTag, prevCommitID := range indexed.Tags {\r\n\t\t\tif tag == indexedTag {\r\n\t\t\t\tfound = true\r\n\t\t\t\tif latestCommitID == prevCommitID {\r\n\t\t\t\t\tlog.Printf(\"Already up-to-date. 
%s\", getLoggingTag(repo, tag, latestCommitID))\r\n\t\t\t\t} else {\r\n\t\t\t\t\tupdateTags[tag] = [2]string{prevCommitID, latestCommitID}\r\n\t\t\t\t}\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t\tif !found {\r\n\t\t\tcreateTags[tag] = latestCommitID\r\n\t\t}\r\n\t}\r\n\r\n\tqueue := make(chan indexer.FileIndexOperation, 100)\r\n\r\n\t\/\/ process\r\n\tg.UpsertIndex(queue, bar, repo, createBranches, createTags, updateBranches, updateTags)\r\n\r\n\tcallBatch := func(operations []indexer.FileIndexOperation) {\r\n\t\terr := g.indexer.BatchFileIndex(operations)\r\n\t\tif err != nil {\r\n\t\t\terrors.Errorf(\"Batch indexed error: %+v\", err)\r\n\t\t} else {\r\n\t\t\t\/\/ fmt.Printf(\"Batch indexed %d files.\\n\", len(operations))\r\n\t\t}\r\n\t\tbar.Add(len(operations))\r\n\t}\r\n\r\n\t\/\/ batch\r\n\toperations := []indexer.FileIndexOperation{}\r\n\tvar opsSize int64 = 0\r\n\tvar batchLimitSize int64 = 1024 * 512 \/\/ 512KB\r\n\r\n\t\/\/ fmt.Println(\"start queue reading\")\r\n\r\n\tfor op := range queue {\r\n\t\toperations = append(operations, op)\r\n\t\topsSize += op.FileIndex.Size\r\n\r\n\t\t\/\/ show progress\r\n\t\t\/\/ if len(operations)%80 == 0 {\r\n\t\t\/\/ \tfmt.Printf(\"\\n\")\r\n\t\t\/\/ }\r\n\t\t\/\/ fmt.Printf(\".\")\r\n\r\n\t\tif opsSize >= batchLimitSize {\r\n\t\t\t\/\/ fmt.Printf(\"\\n\")\r\n\r\n\t\t\tcallBatch(operations)\r\n\r\n\t\t\t\/\/ reset\r\n\t\t\toperations = nil\r\n\t\t\topsSize = 0\r\n\t\t}\r\n\t}\r\n\r\n\t\/\/ remains\r\n\tif len(operations) > 0 {\r\n\t\t\/\/ fmt.Printf(\"\\n\")\r\n\t\tcallBatch(operations)\r\n\t}\r\n\r\n\t\/\/ Save config after index completed\r\n\terr := g.config.UpdateIndexed(config.Indexed{Organization: repo.Organization, Project: repo.Project, Repository: repo.Repository, Branches: branchMap, Tags: tagMap})\r\n\r\n\tif err != nil {\r\n\t\treturn errors.Wrapf(err, \"Faild to update indexed.\")\r\n\t}\r\n\r\n\treturn nil\r\n}\r\n\r\nfunc (g *GitImporter) UpsertIndex(queue chan indexer.FileIndexOperation, bar *pb.ProgressBar, r *repo.GitRepo, branchMap map[string]string, tagMap map[string]string, updateBranchMap map[string][2]string, updateTagMap map[string][2]string) error {\r\n\taddFiles, err := r.GetFileEntriesMap(branchMap, tagMap)\r\n\tif err != nil {\r\n\t\treturn errors.Wrapf(err, \"Failed to get file entries. branches: %v tags: %v\", branchMap, tagMap)\r\n\t}\r\n\r\n\tupdateAddFiles, delFiles, err := r.GetDiffFileEntriesMap(updateBranchMap, updateTagMap)\r\n\tif err != nil {\r\n\t\treturn errors.Wrapf(err, \"Failed to get diff. 
branches: %v tags: %v\", branchMap, tagMap)\r\n\t}\r\n\r\n\tvar wg sync.WaitGroup\r\n\r\n\twg.Add(1)\r\n\tgo func() {\r\n\t\tdefer wg.Done()\r\n\t\tg.handleAddFiles(queue, bar, r, addFiles)\r\n\t\tg.handleAddFiles(queue, bar, r, updateAddFiles)\r\n\t\tg.handleDelFiles(queue, bar, r, delFiles)\r\n\t}()\r\n\r\n\tgo func() {\r\n\t\twg.Wait()\r\n\t\tclose(queue)\r\n\t}()\r\n\r\n\treturn nil\r\n}\r\n\r\nfunc (g *GitImporter) handleAddFiles(queue chan indexer.FileIndexOperation, bar *pb.ProgressBar, r *repo.GitRepo, addFiles map[string]repo.GitFile) {\r\n\tif len(addFiles) == 0 {\r\n\t\treturn\r\n\t}\r\n\r\n\tvar wg sync.WaitGroup\r\n\tscanQueue := make(chan ScannedFile, 5)\r\n\r\n\tfor i := 0; i < 3; i++ {\r\n\t\twg.Add(1)\r\n\t\tgo scanFiles(&wg, scanQueue, queue, g, r, bar)\r\n\t}\r\n\r\n\tfor blob, file := range addFiles {\r\n\t\t\/\/ check size\r\n\t\tif file.Size > g.config.SizeLimit {\r\n\t\t\tcontinue\r\n\t\t}\r\n\r\n\t\tscanQueue <- ScannedFile{Blob: blob, GitFile: file}\r\n\t}\r\n\r\n\tclose(scanQueue)\r\n\r\n\twg.Wait()\r\n}\r\n\r\ntype ScannedFile struct {\r\n\tBlob string\r\n\tGitFile repo.GitFile\r\n}\r\n\r\nfunc scanFiles(wg *sync.WaitGroup, scanQueue chan ScannedFile, queue chan indexer.FileIndexOperation, g *GitImporter, r *repo.GitRepo, bar *pb.ProgressBar) {\r\n\tdefer wg.Done()\r\n\r\n\tfor {\r\n\t\tscannedFile, ok := <-scanQueue\r\n\t\tif !ok {\r\n\t\t\treturn\r\n\t\t}\r\n\r\n\t\tblob := scannedFile.Blob\r\n\t\tfile := scannedFile.GitFile\r\n\r\n\t\tfor path, loc := range file.Locations {\r\n\t\t\t\/\/ check contentType and retrive the file content\r\n\t\t\t\/\/ !! this will be heavy process !!\r\n\t\t\tcontentType, content, err := g.parseContent(r, blob)\r\n\t\t\tif err != nil {\r\n\t\t\t\tlog.Printf(\"Failed to parse file. [%s] - %s %+v\\n\", blob, path, err)\r\n\t\t\t\tcontinue\r\n\t\t\t\t\/\/ return errors.Wrapf(err, \"Failed to parse file. 
[%s] - %s\\n\", blob, path)\r\n\t\t\t}\r\n\r\n\t\t\t\/\/ @TODO Extract text from binary in the future?\r\n\t\t\tif !strings.HasPrefix(contentType, \"text\/\") && contentType != \"application\/octet-stream\" {\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\r\n\t\t\ttext, encoding, err := readText(content)\r\n\t\t\tif err != nil {\r\n\t\t\t\ttext = string(content)\r\n\t\t\t\tencoding = \"utf8\"\r\n\t\t\t}\r\n\r\n\t\t\tfileIndex := indexer.FileIndex{\r\n\t\t\t\tMetadata: indexer.Metadata{\r\n\t\t\t\t\tBlob: blob,\r\n\t\t\t\t\tOrganization: r.Organization,\r\n\t\t\t\t\tProject: r.Project,\r\n\t\t\t\t\tRepository: r.Repository,\r\n\t\t\t\t\tBranches: loc.Branches,\r\n\t\t\t\t\tTags: loc.Tags,\r\n\t\t\t\t\tPath: path,\r\n\t\t\t\t\tExt: indexer.GetExt(path),\r\n\t\t\t\t\tEncoding: encoding,\r\n\t\t\t\t\tSize: file.Size,\r\n\t\t\t\t},\r\n\t\t\t\tContent: text,\r\n\t\t\t}\r\n\r\n\t\t\tbar.Total = bar.Total + 1\r\n\r\n\t\t\tqueue <- indexer.FileIndexOperation{Method: indexer.ADD, FileIndex: fileIndex}\r\n\t\t}\r\n\t}\r\n}\r\n\r\n\/\/ How to detect encoding\r\n\/\/ http:\/\/qiita.com\/nobuhito\/items\/ff782f64e32f7ed95e43\r\nfunc readText(body []byte) (string, string, error) {\r\n\tvar f []byte\r\n\tencodings := []string{\"shift_jis\", \"utf8\"}\r\n\tvar enc string\r\n\tfor i := range encodings {\r\n\t\tenc = encodings[i]\r\n\t\tif enc != \"\" {\r\n\t\t\tee, _ := charset.Lookup(enc)\r\n\t\t\tif ee == nil {\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\t\tvar buf bytes.Buffer\r\n\t\t\tic := transform.NewWriter(&buf, ee.NewDecoder())\r\n\t\t\t_, err := ic.Write(body)\r\n\t\t\tif err != nil {\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\t\terr = ic.Close()\r\n\t\t\tif err != nil {\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\t\tf = buf.Bytes()\r\n\t\t\tbreak\r\n\t\t}\r\n\t}\r\n\treturn string(f), enc, nil\r\n}\r\n\r\nfunc (g *GitImporter) handleDelFiles(queue chan indexer.FileIndexOperation, bar *pb.ProgressBar, r *repo.GitRepo, delFiles map[string]repo.GitFile) {\r\n\tfor blob, file := range delFiles {\r\n\t\tfor path, loc := range file.Locations {\r\n\t\t\tfileIndex := indexer.FileIndex{\r\n\t\t\t\tMetadata: indexer.Metadata{\r\n\t\t\t\t\tBlob: blob,\r\n\t\t\t\t\tOrganization: r.Organization,\r\n\t\t\t\t\tProject: r.Project,\r\n\t\t\t\t\tRepository: r.Repository,\r\n\t\t\t\t\tBranches: loc.Branches,\r\n\t\t\t\t\tTags: loc.Tags,\r\n\t\t\t\t\tPath: path,\r\n\t\t\t\t},\r\n\t\t\t}\r\n\r\n\t\t\tbar.Total = bar.Total + 1\r\n\r\n\t\t\t\/\/ Delete index\r\n\t\t\tqueue <- indexer.FileIndexOperation{Method: indexer.DELETE, FileIndex: fileIndex}\r\n\t\t}\r\n\t}\r\n}\r\n\r\nfunc (g *GitImporter) parseContent(repo *repo.GitRepo, blob string) (string, []byte, error) {\r\n\tcontentType, content, err := repo.DetectBlobContentType(blob)\r\n\tif err != nil {\r\n\t\treturn \"\", nil, errors.Wrapf(err, \"Failed to read contentType. 
%s\", blob)\r\n\t}\r\n\treturn contentType, content, nil\r\n}\r\n\r\nfunc getLoggingTag(repo *repo.GitRepo, ref string, commitId string) string {\r\n\ttag := fmt.Sprintf(\"%s:%s\/%s (%s) [%s]\", repo.Organization, repo.Project, repo.Repository, ref, commitId)\r\n\treturn tag\r\n}\r\n<|endoftext|>"} {"text":"<commit_before>package imageutil\n\nimport (\n\t\"image\"\n\n\t\"golang.org\/x\/image\/draw\"\n)\n\nfunc OverlayCenterYLeftAlign(imgBg, imgOver image.Image) image.Image {\n\toutput := image.NewRGBA(imgBg.Bounds())\n\tdraw.Draw(output, imgBg.Bounds(), imgBg, image.ZP, draw.Src)\n\n\th1 := imgBg.Bounds().Dy()\n\th2 := imgOver.Bounds().Dy()\n\toffset := image.Pt(0, (h1-h2)\/2)\n\n\tdraw.Draw(output, imgOver.Bounds().Add(offset), imgOver, image.Point{}, draw.Src)\n\treturn output\n}\n\nfunc MergeXSameY(images []image.Image, larger bool) image.Image {\n\tif len(images) == 0 {\n\t\treturn nil\n\t} else if len(images) == 1 {\n\t\treturn images[0]\n\t}\n\timages = ResizeSameY(images, larger)\n\t_, _, minY, _, sumX, _ := SliceXY(images, -1)\n\toutput := image.NewRGBA(image.Rect(0, 0, sumX, minY))\n\tsumXPrev := 0\n\tfor i, img := range images {\n\t\tif i == 0 {\n\t\t\tdraw.Draw(output, img.Bounds(), img, image.Point{}, draw.Src)\n\t\t} else {\n\t\t\timgOffset := image.Pt(sumXPrev, 0)\n\t\t\tdraw.Draw(output, img.Bounds().Add(imgOffset), img, image.Point{}, draw.Src)\n\t\t}\n\t\tsumXPrev += img.Bounds().Dx()\n\t}\n\treturn output\n}\n\nfunc mergeXSameYTwo(img1, img2 image.Image, larger bool) image.Image {\n\timg1, img2 = ResizeSameYTwo(img1, img2, larger)\n\toutput := image.NewRGBA(\n\t\timage.Rect(0, 0,\n\t\t\timg1.Bounds().Dx()+img2.Bounds().Dx(),\n\t\t\timg1.Bounds().Dy()))\n\tdraw.Draw(output, img1.Bounds(), img1, image.Point{}, draw.Src)\n\timg2Offset := image.Pt(img1.Bounds().Dx(), 0)\n\tdraw.Draw(output, img2.Bounds().Add(img2Offset), img2, image.Point{}, draw.Src)\n\treturn output\n}\n\nfunc MergeXSameYRead(locations []string, larger bool) (image.Image, error) {\n\timages, err := ReadImages(locations)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn MergeXSameY(images, true), nil\n\t\/*\n\t\timg1, _, err := ReadImageAny(location1)\n\t\tif err != nil {\n\t\t\treturn img1, err\n\t\t}\n\t\timg2, _, err := ReadImageAny(location2)\n\t\tif err != nil {\n\t\t\treturn img2, err\n\t\t}\n\t\treturn MergeXSameY(img1, img2, true), nil\n\t*\/\n}\n\nfunc MergeYSameX(images []image.Image, larger bool) image.Image {\n\tif len(images) == 0 {\n\t\treturn nil\n\t} else if len(images) == 1 {\n\t\treturn images[0]\n\t}\n\timages = ResizeSameX(images, larger)\n\tminX, _, _, _, _, sumY := SliceXY(images, -1)\n\toutput := image.NewRGBA(image.Rect(0, 0, minX, sumY))\n\tsumYPrev := 0\n\tfor i, img := range images {\n\t\tif i == 0 {\n\t\t\tdraw.Draw(output, img.Bounds(), img, image.Point{}, draw.Src)\n\t\t} else {\n\t\t\timgOffset := image.Pt(0, sumYPrev)\n\t\t\tdraw.Draw(output, img.Bounds().Add(imgOffset), img, image.Point{}, draw.Src)\n\t\t}\n\t\tsumYPrev += img.Bounds().Dy()\n\t}\n\treturn output\n}\n\nfunc mergeYSameXTwo(img1, img2 image.Image, larger bool) image.Image {\n\timg1, img2 = ResizeSameXTwo(img1, img2, larger)\n\toutput := image.NewRGBA(\n\t\timage.Rect(0, 0,\n\t\t\timg1.Bounds().Dx(),\n\t\t\timg1.Bounds().Dy()+img2.Bounds().Dy()))\n\tdraw.Draw(output, img1.Bounds(), img1, image.Point{}, draw.Src)\n\timg2Offset := image.Pt(0, img1.Bounds().Dy())\n\tdraw.Draw(output, img2.Bounds().Add(img2Offset), img2, image.Point{}, draw.Src)\n\treturn output\n}\n\nfunc MergeYSameXRead(locations []string, 
larger bool) (image.Image, error) {\n\timages, err := ReadImages(locations)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn MergeYSameX(images, true), nil\n\t\/*\n\t\timg1, _, err := ReadImageAny(location1)\n\t\tif err != nil {\n\t\t\treturn img1, err\n\t\t}\n\t\timg2, _, err := ReadImageAny(location2)\n\t\tif err != nil {\n\t\t\treturn img2, err\n\t\t}\n\t\treturn MergeYSameX(img1, img2, true), nil\n\t*\/\n}\n\nfunc MatrixMergeRead(matrix [][]string, largerX, largerY bool) (image.Image, error) {\n\tmatrixImages := [][]image.Image{}\n\tfor _, row := range matrix {\n\t\tif len(row) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\timages, err := ReadImages(row)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmatrixImages = append(matrixImages, images)\n\t}\n\n\treturn MatrixMerge(matrixImages, largerX, largerY), nil\n}\n\nfunc MatrixMerge(matrix [][]image.Image, largerX, largerY bool) image.Image {\n\tif len(matrix) == 0 {\n\t\treturn nil\n\t}\n\trowImages := []image.Image{}\n\tfor _, rowParts := range matrix {\n\t\tif len(rowParts) > 0 {\n\t\t\trowImages = append(rowImages, MergeXSameY(rowParts, largerY))\n\t\t}\n\t}\n\tif len(rowImages) == 0 {\n\t\treturn nil\n\t}\n\treturn MergeYSameX(rowImages, largerX)\n}\n\nfunc merge4Read(location1, location2, location3, location4 string, larger bool) (image.Image, error) {\n\timg12, err := MergeXSameYRead([]string{location1, location2}, larger)\n\tif err != nil {\n\t\treturn img12, err\n\t}\n\n\timg34, err := MergeXSameYRead([]string{location3, location4}, larger)\n\tif err != nil {\n\t\treturn img34, err\n\t}\n\n\treturn MergeYSameX([]image.Image{img12, img34}, larger), nil\n}\n<commit_msg>clean: imageutil: remove old code<commit_after>package imageutil\n\nimport (\n\t\"image\"\n\n\t\"golang.org\/x\/image\/draw\"\n)\n\nfunc OverlayCenterYLeftAlign(imgBg, imgOver image.Image) image.Image {\n\toutput := image.NewRGBA(imgBg.Bounds())\n\tdraw.Draw(output, imgBg.Bounds(), imgBg, image.ZP, draw.Src)\n\n\th1 := imgBg.Bounds().Dy()\n\th2 := imgOver.Bounds().Dy()\n\toffset := image.Pt(0, (h1-h2)\/2)\n\n\tdraw.Draw(output, imgOver.Bounds().Add(offset), imgOver, image.Point{}, draw.Src)\n\treturn output\n}\n\nfunc MergeXSameY(images []image.Image, larger bool) image.Image {\n\tif len(images) == 0 {\n\t\treturn nil\n\t} else if len(images) == 1 {\n\t\treturn images[0]\n\t}\n\timages = ResizeSameY(images, larger)\n\t_, _, minY, _, sumX, _ := SliceXY(images, -1)\n\toutput := image.NewRGBA(image.Rect(0, 0, sumX, minY))\n\tsumXPrev := 0\n\tfor i, img := range images {\n\t\tif i == 0 {\n\t\t\tdraw.Draw(output, img.Bounds(), img, image.Point{}, draw.Src)\n\t\t} else {\n\t\t\timgOffset := image.Pt(sumXPrev, 0)\n\t\t\tdraw.Draw(output, img.Bounds().Add(imgOffset), img, image.Point{}, draw.Src)\n\t\t}\n\t\tsumXPrev += img.Bounds().Dx()\n\t}\n\treturn output\n}\n\nfunc mergeXSameYTwo(img1, img2 image.Image, larger bool) image.Image {\n\timg1, img2 = ResizeSameYTwo(img1, img2, larger)\n\toutput := image.NewRGBA(\n\t\timage.Rect(0, 0,\n\t\t\timg1.Bounds().Dx()+img2.Bounds().Dx(),\n\t\t\timg1.Bounds().Dy()))\n\tdraw.Draw(output, img1.Bounds(), img1, image.Point{}, draw.Src)\n\timg2Offset := image.Pt(img1.Bounds().Dx(), 0)\n\tdraw.Draw(output, img2.Bounds().Add(img2Offset), img2, image.Point{}, draw.Src)\n\treturn output\n}\n\nfunc MergeXSameYRead(locations []string, larger bool) (image.Image, error) {\n\timages, err := ReadImages(locations)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn MergeXSameY(images, larger), nil\n}\n\n
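\/\/ MergeYSameX resizes the images to a common width and stacks them\n\/\/ vertically; an empty slice yields nil and a single image is returned as is.\nfunc MergeYSameX(images []image.Image, 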
larger bool) image.Image {\n\tif len(images) == 0 {\n\t\treturn nil\n\t} else if len(images) == 1 {\n\t\treturn images[0]\n\t}\n\timages = ResizeSameX(images, larger)\n\tminX, _, _, _, _, sumY := SliceXY(images, -1)\n\toutput := image.NewRGBA(image.Rect(0, 0, minX, sumY))\n\tsumYPrev := 0\n\tfor i, img := range images {\n\t\tif i == 0 {\n\t\t\tdraw.Draw(output, img.Bounds(), img, image.Point{}, draw.Src)\n\t\t} else {\n\t\t\timgOffset := image.Pt(0, sumYPrev)\n\t\t\tdraw.Draw(output, img.Bounds().Add(imgOffset), img, image.Point{}, draw.Src)\n\t\t}\n\t\tsumYPrev += img.Bounds().Dy()\n\t}\n\treturn output\n}\n\nfunc mergeYSameXTwo(img1, img2 image.Image, larger bool) image.Image {\n\timg1, img2 = ResizeSameXTwo(img1, img2, larger)\n\toutput := image.NewRGBA(\n\t\timage.Rect(0, 0,\n\t\t\timg1.Bounds().Dx(),\n\t\t\timg1.Bounds().Dy()+img2.Bounds().Dy()))\n\tdraw.Draw(output, img1.Bounds(), img1, image.Point{}, draw.Src)\n\timg2Offset := image.Pt(0, img1.Bounds().Dy())\n\tdraw.Draw(output, img2.Bounds().Add(img2Offset), img2, image.Point{}, draw.Src)\n\treturn output\n}\n\nfunc MergeYSameXRead(locations []string, larger bool) (image.Image, error) {\n\timages, err := ReadImages(locations)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn MergeYSameX(images, larger), nil\n}\n\nfunc MatrixMergeRead(matrix [][]string, largerX, largerY bool) (image.Image, error) {\n\tmatrixImages := [][]image.Image{}\n\tfor _, row := range matrix {\n\t\tif len(row) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\timages, err := ReadImages(row)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmatrixImages = append(matrixImages, images)\n\t}\n\n\treturn MatrixMerge(matrixImages, largerX, largerY), nil\n}\n\nfunc MatrixMerge(matrix [][]image.Image, largerX, largerY bool) image.Image {\n\tif len(matrix) == 0 {\n\t\treturn nil\n\t}\n\trowImages := []image.Image{}\n\tfor _, rowParts := range matrix {\n\t\tif len(rowParts) > 0 {\n\t\t\trowImages = append(rowImages, MergeXSameY(rowParts, largerY))\n\t\t}\n\t}\n\tif len(rowImages) == 0 {\n\t\treturn nil\n\t}\n\treturn MergeYSameX(rowImages, largerX)\n}\n\n
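\/\/ merge4Read composes four image files into a 2x2 grid by merging two\n\/\/ horizontal pairs and then stacking the pairs vertically.\nfunc merge4Read(location1, location2, location3, location4 string, larger bool) (image.Image, error) {\n\timg12, err := MergeXSameYRead([]string{location1, location2}, larger)\n\tif err != nil {\n\t\treturn img12, err\n\t}\n\n\timg34, err := MergeXSameYRead([]string{location3, location4}, larger)\n\tif err != nil {\n\t\treturn img34, err\n\t}\n\n\treturn MergeYSameX([]image.Image{img12, img34}, larger), nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2022 Google LLC. 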
All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage storage\n\nimport (\n\t\"context\"\n\n\t\"github.com\/apigee\/registry\/server\/registry\/internal\/storage\/models\"\n\t\"github.com\/apigee\/registry\/server\/registry\/names\"\n\t\"google.golang.org\/grpc\/codes\"\n\t\"google.golang.org\/grpc\/status\"\n\t\"gorm.io\/gorm\"\n)\n\nfunc (c *Client) DeleteProject(ctx context.Context, name names.Project, cascade bool) error {\n\terr := c.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {\n\t\tvar count int64\n\t\tfor _, model := range []interface{}{\n\t\t\tmodels.Project{},\n\t\t\tmodels.Api{},\n\t\t\tmodels.Deployment{},\n\t\t\tmodels.DeploymentRevisionTag{},\n\t\t\tmodels.Version{},\n\t\t\tmodels.Spec{},\n\t\t\tmodels.SpecRevisionTag{},\n\t\t\tmodels.Blob{},\n\t\t\tmodels.Artifact{},\n\t\t} {\n\t\t\top := tx.Where(\"project_id = ?\", name.ProjectID)\n\t\t\tif err := op.Delete(model).Error; err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcount += op.RowsAffected\n\t\t}\n\n\t\tif count > 1 && !cascade {\n\t\t\treturn status.Errorf(codes.FailedPrecondition, \"cannot delete child resources in non-cascading mode\")\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tswitch status.Code(err) {\n\tcase codes.OK:\n\t\treturn nil\n\tcase codes.FailedPrecondition:\n\t\treturn err\n\tdefault:\n\t\treturn status.Error(codes.Internal, err.Error())\n\t}\n}\n\nfunc (c *Client) DeleteApi(ctx context.Context, name names.Api, cascade bool) error {\n\terr := c.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {\n\t\tvar count int64\n\t\tfor _, model := range []interface{}{\n\t\t\tmodels.Api{},\n\t\t\tmodels.Deployment{},\n\t\t\tmodels.DeploymentRevisionTag{},\n\t\t\tmodels.Version{},\n\t\t\tmodels.Spec{},\n\t\t\tmodels.SpecRevisionTag{},\n\t\t\tmodels.Blob{},\n\t\t\tmodels.Artifact{},\n\t\t} {\n\t\t\top := tx.Where(\"project_id = ?\", name.ProjectID).\n\t\t\t\tWhere(\"api_id = ?\", name.ApiID)\n\t\t\tif err := op.Delete(model).Error; err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcount += op.RowsAffected\n\t\t}\n\n\t\tif count > 1 && !cascade {\n\t\t\treturn status.Errorf(codes.FailedPrecondition, \"cannot delete child resources in non-cascading mode\")\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tswitch status.Code(err) {\n\tcase codes.OK:\n\t\treturn nil\n\tcase codes.FailedPrecondition:\n\t\treturn err\n\tdefault:\n\t\treturn status.Error(codes.Internal, err.Error())\n\t}\n}\n\nfunc (c *Client) DeleteVersion(ctx context.Context, name names.Version, cascade bool) error {\n\terr := c.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {\n\t\tvar count int64\n\t\tfor _, model := range []interface{}{\n\t\t\tmodels.Version{},\n\t\t\tmodels.Spec{},\n\t\t\tmodels.SpecRevisionTag{},\n\t\t\tmodels.Blob{},\n\t\t\tmodels.Artifact{},\n\t\t} {\n\t\t\top := tx.Where(\"project_id = ?\", name.ProjectID).\n\t\t\t\tWhere(\"api_id = ?\", name.ApiID).\n\t\t\t\tWhere(\"version_id = ?\", name.VersionID)\n\t\t\tif err := op.Delete(model).Error; err != nil 
{\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcount += op.RowsAffected\n\t\t}\n\n\t\tif count > 1 && !cascade {\n\t\t\treturn status.Errorf(codes.FailedPrecondition, \"cannot delete child resources in non-cascading mode\")\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tswitch status.Code(err) {\n\tcase codes.OK:\n\t\treturn nil\n\tcase codes.FailedPrecondition:\n\t\treturn err\n\tdefault:\n\t\treturn status.Error(codes.Internal, err.Error())\n\t}\n}\n\nfunc (c *Client) DeleteSpec(ctx context.Context, name names.Spec, cascade bool) error {\n\terr := c.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {\n\t\tfor _, model := range []interface{}{\n\t\t\tmodels.Spec{},\n\t\t\tmodels.SpecRevisionTag{},\n\t\t\tmodels.Blob{},\n\t\t} {\n\t\t\top := tx.Where(\"project_id = ?\", name.ProjectID).\n\t\t\t\tWhere(\"api_id = ?\", name.ApiID).\n\t\t\t\tWhere(\"version_id = ?\", name.VersionID).\n\t\t\t\tWhere(\"spec_id = ?\", name.SpecID)\n\t\t\tif err := op.Delete(model).Error; err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tvar childCount int64\n\t\tfor _, model := range []interface{}{\n\t\t\tmodels.Artifact{},\n\t\t\tmodels.Blob{},\n\t\t} {\n\t\t\top := tx.Where(\"project_id = ?\", name.ProjectID).\n\t\t\t\tWhere(\"api_id = ?\", name.ApiID).\n\t\t\t\tWhere(\"version_id = ?\", name.VersionID).\n\t\t\t\tWhere(\"spec_id = ?\", name.SpecID)\n\t\t\tif err := op.Delete(model).Error; err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tchildCount += op.RowsAffected\n\t\t}\n\n\t\tif childCount > 0 && !cascade {\n\t\t\treturn status.Errorf(codes.FailedPrecondition, \"cannot delete child resources in non-cascading mode\")\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tswitch status.Code(err) {\n\tcase codes.OK:\n\t\treturn nil\n\tcase codes.FailedPrecondition:\n\t\treturn err\n\tdefault:\n\t\treturn status.Error(codes.Internal, err.Error())\n\t}\n}\n\nfunc (c *Client) DeleteSpecRevision(ctx context.Context, name names.SpecRevision) error {\n\tname, err := c.unwrapSpecRevisionTag(ctx, name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, model := range []interface{}{\n\t\tmodels.Spec{},\n\t\tmodels.SpecRevisionTag{},\n\t} {\n\t\top := c.db.WithContext(ctx).\n\t\t\tWhere(\"project_id = ?\", name.ProjectID).\n\t\t\tWhere(\"api_id = ?\", name.ApiID).\n\t\t\tWhere(\"version_id = ?\", name.VersionID).\n\t\t\tWhere(\"spec_id = ?\", name.SpecID).\n\t\t\tWhere(\"revision_id = ?\", name.RevisionID)\n\t\tif err := op.Delete(model).Error; err != nil {\n\t\t\treturn status.Error(codes.Internal, err.Error())\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (c *Client) DeleteDeployment(ctx context.Context, name names.Deployment, cascade bool) error {\n\terr := c.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {\n\t\tfor _, model := range []interface{}{\n\t\t\tmodels.Deployment{},\n\t\t\tmodels.DeploymentRevisionTag{},\n\t\t} {\n\t\t\top := tx.Where(\"project_id = ?\", name.ProjectID).\n\t\t\t\tWhere(\"api_id = ?\", name.ApiID).\n\t\t\t\tWhere(\"deployment_id = ?\", name.DeploymentID)\n\t\t\tif err := op.Delete(model).Error; err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tvar childCount int64\n\t\tfor _, model := range []interface{}{\n\t\t\tmodels.Artifact{},\n\t\t\tmodels.Blob{},\n\t\t} {\n\t\t\top := tx.Where(\"project_id = ?\", name.ProjectID).\n\t\t\t\tWhere(\"api_id = ?\", name.ApiID).\n\t\t\t\tWhere(\"deployment_id = ?\", name.DeploymentID)\n\t\t\tif err := op.Delete(model).Error; err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tchildCount += op.RowsAffected\n\t\t}\n\n\t\tif childCount > 0 && !cascade {\n\t\t\treturn 
status.Errorf(codes.FailedPrecondition, \"cannot delete child resources in non-cascading mode\")\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tswitch status.Code(err) {\n\tcase codes.OK:\n\t\treturn nil\n\tcase codes.FailedPrecondition:\n\t\treturn err\n\tdefault:\n\t\treturn status.Error(codes.Internal, err.Error())\n\t}\n}\n\nfunc (c *Client) DeleteDeploymentRevision(ctx context.Context, name names.DeploymentRevision) error {\n\tname, err := c.unwrapDeploymentRevisionTag(ctx, name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, model := range []interface{}{\n\t\tmodels.Deployment{},\n\t\tmodels.DeploymentRevisionTag{},\n\t} {\n\t\top := c.db.WithContext(ctx).\n\t\t\tWhere(\"project_id = ?\", name.ProjectID).\n\t\t\tWhere(\"api_id = ?\", name.ApiID).\n\t\t\tWhere(\"deployment_id = ?\", name.DeploymentID).\n\t\t\tWhere(\"revision_id = ?\", name.RevisionID)\n\t\tif err := op.Delete(model).Error; err != nil {\n\t\t\treturn status.Error(codes.Internal, err.Error())\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (c *Client) DeleteArtifact(ctx context.Context, name names.Artifact) error {\n\tfor _, model := range []interface{}{\n\t\tmodels.Blob{},\n\t\tmodels.Artifact{},\n\t} {\n\t\top := c.db.WithContext(ctx).\n\t\t\tWhere(\"project_id = ?\", name.ProjectID()).\n\t\t\tWhere(\"api_id = ?\", name.ApiID()).\n\t\t\tWhere(\"version_id = ?\", name.VersionID()).\n\t\t\tWhere(\"spec_id = ?\", name.SpecID()).\n\t\t\tWhere(\"artifact_id = ?\", name.ArtifactID())\n\t\tif err := op.Delete(model).Error; err != nil {\n\t\t\treturn status.Error(codes.Internal, err.Error())\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>Ensure that when artifacts are deleted, the deployment id matches. (#637)<commit_after>\/\/ Copyright 2022 Google LLC. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage storage\n\nimport (\n\t\"context\"\n\n\t\"github.com\/apigee\/registry\/server\/registry\/internal\/storage\/models\"\n\t\"github.com\/apigee\/registry\/server\/registry\/names\"\n\t\"google.golang.org\/grpc\/codes\"\n\t\"google.golang.org\/grpc\/status\"\n\t\"gorm.io\/gorm\"\n)\n\nfunc (c *Client) DeleteProject(ctx context.Context, name names.Project, cascade bool) error {\n\terr := c.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {\n\t\tvar count int64\n\t\tfor _, model := range []interface{}{\n\t\t\tmodels.Project{},\n\t\t\tmodels.Api{},\n\t\t\tmodels.Deployment{},\n\t\t\tmodels.DeploymentRevisionTag{},\n\t\t\tmodels.Version{},\n\t\t\tmodels.Spec{},\n\t\t\tmodels.SpecRevisionTag{},\n\t\t\tmodels.Blob{},\n\t\t\tmodels.Artifact{},\n\t\t} {\n\t\t\top := tx.Where(\"project_id = ?\", name.ProjectID)\n\t\t\tif err := op.Delete(model).Error; err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcount += op.RowsAffected\n\t\t}\n\n\t\tif count > 1 && !cascade {\n\t\t\treturn status.Errorf(codes.FailedPrecondition, \"cannot delete child resources in non-cascading mode\")\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tswitch status.Code(err) {\n\tcase codes.OK:\n\t\treturn nil\n\tcase 
codes.FailedPrecondition:\n\t\treturn err\n\tdefault:\n\t\treturn status.Error(codes.Internal, err.Error())\n\t}\n}\n\nfunc (c *Client) DeleteApi(ctx context.Context, name names.Api, cascade bool) error {\n\terr := c.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {\n\t\tvar count int64\n\t\tfor _, model := range []interface{}{\n\t\t\tmodels.Api{},\n\t\t\tmodels.Deployment{},\n\t\t\tmodels.DeploymentRevisionTag{},\n\t\t\tmodels.Version{},\n\t\t\tmodels.Spec{},\n\t\t\tmodels.SpecRevisionTag{},\n\t\t\tmodels.Blob{},\n\t\t\tmodels.Artifact{},\n\t\t} {\n\t\t\top := tx.Where(\"project_id = ?\", name.ProjectID).\n\t\t\t\tWhere(\"api_id = ?\", name.ApiID)\n\t\t\tif err := op.Delete(model).Error; err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcount += op.RowsAffected\n\t\t}\n\n\t\tif count > 1 && !cascade {\n\t\t\treturn status.Errorf(codes.FailedPrecondition, \"cannot delete child resources in non-cascading mode\")\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tswitch status.Code(err) {\n\tcase codes.OK:\n\t\treturn nil\n\tcase codes.FailedPrecondition:\n\t\treturn err\n\tdefault:\n\t\treturn status.Error(codes.Internal, err.Error())\n\t}\n}\n\nfunc (c *Client) DeleteVersion(ctx context.Context, name names.Version, cascade bool) error {\n\terr := c.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {\n\t\tvar count int64\n\t\tfor _, model := range []interface{}{\n\t\t\tmodels.Version{},\n\t\t\tmodels.Spec{},\n\t\t\tmodels.SpecRevisionTag{},\n\t\t\tmodels.Blob{},\n\t\t\tmodels.Artifact{},\n\t\t} {\n\t\t\top := tx.Where(\"project_id = ?\", name.ProjectID).\n\t\t\t\tWhere(\"api_id = ?\", name.ApiID).\n\t\t\t\tWhere(\"version_id = ?\", name.VersionID)\n\t\t\tif err := op.Delete(model).Error; err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcount += op.RowsAffected\n\t\t}\n\n\t\tif count > 1 && !cascade {\n\t\t\treturn status.Errorf(codes.FailedPrecondition, \"cannot delete child resources in non-cascading mode\")\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tswitch status.Code(err) {\n\tcase codes.OK:\n\t\treturn nil\n\tcase codes.FailedPrecondition:\n\t\treturn err\n\tdefault:\n\t\treturn status.Error(codes.Internal, err.Error())\n\t}\n}\n\nfunc (c *Client) DeleteSpec(ctx context.Context, name names.Spec, cascade bool) error {\n\terr := c.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {\n\t\tfor _, model := range []interface{}{\n\t\t\tmodels.Spec{},\n\t\t\tmodels.SpecRevisionTag{},\n\t\t\tmodels.Blob{},\n\t\t} {\n\t\t\top := tx.Where(\"project_id = ?\", name.ProjectID).\n\t\t\t\tWhere(\"api_id = ?\", name.ApiID).\n\t\t\t\tWhere(\"version_id = ?\", name.VersionID).\n\t\t\t\tWhere(\"spec_id = ?\", name.SpecID)\n\t\t\tif err := op.Delete(model).Error; err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tvar childCount int64\n\t\tfor _, model := range []interface{}{\n\t\t\tmodels.Artifact{},\n\t\t\tmodels.Blob{},\n\t\t} {\n\t\t\top := tx.Where(\"project_id = ?\", name.ProjectID).\n\t\t\t\tWhere(\"api_id = ?\", name.ApiID).\n\t\t\t\tWhere(\"version_id = ?\", name.VersionID).\n\t\t\t\tWhere(\"spec_id = ?\", name.SpecID)\n\t\t\tif err := op.Delete(model).Error; err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tchildCount += op.RowsAffected\n\t\t}\n\n\t\tif childCount > 0 && !cascade {\n\t\t\treturn status.Errorf(codes.FailedPrecondition, \"cannot delete child resources in non-cascading mode\")\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tswitch status.Code(err) {\n\tcase codes.OK:\n\t\treturn nil\n\tcase codes.FailedPrecondition:\n\t\treturn err\n\tdefault:\n\t\treturn status.Error(codes.Internal, 
err.Error())\n\t}\n}\n\nfunc (c *Client) DeleteSpecRevision(ctx context.Context, name names.SpecRevision) error {\n\tname, err := c.unwrapSpecRevisionTag(ctx, name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, model := range []interface{}{\n\t\tmodels.Spec{},\n\t\tmodels.SpecRevisionTag{},\n\t} {\n\t\top := c.db.WithContext(ctx).\n\t\t\tWhere(\"project_id = ?\", name.ProjectID).\n\t\t\tWhere(\"api_id = ?\", name.ApiID).\n\t\t\tWhere(\"version_id = ?\", name.VersionID).\n\t\t\tWhere(\"spec_id = ?\", name.SpecID).\n\t\t\tWhere(\"revision_id = ?\", name.RevisionID)\n\t\tif err := op.Delete(model).Error; err != nil {\n\t\t\treturn status.Error(codes.Internal, err.Error())\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (c *Client) DeleteDeployment(ctx context.Context, name names.Deployment, cascade bool) error {\n\terr := c.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {\n\t\tfor _, model := range []interface{}{\n\t\t\tmodels.Deployment{},\n\t\t\tmodels.DeploymentRevisionTag{},\n\t\t} {\n\t\t\top := tx.Where(\"project_id = ?\", name.ProjectID).\n\t\t\t\tWhere(\"api_id = ?\", name.ApiID).\n\t\t\t\tWhere(\"deployment_id = ?\", name.DeploymentID)\n\t\t\tif err := op.Delete(model).Error; err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tvar childCount int64\n\t\tfor _, model := range []interface{}{\n\t\t\tmodels.Artifact{},\n\t\t\tmodels.Blob{},\n\t\t} {\n\t\t\top := tx.Where(\"project_id = ?\", name.ProjectID).\n\t\t\t\tWhere(\"api_id = ?\", name.ApiID).\n\t\t\t\tWhere(\"deployment_id = ?\", name.DeploymentID)\n\t\t\tif err := op.Delete(model).Error; err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tchildCount += op.RowsAffected\n\t\t}\n\n\t\tif childCount > 0 && !cascade {\n\t\t\treturn status.Errorf(codes.FailedPrecondition, \"cannot delete child resources in non-cascading mode\")\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tswitch status.Code(err) {\n\tcase codes.OK:\n\t\treturn nil\n\tcase codes.FailedPrecondition:\n\t\treturn err\n\tdefault:\n\t\treturn status.Error(codes.Internal, err.Error())\n\t}\n}\n\nfunc (c *Client) DeleteDeploymentRevision(ctx context.Context, name names.DeploymentRevision) error {\n\tname, err := c.unwrapDeploymentRevisionTag(ctx, name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, model := range []interface{}{\n\t\tmodels.Deployment{},\n\t\tmodels.DeploymentRevisionTag{},\n\t} {\n\t\top := c.db.WithContext(ctx).\n\t\t\tWhere(\"project_id = ?\", name.ProjectID).\n\t\t\tWhere(\"api_id = ?\", name.ApiID).\n\t\t\tWhere(\"deployment_id = ?\", name.DeploymentID).\n\t\t\tWhere(\"revision_id = ?\", name.RevisionID)\n\t\tif err := op.Delete(model).Error; err != nil {\n\t\t\treturn status.Error(codes.Internal, err.Error())\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (c *Client) DeleteArtifact(ctx context.Context, name names.Artifact) error {\n\tfor _, model := range []interface{}{\n\t\tmodels.Blob{},\n\t\tmodels.Artifact{},\n\t} {\n\t\top := c.db.WithContext(ctx).\n\t\t\tWhere(\"project_id = ?\", name.ProjectID()).\n\t\t\tWhere(\"api_id = ?\", name.ApiID()).\n\t\t\tWhere(\"version_id = ?\", name.VersionID()).\n\t\t\tWhere(\"spec_id = ?\", name.SpecID()).\n\t\t\tWhere(\"deployment_id = ?\", name.DeploymentID()).\n\t\t\tWhere(\"artifact_id = ?\", name.ArtifactID())\n\t\tif err := op.Delete(model).Error; err != nil {\n\t\t\treturn status.Error(codes.Internal, err.Error())\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package discrete_test\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"testing\"\n\n\t\"github.com\/kzahedi\/goent\/discrete\"\n)\n\nfunc 
random3D() [][][]float64 {\n\td := make([][][]float64, 2, 2)\n\tfor i := 0; i < 2; i++ {\n\t\td[i] = make([][]float64, 2, 2)\n\t\tfor j := 0; j < 2; j++ {\n\t\t\td[i][j] = make([]float64, 2, 2)\n\t\t}\n\t}\n\n\t\/\/ number generated by mathematica\n\td[0][0][0] = 0.16368852271549625\n\td[0][0][1] = 0.06376901757394152\n\td[0][1][0] = 0.1665988153736691\n\td[0][1][1] = 0.10434124349128561\n\td[1][0][0] = 0.06763873914824399\n\td[1][0][1] = 0.23773709094595946\n\td[1][1][0] = 0.1802225946987888\n\td[1][1][1] = 0.01600397605261524\n\n\treturn d\n}\n\nfunc random2D() [][]float64 {\n\td := make([][]float64, 2, 2)\n\tfor i := 0; i < 2; i++ {\n\t\td[i] = make([]float64, 2, 2)\n\t}\n\n\t\/\/ number generated by mathematica\n\td[0][0] = 0.38545682823650396\n\td[0][1] = 0.03333130265730018\n\td[1][0] = 0.5150147316756242\n\td[1][1] = 0.0661971374305717\n\n\treturn d\n}\n\nfunc random1D() []float64 {\n\td := make([]float64, 2, 2)\n\n\t\/\/ number generated by mathematica\n\td[0] = 0.4739196991545937\n\td[1] = 0.5260803008454062\n\n\treturn d\n}\n\nfunc TestPX(t *testing.T) {\n\tp := random3D()\n\tr := discrete.PX(p)\n\ts := []float64{0.498398, 0.501602}\n\tcheck1D(r, s, \"PX\", t)\n}\n\nfunc TestPY(t *testing.T) {\n\tp := random3D()\n\tr := discrete.PY(p)\n\ts := []float64{0.532833, 0.467167}\n\tcheck1D(r, s, \"PY\", t)\n}\n\nfunc TestPZ(t *testing.T) {\n\tp := random3D()\n\tr := discrete.PZ(p)\n\ts := []float64{0.578149, 0.421851}\n\tcheck1D(r, s, \"PZ\", t)\n}\n\nfunc TestPYZ(t *testing.T) {\n\tp := random3D()\n\tr := discrete.PYZ(p)\n\ts := [][]float64{{0.231327, 0.301506}, {0.346821, 0.120345}}\n\tcheck2D(r, s, \"PYZ\", t)\n}\n\nfunc TestPXZ(t *testing.T) {\n\tp := random3D()\n\tr := discrete.PXZ(p)\n\ts := [][]float64{{0.330287, 0.16811}, {0.247861, 0.253741}}\n\tcheck2D(r, s, \"PXZ\", t)\n}\n\nfunc TestPXY(t *testing.T) {\n\tp := random3D()\n\tr := discrete.PXY(p)\n\ts := [][]float64{{0.227458, 0.27094}, {0.305376, 0.196227}}\n\tcheck2D(r, s, \"PXY\", t)\n}\n\nfunc TestH3(t *testing.T) {\n\tp := random3D()\n\tr := discrete.H3(p)\n\t\/\/ result taken from a working implementation in Mathematica\n\tif math.Abs(r-2.74816) > 0.0001 {\n\t\tt.Errorf(fmt.Sprintf(\"H3 should be %f, but it is %f\", 2.74816, r))\n\t}\n}\n\nfunc TestH2(t *testing.T) {\n\tp := random2D()\n\tr := discrete.H2(p)\n\t\/\/ result taken from a working implementation in Mathematica\n\tif math.Abs(r-1.44603) > 0.0001 {\n\t\tt.Errorf(fmt.Sprintf(\"H3 should be %f, but it is %f\", 1.44603, r))\n\t}\n}\n\nfunc TestH1(t *testing.T) {\n\tp := random1D()\n\tr := discrete.H1(p)\n\n\tif math.Abs(r-0.998037) > 0.0001 {\n\t\tt.Errorf(fmt.Sprintf(\"H1 of uniform should be %f, but it is %f\", 0.998037, r))\n\t}\n}\n\nfunc TestMiXvYgZ(t *testing.T) {\n\tp := random3D()\n\ts := 0.138276\n\tr := discrete.MiXvYgZ(p)\n\tif math.Abs(r-s) > 0.0001 {\n\t\tt.Errorf(fmt.Sprintf(\"MiXvYgZ should be %f, but it is %f\", s, r))\n\t}\n}\n\nfunc TestMiXvZgY(t *testing.T) {\n\tp := random3D()\n\ts := 0.142503\n\tr := discrete.MiXvZgY(p)\n\tif math.Abs(r-s) > 0.0001 {\n\t\tt.Errorf(fmt.Sprintf(\"MiXvZgY should be %f, but it is %f\", s, r))\n\t}\n}\n\nfunc TestMiXvY(t *testing.T) {\n\tp := random3D()\n\ts := 0.0168978\n\tr := discrete.MiXvY(p)\n\tif math.Abs(r-s) > 0.0001 {\n\t\tt.Errorf(fmt.Sprintf(\"MiXvY should be %f, but it is %f\", s, r))\n\t}\n}\n\nfunc TestCoI(t *testing.T) {\n\tp := random3D()\n\ts := discrete.MiXvY(p) - discrete.MiXvYgZ(p)\n\tr := discrete.CoI(p)\n\tif math.Abs(r-s) > 0.0001 {\n\t\tt.Errorf(fmt.Sprintf(\"CoI should be %f, but it is 
%f\", s, r))\n\t}\n}\n\nfunc TestPt(t *testing.T) {\n\tp := random3D()\n\n\tA := make([][][]float64, 2, 2)\n\tA[0] = make([][]float64, 2, 2)\n\tA[1] = make([][]float64, 2, 2)\n\tA[0][0] = make([]float64, 2, 2)\n\tA[0][1] = make([]float64, 2, 2)\n\tA[1][0] = make([]float64, 2, 2)\n\tA[1][1] = make([]float64, 2, 2)\n\n\tB := make([][][]float64, 2, 2)\n\tB[0] = make([][]float64, 2, 2)\n\tB[1] = make([][]float64, 2, 2)\n\tB[0][0] = make([]float64, 2, 2)\n\tB[0][1] = make([]float64, 2, 2)\n\tB[1][0] = make([]float64, 2, 2)\n\tB[1][1] = make([]float64, 2, 2)\n\n\tA[0][0][0] = 1.0\n\tA[0][0][1] = -1.0\n\tA[0][1][0] = -1.0\n\tA[0][1][1] = 1.0\n\n\tB[1][0][0] = 1.0\n\tB[1][0][1] = -1.0\n\tB[1][1][0] = -1.0\n\tB[1][1][1] = 1.0\n\n\t\/\/ Numbers generated with a working Mathematica implementation\n\tr := discrete.Pt(p, -1.0, 1.0)\n\ts := [][][]float64{{{-0.836311, 1.06377}, {1.1666, -0.895659}}, {{1.06764, -0.762263}, {-0.819777, 1.016}}}\n\tcheck3D(r, s, \"Pt\", t)\n\n\tr = discrete.Pt(p, -1.0, -1.0)\n\ts = [][][]float64{{{-0.836311, 1.06377}, {1.1666, -0.895659}}, {{-0.932361, 1.23774}, {1.18022, -0.983996}}}\n\tcheck3D(r, s, \"Pt\", t)\n\n\tr = discrete.Pt(p, 1.0, -1.0)\n\ts = [][][]float64{{{1.16369, -0.936231}, {-0.833401, 1.10434}}, {{-0.932361, 1.23774}, {1.18022, -0.983996}}}\n\tcheck3D(r, s, \"Pt\", t)\n\n\tr = discrete.Pt(p, 1.0, 1.0)\n\ts = [][][]float64{{{1.16369, -0.936231}, {-0.833401, 1.10434}}, {{1.06764, -0.762263}, {-0.819777, 1.016}}}\n\tcheck3D(r, s, \"Pt\", t)\n}\n\nfunc TestID(t *testing.T) {\n\tp := random3D()\n\ta, b, c := discrete.InformationDecomposition(p, 100)\n\n\tif math.Abs(a-0.1376) > 0.0001 {\n\t\tt.Errorf(fmt.Sprintf(\"Synergy should be %f, but it is %f\", 0.1376, a))\n\t}\n\n\tif math.Abs(b-0.000676248) > 0.0001 {\n\t\tt.Errorf(fmt.Sprintf(\"UniqueXY should be %f, but it is %f\", 0.000676248, b))\n\t}\n\n\tif math.Abs(c-0.00490289) > 0.0001 {\n\t\tt.Errorf(fmt.Sprintf(\"UniqueXZ should be %f, but it is %f\", 0.00490289, c))\n\t}\n\n}\n\nfunc check3D(r, s [][][]float64, label string, t *testing.T) {\n\tfor i := 0; i < 2; i++ {\n\t\tfor j := 0; j < 2; j++ {\n\t\t\tfor k := 0; k < 2; k++ {\n\t\t\t\tif math.Abs(r[i][j][k]-s[i][j][k]) > 0.0001 {\n\t\t\t\t\tt.Errorf(fmt.Sprintf(\"%s should be %f, but it is %f\", label, s[i][j][k], r[i][j][k]))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc check2D(r, s [][]float64, label string, t *testing.T) {\n\tfor i := 0; i < 2; i++ {\n\t\tfor j := 0; j < 2; j++ {\n\t\t\tif math.Abs(r[i][j]-s[i][j]) > 0.0001 {\n\t\t\t\tt.Errorf(fmt.Sprintf(\"%s should be %f, but it is %f\", label, s[i][j], r[i][j]))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc check1D(r, s []float64, label string, t *testing.T) {\n\tfor i := 0; i < 2; i++ {\n\t\tif math.Abs(r[i]-s[i]) > 0.0001 {\n\t\t\tt.Errorf(fmt.Sprintf(\"%s should be %f, but it is %f\", label, s[i], r[i]))\n\t\t}\n\t}\n}\n<commit_msg>Test case for InformationDecomposition incldued<commit_after>package discrete_test\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"testing\"\n\n\t\"github.com\/kzahedi\/goent\/discrete\"\n)\n\nfunc check3D(r, s [][][]float64, label string, t *testing.T) {\n\tfor i := 0; i < 2; i++ {\n\t\tfor j := 0; j < 2; j++ {\n\t\t\tfor k := 0; k < 2; k++ {\n\t\t\t\tif math.Abs(r[i][j][k]-s[i][j][k]) > 0.0001 {\n\t\t\t\t\tt.Errorf(fmt.Sprintf(\"%s should be %f, but it is %f\", label, s[i][j][k], r[i][j][k]))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc check2D(r, s [][]float64, label string, t *testing.T) {\n\tfor i := 0; i < 2; i++ {\n\t\tfor j := 0; j < 2; j++ {\n\t\t\tif math.Abs(r[i][j]-s[i][j]) > 0.0001 
{\n\t\t\t\tt.Errorf(fmt.Sprintf(\"%s should be %f, but it is %f\", label, s[i][j], r[i][j]))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc check1D(r, s []float64, label string, t *testing.T) {\n\tfor i := 0; i < 2; i++ {\n\t\tif math.Abs(r[i]-s[i]) > 0.0001 {\n\t\t\tt.Errorf(fmt.Sprintf(\"%s should be %f, but it is %f\", label, s[i], r[i]))\n\t\t}\n\t}\n}\nfunc random3D() [][][]float64 {\n\td := make([][][]float64, 2, 2)\n\tfor i := 0; i < 2; i++ {\n\t\td[i] = make([][]float64, 2, 2)\n\t\tfor j := 0; j < 2; j++ {\n\t\t\td[i][j] = make([]float64, 2, 2)\n\t\t}\n\t}\n\n\t\/\/ number generated by mathematica\n\td[0][0][0] = 0.16368852271549625\n\td[0][0][1] = 0.06376901757394152\n\td[0][1][0] = 0.1665988153736691\n\td[0][1][1] = 0.10434124349128561\n\td[1][0][0] = 0.06763873914824399\n\td[1][0][1] = 0.23773709094595946\n\td[1][1][0] = 0.1802225946987888\n\td[1][1][1] = 0.01600397605261524\n\n\treturn d\n}\n\nfunc random3D2() [][][]float64 {\n\td := make([][][]float64, 2, 2)\n\tfor i := 0; i < 2; i++ {\n\t\td[i] = make([][]float64, 2, 2)\n\t\tfor j := 0; j < 2; j++ {\n\t\t\td[i][j] = make([]float64, 2, 2)\n\t\t}\n\t}\n\n\t\/\/ number generated by mathematica\n\td[0][0][0] = 0.13407218883879615\n\td[0][0][1] = 0.12305057644008721\n\td[0][1][0] = 0.15007259009459384\n\td[0][1][1] = 0.17612898934230117\n\td[1][0][0] = 0.24366925703249445\n\td[1][0][1] = 0.05511361499384561\n\td[1][1][0] = 0.08273915957249026\n\td[1][1][1] = 0.03515362368539128\n\n\treturn d\n}\nfunc random2D() [][]float64 {\n\td := make([][]float64, 2, 2)\n\tfor i := 0; i < 2; i++ {\n\t\td[i] = make([]float64, 2, 2)\n\t}\n\n\t\/\/ number generated by mathematica\n\td[0][0] = 0.38545682823650396\n\td[0][1] = 0.03333130265730018\n\td[1][0] = 0.5150147316756242\n\td[1][1] = 0.0661971374305717\n\n\treturn d\n}\n\nfunc random1D() []float64 {\n\td := make([]float64, 2, 2)\n\n\t\/\/ number generated by mathematica\n\td[0] = 0.4739196991545937\n\td[1] = 0.5260803008454062\n\n\treturn d\n}\n\nfunc TestPX(t *testing.T) {\n\tp := random3D()\n\tr := discrete.PX(p)\n\ts := []float64{0.498398, 0.501602}\n\tcheck1D(r, s, \"PX\", t)\n}\n\nfunc TestPY(t *testing.T) {\n\tp := random3D()\n\tr := discrete.PY(p)\n\ts := []float64{0.532833, 0.467167}\n\tcheck1D(r, s, \"PY\", t)\n}\n\nfunc TestPZ(t *testing.T) {\n\tp := random3D()\n\tr := discrete.PZ(p)\n\ts := []float64{0.578149, 0.421851}\n\tcheck1D(r, s, \"PZ\", t)\n}\n\nfunc TestPYZ(t *testing.T) {\n\tp := random3D()\n\tr := discrete.PYZ(p)\n\ts := [][]float64{{0.231327, 0.301506}, {0.346821, 0.120345}}\n\tcheck2D(r, s, \"PYZ\", t)\n}\n\nfunc TestPXZ(t *testing.T) {\n\tp := random3D()\n\tr := discrete.PXZ(p)\n\ts := [][]float64{{0.330287, 0.16811}, {0.247861, 0.253741}}\n\tcheck2D(r, s, \"PXZ\", t)\n}\n\nfunc TestPXY(t *testing.T) {\n\tp := random3D()\n\tr := discrete.PXY(p)\n\ts := [][]float64{{0.227458, 0.27094}, {0.305376, 0.196227}}\n\tcheck2D(r, s, \"PXY\", t)\n}\n\nfunc TestH3(t *testing.T) {\n\tp := random3D()\n\tr := discrete.H3(p)\n\t\/\/ result taken from a working implementation in Mathematica\n\tif math.Abs(r-2.74816) > 0.0001 {\n\t\tt.Errorf(fmt.Sprintf(\"H3 should be %f, but it is %f\", 2.74816, r))\n\t}\n}\n\nfunc TestH2(t *testing.T) {\n\tp := random2D()\n\tr := discrete.H2(p)\n\t\/\/ result taken from a working implementation in Mathematica\n\tif math.Abs(r-1.44603) > 0.0001 {\n\t\tt.Errorf(fmt.Sprintf(\"H3 should be %f, but it is %f\", 1.44603, r))\n\t}\n}\n\nfunc TestH1(t *testing.T) {\n\tp := random1D()\n\tr := discrete.H1(p)\n\n\tif math.Abs(r-0.998037) > 0.0001 
{\n\t\tt.Errorf(fmt.Sprintf(\"H1 of uniform should be %f, but it is %f\", 0.998037, r))\n\t}\n}\n\nfunc TestMiXvYgZ(t *testing.T) {\n\tp := random3D()\n\ts := 0.138276\n\tr := discrete.MiXvYgZ(p)\n\tif math.Abs(r-s) > 0.0001 {\n\t\tt.Errorf(fmt.Sprintf(\"MiXvYgZ should be %f, but it is %f\", s, r))\n\t}\n}\n\nfunc TestMiXvZgY(t *testing.T) {\n\tp := random3D()\n\ts := 0.142503\n\tr := discrete.MiXvZgY(p)\n\tif math.Abs(r-s) > 0.0001 {\n\t\tt.Errorf(fmt.Sprintf(\"MiXvZgY should be %f, but it is %f\", s, r))\n\t}\n}\n\nfunc TestMiXvY(t *testing.T) {\n\tp := random3D()\n\ts := 0.0168978\n\tr := discrete.MiXvY(p)\n\tif math.Abs(r-s) > 0.0001 {\n\t\tt.Errorf(fmt.Sprintf(\"MiXvY should be %f, but it is %f\", s, r))\n\t}\n}\n\nfunc TestCoI(t *testing.T) {\n\tp := random3D()\n\ts := discrete.MiXvY(p) - discrete.MiXvYgZ(p)\n\tr := discrete.CoI(p)\n\tif math.Abs(r-s) > 0.0001 {\n\t\tt.Errorf(fmt.Sprintf(\"CoI should be %f, but it is %f\", s, r))\n\t}\n}\n\nfunc TestPt(t *testing.T) {\n\tp := random3D()\n\n\tA := make([][][]float64, 2, 2)\n\tA[0] = make([][]float64, 2, 2)\n\tA[1] = make([][]float64, 2, 2)\n\tA[0][0] = make([]float64, 2, 2)\n\tA[0][1] = make([]float64, 2, 2)\n\tA[1][0] = make([]float64, 2, 2)\n\tA[1][1] = make([]float64, 2, 2)\n\n\tB := make([][][]float64, 2, 2)\n\tB[0] = make([][]float64, 2, 2)\n\tB[1] = make([][]float64, 2, 2)\n\tB[0][0] = make([]float64, 2, 2)\n\tB[0][1] = make([]float64, 2, 2)\n\tB[1][0] = make([]float64, 2, 2)\n\tB[1][1] = make([]float64, 2, 2)\n\n\tA[0][0][0] = 1.0\n\tA[0][0][1] = -1.0\n\tA[0][1][0] = -1.0\n\tA[0][1][1] = 1.0\n\n\tB[1][0][0] = 1.0\n\tB[1][0][1] = -1.0\n\tB[1][1][0] = -1.0\n\tB[1][1][1] = 1.0\n\n\t\/\/ Numbers generated with a working Mathematica implementation\n\tr := discrete.Pt(p, -1.0, 1.0)\n\ts := [][][]float64{{{-0.836311, 1.06377}, {1.1666, -0.895659}}, {{1.06764, -0.762263}, {-0.819777, 1.016}}}\n\tcheck3D(r, s, \"Pt\", t)\n\n\tr = discrete.Pt(p, -1.0, -1.0)\n\ts = [][][]float64{{{-0.836311, 1.06377}, {1.1666, -0.895659}}, {{-0.932361, 1.23774}, {1.18022, -0.983996}}}\n\tcheck3D(r, s, \"Pt\", t)\n\n\tr = discrete.Pt(p, 1.0, -1.0)\n\ts = [][][]float64{{{1.16369, -0.936231}, {-0.833401, 1.10434}}, {{-0.932361, 1.23774}, {1.18022, -0.983996}}}\n\tcheck3D(r, s, \"Pt\", t)\n\n\tr = discrete.Pt(p, 1.0, 1.0)\n\ts = [][][]float64{{{1.16369, -0.936231}, {-0.833401, 1.10434}}, {{1.06764, -0.762263}, {-0.819777, 1.016}}}\n\tcheck3D(r, s, \"Pt\", t)\n}\n\nfunc TestID(t *testing.T) {\n\tp := [][][]float64{{{0.0895936, 0.105603}, {0.119766, 0.178879}}, {{0.109606, 0.0825123}, {0.142857, 0.171182}}}\n\ta, b, c := discrete.InformationDecomposition(p, 100)\n\tsa := 0.0\n\tsb := 0.000913594\n\tsc := 0.00479433\n\n\tif math.Abs(a-sa) > 0.05 {\n\t\tt.Errorf(fmt.Sprintf(\"Synergy should be %f, but it is %f\", sa, a))\n\t}\n\n\tif math.Abs(b-sb) > 0.05 {\n\t\tt.Errorf(fmt.Sprintf(\"UniqueXY should be %f, but it is %f\", sb, b))\n\t}\n\n\tif math.Abs(c-sc) > 0.05 {\n\t\tt.Errorf(fmt.Sprintf(\"UniqueXZ should be %f, but it is %f\", sc, c))\n\t}\n\n\tp = [][][]float64{{{0.00444861, 0.030672}, {0.219387, 0.0636853}}, {{0.154765, 0.141419}, {0.191993, 0.193631}}}\n\ta, b, c = discrete.InformationDecomposition(p, 100)\n\tsa = 0.0460428\n\tsb = 0.0680672\n\tsc = 0.0094116\n\n\tif math.Abs(a-sa) > 0.05 {\n\t\tt.Errorf(fmt.Sprintf(\"Synergy should be %f, but it is %f\", sa, a))\n\t}\n\n\tif math.Abs(b-sb) > 0.05 {\n\t\tt.Errorf(fmt.Sprintf(\"UniqueXY should be %f, but it is %f\", sb, b))\n\t}\n\n\tif math.Abs(c-sc) > 0.05 {\n\t\tt.Errorf(fmt.Sprintf(\"UniqueXZ should 
be %f, but it is %f\", sc, c))\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package remote\n\nimport (\n\t\"context\"\n\t\"net\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\n\t\"github.com\/mutagen-io\/mutagen\/pkg\/compression\"\n\t\"github.com\/mutagen-io\/mutagen\/pkg\/encoding\"\n\t\"github.com\/mutagen-io\/mutagen\/pkg\/filesystem\"\n\t\"github.com\/mutagen-io\/mutagen\/pkg\/logging\"\n\t\"github.com\/mutagen-io\/mutagen\/pkg\/synchronization\"\n\t\"github.com\/mutagen-io\/mutagen\/pkg\/synchronization\/core\"\n\t\"github.com\/mutagen-io\/mutagen\/pkg\/synchronization\/endpoint\/local\"\n\t\"github.com\/mutagen-io\/mutagen\/pkg\/synchronization\/rsync\"\n)\n\n\/\/ endpointServer wraps a local endpoint instances and dispatches requests to\n\/\/ this endpoint from an endpoint client.\ntype endpointServer struct {\n\t\/\/ encoder is the control stream encoder.\n\tencoder *encoding.ProtobufEncoder\n\t\/\/ decoder is the control stream decoder.\n\tdecoder *encoding.ProtobufDecoder\n\t\/\/ endpoint is the underlying local endpoint.\n\tendpoint synchronization.Endpoint\n}\n\n\/\/ ServeEndpoint creates and serves a remote endpoint server on the specified\n\/\/ connection. It enforces that the provided connection is closed by the time\n\/\/ this function returns, regardless of failure.\nfunc ServeEndpoint(logger *logging.Logger, connection net.Conn, options ...EndpointServerOption) error {\n\t\/\/ Defer closure of the connection.\n\tdefer connection.Close()\n\n\t\/\/ Enable read\/write compression on the connection.\n\treader := compression.NewDecompressingReader(connection)\n\twriter := compression.NewCompressingWriter(connection)\n\n\t\/\/ Create an encoder and decoder.\n\tencoder := encoding.NewProtobufEncoder(writer)\n\tdecoder := encoding.NewProtobufDecoder(reader)\n\n\t\/\/ Create an endpoint configuration and apply all options.\n\tendpointServerOptions := &endpointServerOptions{}\n\tfor _, o := range options {\n\t\to.apply(endpointServerOptions)\n\t}\n\n\t\/\/ Receive the initialize request. 
If this fails, then send a failure\n\t\/\/ response (even though the pipe is probably broken) and abort.\n\trequest := &InitializeSynchronizationRequest{}\n\tif err := decoder.Decode(request); err != nil {\n\t\terr = errors.Wrap(err, \"unable to receive initialize request\")\n\t\tencoder.Encode(&InitializeSynchronizationResponse{Error: err.Error()})\n\t\treturn err\n\t}\n\n\t\/\/ If a root path override has been specified, then apply it.\n\tif endpointServerOptions.root != \"\" {\n\t\trequest.Root = endpointServerOptions.root\n\t}\n\n\t\/\/ If configuration overrides have been provided, then validate them and\n\t\/\/ merge them into the main configuration.\n\tif endpointServerOptions.configuration != nil {\n\t\tif err := endpointServerOptions.configuration.EnsureValid(true); err != nil {\n\t\t\terr = errors.Wrap(err, \"override configuration invalid\")\n\t\t\tencoder.Encode(&InitializeSynchronizationResponse{Error: err.Error()})\n\t\t\treturn err\n\t\t}\n\t\trequest.Configuration = synchronization.MergeConfigurations(\n\t\t\trequest.Configuration,\n\t\t\tendpointServerOptions.configuration,\n\t\t)\n\t}\n\n\t\/\/ If a connection validator has been provided, then ensure that it\n\t\/\/ approves of the specified endpoint configuration.\n\tif endpointServerOptions.connectionValidator != nil {\n\t\terr := endpointServerOptions.connectionValidator(\n\t\t\trequest.Root,\n\t\t\trequest.Session,\n\t\t\trequest.Version,\n\t\t\trequest.Configuration,\n\t\t\trequest.Alpha,\n\t\t)\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, \"endpoint configuration rejected\")\n\t\t\tencoder.Encode(&InitializeSynchronizationResponse{Error: err.Error()})\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Ensure that the initialization request is valid.\n\tif err := request.ensureValid(); err != nil {\n\t\terr = errors.Wrap(err, \"invalid initialize request\")\n\t\tencoder.Encode(&InitializeSynchronizationResponse{Error: err.Error()})\n\t\treturn err\n\t}\n\n\t\/\/ Expand and normalize the root path.\n\tif r, err := filesystem.Normalize(request.Root); err != nil {\n\t\terr = errors.Wrap(err, \"unable to normalize synchronization root\")\n\t\tencoder.Encode(&InitializeSynchronizationResponse{Error: err.Error()})\n\t\treturn err\n\t} else {\n\t\trequest.Root = r\n\t}\n\n\t\/\/ Create the underlying endpoint. If it fails to create, then send a\n\t\/\/ failure response and abort. 
If it succeeds, then defer its closure.\n\tendpoint, err := local.NewEndpoint(\n\t\tlogger,\n\t\trequest.Root,\n\t\trequest.Session,\n\t\trequest.Version,\n\t\trequest.Configuration,\n\t\trequest.Alpha,\n\t\tendpointServerOptions.endpointOptions...,\n\t)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"unable to create underlying endpoint\")\n\t\tencoder.Encode(&InitializeSynchronizationResponse{Error: err.Error()})\n\t\treturn err\n\t}\n\tdefer endpoint.Shutdown()\n\n\t\/\/ Send a successful initialize response.\n\tif err = encoder.Encode(&InitializeSynchronizationResponse{}); err != nil {\n\t\treturn errors.Wrap(err, \"unable to send initialize response\")\n\t}\n\n\t\/\/ Create the server.\n\tserver := &endpointServer{\n\t\tendpoint: endpoint,\n\t\tencoder: encoder,\n\t\tdecoder: decoder,\n\t}\n\n\t\/\/ Serve until an error occurs.\n\treturn server.serve()\n}\n\n\/\/ serve is the main request handling loop.\nfunc (s *endpointServer) serve() error {\n\t\/\/ Keep a reusable endpoint request.\n\trequest := &EndpointRequest{}\n\n\t\/\/ Receive and process control requests until there's an error.\n\tfor {\n\t\t\/\/ Receive the next request.\n\t\t*request = EndpointRequest{}\n\t\tif err := s.decoder.Decode(request); err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to receive request\")\n\t\t} else if err = request.ensureValid(); err != nil {\n\t\t\treturn errors.Wrap(err, \"invalid endpoint request\")\n\t\t}\n\n\t\t\/\/ Handle the request based on type.\n\t\tif request.Poll != nil {\n\t\t\tif err := s.servePoll(request.Poll); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"unable to serve poll request\")\n\t\t\t}\n\t\t} else if request.Scan != nil {\n\t\t\tif err := s.serveScan(request.Scan); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"unable to serve scan request\")\n\t\t\t}\n\t\t} else if request.Stage != nil {\n\t\t\tif err := s.serveStage(request.Stage); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"unable to serve stage request\")\n\t\t\t}\n\t\t} else if request.Supply != nil {\n\t\t\tif err := s.serveSupply(request.Supply); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"unable to serve supply request\")\n\t\t\t}\n\t\t} else if request.Transition != nil {\n\t\t\tif err := s.serveTransition(request.Transition); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"unable to serve transition request\")\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ TODO: Should we panic here? 
The request validation already\n\t\t\t\/\/ ensures that one and only one message component is set, so we\n\t\t\t\/\/ should never hit this condition.\n\t\t\treturn errors.New(\"invalid request\")\n\t\t}\n\t}\n}\n\n\/\/ servePoll serves a poll request.\nfunc (s *endpointServer) servePoll(request *PollRequest) error {\n\t\/\/ Ensure the request is valid.\n\tif err := request.ensureValid(); err != nil {\n\t\treturn errors.Wrap(err, \"invalid poll request\")\n\t}\n\n\t\/\/ Create a cancellable context for executing the poll.\n\tctx, cancel := context.WithCancel(context.Background())\n\n\t\/\/ Start a Goroutine to execute the poll and send a response when done.\n\tresponseSendErrors := make(chan error, 1)\n\tgo func() {\n\t\tresponse := &PollResponse{}\n\t\tif err := s.endpoint.Poll(ctx); err != nil {\n\t\t\tresponse.Error = err.Error()\n\t\t}\n\t\tresponseSendErrors <- s.encoder.Encode(response)\n\t}()\n\n\t\/\/ Start a Goroutine to watch for the completion request.\n\tcompletionReceiveErrors := make(chan error, 1)\n\tgo func() {\n\t\trequest := &PollCompletionRequest{}\n\t\tcompletionReceiveErrors <- s.decoder.Decode(request)\n\t}()\n\n\t\/\/ Wait for both a completion request to be received and a response to be\n\t\/\/ sent. Both of these will occur, though their order is not known. If the\n\t\/\/ completion request is received first, then we cancel the subcontext to\n\t\/\/ preempt the poll and force transmission of a response. If the response is\n\t\/\/ sent first, then we know the completion request is on its way. In this\n\t\/\/ case, we still cancel the subcontext we created as required by the\n\t\/\/ context package to avoid leaking resources.\n\tvar responseSendErr, completionReceiveErr error\n\tselect {\n\tcase completionReceiveErr = <-completionReceiveErrors:\n\t\tcancel()\n\t\tresponseSendErr = <-responseSendErrors\n\tcase responseSendErr = <-responseSendErrors:\n\t\tcancel()\n\t\tcompletionReceiveErr = <-completionReceiveErrors\n\t}\n\n\t\/\/ Check for errors.\n\tif responseSendErr != nil {\n\t\treturn responseSendErr\n\t} else if completionReceiveErr != nil {\n\t\treturn completionReceiveErr\n\t}\n\n\t\/\/ Success.\n\treturn nil\n}\n\n
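\/\/ Note: servePoll and serveScan share the same completion handshake: a\n\/\/ response-sending Goroutine races a completion-request reader, and whichever\n\/\/ finishes first cancels the shared context so that the other can finish too.\n\n\/\/ serveScan serves a scan request.\nfunc (s *endpointServer) serveScan(request *ScanRequest) error {\n\t\/\/ Ensure the request is valid.\n\tif err := request.ensureValid(); err != nil {\n\t\treturn errors.Wrap(err, \"invalid scan request\")\n\t}\n\n\t\/\/ Create a cancellable context for executing the scan. The context may be\n\t\/\/ cancelled to force a response, but in case the response comes naturally,\n\t\/\/ ensure the context is cancelled before we're done to avoid leaking a\n\t\/\/ Goroutine.\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\t\/\/ Start a Goroutine to execute the scan and send a response when done.\n\tresponseSendErrors := make(chan error, 1)\n\tgo func() {\n\t\t\/\/ Perform a scan. Passing a nil ancestor is fine - it's not used for\n\t\t\/\/ local endpoints anyway. 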
If a retry is requested or an error occurs,\n\t\t\/\/ send a response.\n\t\tsnapshot, preservesExecutability, err, tryAgain := s.endpoint.Scan(ctx, nil, request.Full)\n\t\tif err != nil {\n\t\t\tresponseSendErrors <- s.encoder.Encode(&ScanResponse{\n\t\t\t\tError: err.Error(),\n\t\t\t\tTryAgain: tryAgain,\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Marshal the snapshot in a deterministic fashion.\n\t\tbuffer := proto.NewBuffer(nil)\n\t\tbuffer.SetDeterministic(true)\n\t\tif err := buffer.Marshal(&core.Archive{Root: snapshot}); err != nil {\n\t\t\tresponseSendErrors <- s.encoder.Encode(&ScanResponse{\n\t\t\t\tError: errors.Wrap(err, \"unable to marshal snapshot\").Error(),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t\tsnapshotBytes := buffer.Bytes()\n\n\t\t\/\/ Create an rsync engine.\n\t\tengine := rsync.NewEngine()\n\n\t\t\/\/ Compute the snapshot's delta against the base.\n\t\tdelta := engine.DeltafyBytes(snapshotBytes, request.BaseSnapshotSignature, 0)\n\n\t\t\/\/ Send the response.\n\t\tresponseSendErrors <- s.encoder.Encode(&ScanResponse{\n\t\t\tSnapshotDelta: delta,\n\t\t\tPreservesExecutability: preservesExecutability,\n\t\t})\n\t}()\n\n\t\/\/ Start a Goroutine to watch for the completion request.\n\tcompletionReceiveErrors := make(chan error, 1)\n\tgo func() {\n\t\trequest := &ScanCompletionRequest{}\n\t\tcompletionReceiveErrors <- s.decoder.Decode(request)\n\t}()\n\n\t\/\/ Wait for both a completion request to be received and a response to be\n\t\/\/ sent. Both of these will occur, though their order is not known. If the\n\t\/\/ completion request is received first, then we cancel the subcontext to\n\t\/\/ preempt the scan and force transmission of a response. If the response is\n\t\/\/ sent first, then we know the completion request is on its way. In this\n\t\/\/ case, we still cancel the subcontext we created as required by the\n\t\/\/ context package to avoid leaking resources.\n\tvar responseSendErr, completionReceiveErr error\n\tselect {\n\tcase completionReceiveErr = <-completionReceiveErrors:\n\t\tcancel()\n\t\tresponseSendErr = <-responseSendErrors\n\tcase responseSendErr = <-responseSendErrors:\n\t\tcancel()\n\t\tcompletionReceiveErr = <-completionReceiveErrors\n\t}\n\n\t\/\/ Check for errors.\n\tif responseSendErr != nil {\n\t\treturn responseSendErr\n\t} else if completionReceiveErr != nil {\n\t\treturn completionReceiveErr\n\t}\n\n\t\/\/ Success.\n\treturn nil\n}\n\n\/\/ serveStage serves a stage request.\nfunc (s *endpointServer) serveStage(request *StageRequest) error {\n\t\/\/ Ensure the request is valid.\n\tif err := request.ensureValid(); err != nil {\n\t\treturn errors.Wrap(err, \"invalid stage request\")\n\t}\n\n\t\/\/ Begin staging.\n\tpaths, signatures, receiver, err := s.endpoint.Stage(request.Paths, request.Digests)\n\tif err != nil {\n\t\ts.encoder.Encode(&StageResponse{Error: err.Error()})\n\t\treturn errors.Wrap(err, \"unable to begin staging\")\n\t}\n\n\t\/\/ Send the response.\n\tresponse := &StageResponse{\n\t\tPaths: paths,\n\t\tSignatures: signatures,\n\t}\n\tif err = s.encoder.Encode(response); err != nil {\n\t\treturn errors.Wrap(err, \"unable to send stage response\")\n\t}\n\n\t\/\/ If there weren't any paths requiring staging, then we're done.\n\tif len(paths) == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ The remote side of the connection should now forward rsync operations, so\n\t\/\/ we need to decode and forward them to the receiver. 
If this operation\n\t\/\/ completes successfully, staging is complete and successful.\n\tdecoder := newProtobufRsyncDecoder(s.decoder)\n\tif err = rsync.DecodeToReceiver(decoder, uint64(len(paths)), receiver); err != nil {\n\t\treturn errors.Wrap(err, \"unable to decode and forward rsync operations\")\n\t}\n\n\t\/\/ Success.\n\treturn nil\n}\n\n\/\/ serveSupply serves a supply request.\nfunc (s *endpointServer) serveSupply(request *SupplyRequest) error {\n\t\/\/ Ensure the request is valid.\n\tif err := request.ensureValid(); err != nil {\n\t\treturn errors.Wrap(err, \"invalid supply request\")\n\t}\n\n\t\/\/ Create an encoding receiver that can transmit rsync operations to the\n\t\/\/ remote.\n\tencoder := newProtobufRsyncEncoder(s.encoder)\n\treceiver := rsync.NewEncodingReceiver(encoder)\n\n\t\/\/ Perform supplying.\n\tif err := s.endpoint.Supply(request.Paths, request.Signatures, receiver); err != nil {\n\t\treturn errors.Wrap(err, \"unable to perform supplying\")\n\t}\n\n\t\/\/ Success.\n\treturn nil\n}\n\n\/\/ serveTransition serves a transition request.\nfunc (s *endpointServer) serveTransition(request *TransitionRequest) error {\n\t\/\/ Ensure the request is valid.\n\tif err := request.ensureValid(); err != nil {\n\t\treturn errors.Wrap(err, \"invalid transition request\")\n\t}\n\n\t\/\/ Perform the transition.\n\tresults, problems, stagerMissingFiles, err := s.endpoint.Transition(request.Transitions)\n\tif err != nil {\n\t\ts.encoder.Encode(&TransitionResponse{Error: err.Error()})\n\t\treturn errors.Wrap(err, \"unable to perform transition\")\n\t}\n\n\t\/\/ HACK: Wrap the results in Archives since neither Protocol Buffers can't\n\t\/\/ encode nil pointers in the result array.\n\twrappedResults := make([]*core.Archive, len(results))\n\tfor r, result := range results {\n\t\twrappedResults[r] = &core.Archive{Root: result}\n\t}\n\n\t\/\/ Send the response.\n\tresponse := &TransitionResponse{\n\t\tResults: wrappedResults,\n\t\tProblems: problems,\n\t\tStagerMissingFiles: stagerMissingFiles,\n\t}\n\tif err = s.encoder.Encode(response); err != nil {\n\t\treturn errors.Wrap(err, \"unable to send transition response\")\n\t}\n\n\t\/\/ Success.\n\treturn nil\n}\n<commit_msg>Cleaned up remote endpoint transition serving method.<commit_after>package remote\n\nimport (\n\t\"context\"\n\t\"net\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\n\t\"github.com\/mutagen-io\/mutagen\/pkg\/compression\"\n\t\"github.com\/mutagen-io\/mutagen\/pkg\/encoding\"\n\t\"github.com\/mutagen-io\/mutagen\/pkg\/filesystem\"\n\t\"github.com\/mutagen-io\/mutagen\/pkg\/logging\"\n\t\"github.com\/mutagen-io\/mutagen\/pkg\/synchronization\"\n\t\"github.com\/mutagen-io\/mutagen\/pkg\/synchronization\/core\"\n\t\"github.com\/mutagen-io\/mutagen\/pkg\/synchronization\/endpoint\/local\"\n\t\"github.com\/mutagen-io\/mutagen\/pkg\/synchronization\/rsync\"\n)\n\n\/\/ endpointServer wraps a local endpoint instance and dispatches requests to\n\/\/ this endpoint from an endpoint client.\ntype endpointServer struct {\n\t\/\/ encoder is the control stream encoder.\n\tencoder *encoding.ProtobufEncoder\n\t\/\/ decoder is the control stream decoder.\n\tdecoder *encoding.ProtobufDecoder\n\t\/\/ endpoint is the underlying local endpoint.\n\tendpoint synchronization.Endpoint\n}\n\n\/\/ ServeEndpoint creates and serves a remote endpoint server on the specified\n\/\/ connection. 
It enforces that the provided connection is closed by the time\n\/\/ this function returns, regardless of failure.\nfunc ServeEndpoint(logger *logging.Logger, connection net.Conn, options ...EndpointServerOption) error {\n\t\/\/ Defer closure of the connection.\n\tdefer connection.Close()\n\n\t\/\/ Enable read\/write compression on the connection.\n\treader := compression.NewDecompressingReader(connection)\n\twriter := compression.NewCompressingWriter(connection)\n\n\t\/\/ Create an encoder and decoder.\n\tencoder := encoding.NewProtobufEncoder(writer)\n\tdecoder := encoding.NewProtobufDecoder(reader)\n\n\t\/\/ Create an endpoint configuration and apply all options.\n\tendpointServerOptions := &endpointServerOptions{}\n\tfor _, o := range options {\n\t\to.apply(endpointServerOptions)\n\t}\n\n\t\/\/ Receive the initialize request. If this fails, then send a failure\n\t\/\/ response (even though the pipe is probably broken) and abort.\n\trequest := &InitializeSynchronizationRequest{}\n\tif err := decoder.Decode(request); err != nil {\n\t\terr = errors.Wrap(err, \"unable to receive initialize request\")\n\t\tencoder.Encode(&InitializeSynchronizationResponse{Error: err.Error()})\n\t\treturn err\n\t}\n\n\t\/\/ If a root path override has been specified, then apply it.\n\tif endpointServerOptions.root != \"\" {\n\t\trequest.Root = endpointServerOptions.root\n\t}\n\n\t\/\/ If configuration overrides have been provided, then validate them and\n\t\/\/ merge them into the main configuration.\n\tif endpointServerOptions.configuration != nil {\n\t\tif err := endpointServerOptions.configuration.EnsureValid(true); err != nil {\n\t\t\terr = errors.Wrap(err, \"override configuration invalid\")\n\t\t\tencoder.Encode(&InitializeSynchronizationResponse{Error: err.Error()})\n\t\t\treturn err\n\t\t}\n\t\trequest.Configuration = synchronization.MergeConfigurations(\n\t\t\trequest.Configuration,\n\t\t\tendpointServerOptions.configuration,\n\t\t)\n\t}\n\n\t\/\/ If a connection validator has been provided, then ensure that it\n\t\/\/ approves of the specified endpoint configuration.\n\tif endpointServerOptions.connectionValidator != nil {\n\t\terr := endpointServerOptions.connectionValidator(\n\t\t\trequest.Root,\n\t\t\trequest.Session,\n\t\t\trequest.Version,\n\t\t\trequest.Configuration,\n\t\t\trequest.Alpha,\n\t\t)\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, \"endpoint configuration rejected\")\n\t\t\tencoder.Encode(&InitializeSynchronizationResponse{Error: err.Error()})\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Ensure that the initialization request is valid.\n\tif err := request.ensureValid(); err != nil {\n\t\terr = errors.Wrap(err, \"invalid initialize request\")\n\t\tencoder.Encode(&InitializeSynchronizationResponse{Error: err.Error()})\n\t\treturn err\n\t}\n\n\t\/\/ Expand and normalize the root path.\n\tif r, err := filesystem.Normalize(request.Root); err != nil {\n\t\terr = errors.Wrap(err, \"unable to normalize synchronization root\")\n\t\tencoder.Encode(&InitializeSynchronizationResponse{Error: err.Error()})\n\t\treturn err\n\t} else {\n\t\trequest.Root = r\n\t}\n\n\t\/\/ Create the underlying endpoint. If it fails to create, then send a\n\t\/\/ failure response and abort. 
If it succeeds, then defer its closure.\n\tendpoint, err := local.NewEndpoint(\n\t\tlogger,\n\t\trequest.Root,\n\t\trequest.Session,\n\t\trequest.Version,\n\t\trequest.Configuration,\n\t\trequest.Alpha,\n\t\tendpointServerOptions.endpointOptions...,\n\t)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"unable to create underlying endpoint\")\n\t\tencoder.Encode(&InitializeSynchronizationResponse{Error: err.Error()})\n\t\treturn err\n\t}\n\tdefer endpoint.Shutdown()\n\n\t\/\/ Send a successful initialize response.\n\tif err = encoder.Encode(&InitializeSynchronizationResponse{}); err != nil {\n\t\treturn errors.Wrap(err, \"unable to send initialize response\")\n\t}\n\n\t\/\/ Create the server.\n\tserver := &endpointServer{\n\t\tendpoint: endpoint,\n\t\tencoder: encoder,\n\t\tdecoder: decoder,\n\t}\n\n\t\/\/ Serve until an error occurs.\n\treturn server.serve()\n}\n\n\/\/ serve is the main request handling loop.\nfunc (s *endpointServer) serve() error {\n\t\/\/ Keep a reusable endpoint request.\n\trequest := &EndpointRequest{}\n\n\t\/\/ Receive and process control requests until there's an error.\n\tfor {\n\t\t\/\/ Receive the next request.\n\t\t*request = EndpointRequest{}\n\t\tif err := s.decoder.Decode(request); err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to receive request\")\n\t\t} else if err = request.ensureValid(); err != nil {\n\t\t\treturn errors.Wrap(err, \"invalid endpoint request\")\n\t\t}\n\n\t\t\/\/ Handle the request based on type.\n\t\tif request.Poll != nil {\n\t\t\tif err := s.servePoll(request.Poll); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"unable to serve poll request\")\n\t\t\t}\n\t\t} else if request.Scan != nil {\n\t\t\tif err := s.serveScan(request.Scan); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"unable to serve scan request\")\n\t\t\t}\n\t\t} else if request.Stage != nil {\n\t\t\tif err := s.serveStage(request.Stage); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"unable to serve stage request\")\n\t\t\t}\n\t\t} else if request.Supply != nil {\n\t\t\tif err := s.serveSupply(request.Supply); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"unable to serve supply request\")\n\t\t\t}\n\t\t} else if request.Transition != nil {\n\t\t\tif err := s.serveTransition(request.Transition); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"unable to serve transition request\")\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ TODO: Should we panic here? 
The request validation already\n\t\t\t\/\/ ensures that one and only one message component is set, so we\n\t\t\t\/\/ should never hit this condition.\n\t\t\treturn errors.New(\"invalid request\")\n\t\t}\n\t}\n}\n\n\/\/ servePoll serves a poll request.\nfunc (s *endpointServer) servePoll(request *PollRequest) error {\n\t\/\/ Ensure the request is valid.\n\tif err := request.ensureValid(); err != nil {\n\t\treturn errors.Wrap(err, \"invalid poll request\")\n\t}\n\n\t\/\/ Create a cancellable context for executing the poll.\n\tctx, cancel := context.WithCancel(context.Background())\n\n\t\/\/ Start a Goroutine to execute the poll and send a response when done.\n\tresponseSendErrors := make(chan error, 1)\n\tgo func() {\n\t\tresponse := &PollResponse{}\n\t\tif err := s.endpoint.Poll(ctx); err != nil {\n\t\t\tresponse.Error = err.Error()\n\t\t}\n\t\tresponseSendErrors <- s.encoder.Encode(response)\n\t}()\n\n\t\/\/ Start a Goroutine to watch for the completion request.\n\tcompletionReceiveErrors := make(chan error, 1)\n\tgo func() {\n\t\trequest := &PollCompletionRequest{}\n\t\tcompletionReceiveErrors <- s.decoder.Decode(request)\n\t}()\n\n\t\/\/ Wait for both a completion request to be received and a response to be\n\t\/\/ sent. Both of these will occur, though their order is not known. If the\n\t\/\/ completion request is received first, then we cancel the subcontext to\n\t\/\/ preempt the poll and force transmission of a response. If the response is\n\t\/\/ sent first, then we know the completion request is on its way. In this\n\t\/\/ case, we still cancel the subcontext we created as required by the\n\t\/\/ context package to avoid leaking resources.\n\tvar responseSendErr, completionReceiveErr error\n\tselect {\n\tcase completionReceiveErr = <-completionReceiveErrors:\n\t\tcancel()\n\t\tresponseSendErr = <-responseSendErrors\n\tcase responseSendErr = <-responseSendErrors:\n\t\tcancel()\n\t\tcompletionReceiveErr = <-completionReceiveErrors\n\t}\n\n\t\/\/ Check for errors.\n\tif responseSendErr != nil {\n\t\treturn responseSendErr\n\t} else if completionReceiveErr != nil {\n\t\treturn completionReceiveErr\n\t}\n\n\t\/\/ Success.\n\treturn nil\n}\n\n\/\/ serveScan serves a scan request.\nfunc (s *endpointServer) serveScan(request *ScanRequest) error {\n\t\/\/ Ensure the request is valid.\n\tif err := request.ensureValid(); err != nil {\n\t\treturn errors.Wrap(err, \"invalid scan request\")\n\t}\n\n\t\/\/ Create a cancellable context for executing the scan. The context may be\n\t\/\/ cancelled to force a response, but in case the response comes naturally,\n\t\/\/ ensure the context is cancelled before we're done to avoid leaking a\n\t\/\/ Goroutine.\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\t\/\/ Start a Goroutine to execute the scan and send a response when done.\n\tresponseSendErrors := make(chan error, 1)\n\tgo func() {\n\t\t\/\/ Perform a scan. Passing a nil ancestor is fine - it's not used for\n\t\t\/\/ local endpoints anyway. 
If a retry is requested or an error occurs,\n\t\t\/\/ send a response.\n\t\tsnapshot, preservesExecutability, err, tryAgain := s.endpoint.Scan(ctx, nil, request.Full)\n\t\tif err != nil {\n\t\t\tresponseSendErrors <- s.encoder.Encode(&ScanResponse{\n\t\t\t\tError: err.Error(),\n\t\t\t\tTryAgain: tryAgain,\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Marshal the snapshot in a deterministic fashion.\n\t\tbuffer := proto.NewBuffer(nil)\n\t\tbuffer.SetDeterministic(true)\n\t\tif err := buffer.Marshal(&core.Archive{Root: snapshot}); err != nil {\n\t\t\tresponseSendErrors <- s.encoder.Encode(&ScanResponse{\n\t\t\t\tError: errors.Wrap(err, \"unable to marshal snapshot\").Error(),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t\tsnapshotBytes := buffer.Bytes()\n\n\t\t\/\/ Create an rsync engine.\n\t\tengine := rsync.NewEngine()\n\n\t\t\/\/ Compute the snapshot's delta against the base.\n\t\tdelta := engine.DeltafyBytes(snapshotBytes, request.BaseSnapshotSignature, 0)\n\n\t\t\/\/ Send the response.\n\t\tresponseSendErrors <- s.encoder.Encode(&ScanResponse{\n\t\t\tSnapshotDelta: delta,\n\t\t\tPreservesExecutability: preservesExecutability,\n\t\t})\n\t}()\n\n\t\/\/ Start a Goroutine to watch for the completion request.\n\tcompletionReceiveErrors := make(chan error, 1)\n\tgo func() {\n\t\trequest := &ScanCompletionRequest{}\n\t\tcompletionReceiveErrors <- s.decoder.Decode(request)\n\t}()\n\n\t\/\/ Wait for both a completion request to be received and a response to be\n\t\/\/ sent. Both of these will occur, though their order is not known. If the\n\t\/\/ completion request is received first, then we cancel the subcontext to\n\t\/\/ preempt the scan and force transmission of a response. If the response is\n\t\/\/ sent first, then we know the completion request is on its way. In this\n\t\/\/ case, we still cancel the subcontext we created as required by the\n\t\/\/ context package to avoid leaking resources.\n\tvar responseSendErr, completionReceiveErr error\n\tselect {\n\tcase completionReceiveErr = <-completionReceiveErrors:\n\t\tcancel()\n\t\tresponseSendErr = <-responseSendErrors\n\tcase responseSendErr = <-responseSendErrors:\n\t\tcancel()\n\t\tcompletionReceiveErr = <-completionReceiveErrors\n\t}\n\n\t\/\/ Check for errors.\n\tif responseSendErr != nil {\n\t\treturn responseSendErr\n\t} else if completionReceiveErr != nil {\n\t\treturn completionReceiveErr\n\t}\n\n\t\/\/ Success.\n\treturn nil\n}\n\n\/\/ serveStage serves a stage request.\nfunc (s *endpointServer) serveStage(request *StageRequest) error {\n\t\/\/ Ensure the request is valid.\n\tif err := request.ensureValid(); err != nil {\n\t\treturn errors.Wrap(err, \"invalid stage request\")\n\t}\n\n\t\/\/ Begin staging.\n\tpaths, signatures, receiver, err := s.endpoint.Stage(request.Paths, request.Digests)\n\tif err != nil {\n\t\ts.encoder.Encode(&StageResponse{Error: err.Error()})\n\t\treturn errors.Wrap(err, \"unable to begin staging\")\n\t}\n\n\t\/\/ Send the response.\n\tresponse := &StageResponse{\n\t\tPaths: paths,\n\t\tSignatures: signatures,\n\t}\n\tif err = s.encoder.Encode(response); err != nil {\n\t\treturn errors.Wrap(err, \"unable to send stage response\")\n\t}\n\n\t\/\/ If there weren't any paths requiring staging, then we're done.\n\tif len(paths) == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ The remote side of the connection should now forward rsync operations, so\n\t\/\/ we need to decode and forward them to the receiver. 
If this operation\n\t\/\/ completes successfully, staging is complete and successful.\n\tdecoder := newProtobufRsyncDecoder(s.decoder)\n\tif err = rsync.DecodeToReceiver(decoder, uint64(len(paths)), receiver); err != nil {\n\t\treturn errors.Wrap(err, \"unable to decode and forward rsync operations\")\n\t}\n\n\t\/\/ Success.\n\treturn nil\n}\n\n\/\/ serveSupply serves a supply request.\nfunc (s *endpointServer) serveSupply(request *SupplyRequest) error {\n\t\/\/ Ensure the request is valid.\n\tif err := request.ensureValid(); err != nil {\n\t\treturn errors.Wrap(err, \"invalid supply request\")\n\t}\n\n\t\/\/ Create an encoding receiver that can transmit rsync operations to the\n\t\/\/ remote.\n\tencoder := newProtobufRsyncEncoder(s.encoder)\n\treceiver := rsync.NewEncodingReceiver(encoder)\n\n\t\/\/ Perform supplying.\n\tif err := s.endpoint.Supply(request.Paths, request.Signatures, receiver); err != nil {\n\t\treturn errors.Wrap(err, \"unable to perform supplying\")\n\t}\n\n\t\/\/ Success.\n\treturn nil\n}\n\n\/\/ serveTransition serves a transition request.\nfunc (s *endpointServer) serveTransition(request *TransitionRequest) error {\n\t\/\/ Ensure the request is valid.\n\tif err := request.ensureValid(); err != nil {\n\t\treturn errors.Wrap(err, \"invalid transition request\")\n\t}\n\n\t\/\/ Perform the transition.\n\tresults, problems, stagerMissingFiles, err := s.endpoint.Transition(request.Transitions)\n\tif err != nil {\n\t\treturn s.encoder.Encode(&TransitionResponse{Error: err.Error()})\n\t}\n\n\t\/\/ HACK: Wrap the results in Archives since Protocol Buffers can't encode\n\t\/\/ nil pointers in the result array.\n\twrappedResults := make([]*core.Archive, len(results))\n\tfor r, result := range results {\n\t\twrappedResults[r] = &core.Archive{Root: result}\n\t}\n\n\t\/\/ Send the response.\n\tresponse := &TransitionResponse{\n\t\tResults: wrappedResults,\n\t\tProblems: problems,\n\t\tStagerMissingFiles: stagerMissingFiles,\n\t}\n\tif err = s.encoder.Encode(response); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Success.\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * This file is part of the KubeVirt project\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * Copyright 2018 Red Hat, Inc.\n *\n *\/\n\npackage network\n\n\/\/go:generate mockgen -source $GOFILE -package=$GOPACKAGE -destination=generated_mock_$GOFILE\n\n\/*\n ATTENTION: Rerun code generators when interface signatures are modified.\n*\/\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\n\t\"github.com\/vishvananda\/netlink\"\n\n\t\"kubevirt.io\/kubevirt\/pkg\/log\"\n\t\"kubevirt.io\/kubevirt\/pkg\/precond\"\n\t\"kubevirt.io\/kubevirt\/pkg\/virt-launcher\/virtwrap\/api\"\n\t\"kubevirt.io\/kubevirt\/pkg\/virt-launcher\/virtwrap\/network\/dhcp\"\n\n\tlmf \"github.com\/subgraph\/libmacouflage\"\n)\n\nconst (\n\tpodInterface = \"eth0\"\n\tguestDNS = \"8.8.8.8\"\n)\n\nvar interfaceCacheFile = \"\/var\/run\/kubevirt-private\/interface-cache.json\"\nvar 
bridgeFakeIP = \"10.11.12.13\/24\"\n\n\/\/ only used by unit test suite\nfunc setInterfaceCacheFile(path string) {\n\tinterfaceCacheFile = path\n}\n\ntype VIF struct {\n\tName string\n\tIP netlink.Addr\n\tMAC net.HardwareAddr\n\tGateway net.IP\n}\n\ntype NetworkHandler interface {\n\tLinkByName(name string) (netlink.Link, error)\n\tAddrList(link netlink.Link, family int) ([]netlink.Addr, error)\n\tRouteList(link netlink.Link, family int) ([]netlink.Route, error)\n\tAddrDel(link netlink.Link, addr *netlink.Addr) error\n\tAddrAdd(link netlink.Link, addr *netlink.Addr) error\n\tLinkSetDown(link netlink.Link) error\n\tLinkSetUp(link netlink.Link) error\n\tLinkAdd(link netlink.Link) error\n\tParseAddr(s string) (*netlink.Addr, error)\n\tChangeMacAddr(iface string) (net.HardwareAddr, error)\n\tGetMacDetails(iface string) (net.HardwareAddr, error)\n\tStartDHCP(nic *VIF, serverAddr *netlink.Addr)\n}\n\ntype NetworkUtilsHandler struct{}\n\nvar Handler NetworkHandler\n\nfunc (h *NetworkUtilsHandler) LinkByName(name string) (netlink.Link, error) {\n\treturn netlink.LinkByName(name)\n}\nfunc (h *NetworkUtilsHandler) AddrList(link netlink.Link, family int) ([]netlink.Addr, error) {\n\treturn netlink.AddrList(link, family)\n}\nfunc (h *NetworkUtilsHandler) RouteList(link netlink.Link, family int) ([]netlink.Route, error) {\n\treturn netlink.RouteList(link, family)\n}\nfunc (h *NetworkUtilsHandler) AddrDel(link netlink.Link, addr *netlink.Addr) error {\n\treturn netlink.AddrDel(link, addr)\n}\nfunc (h *NetworkUtilsHandler) LinkSetDown(link netlink.Link) error {\n\treturn netlink.LinkSetDown(link)\n}\nfunc (h *NetworkUtilsHandler) LinkSetUp(link netlink.Link) error {\n\treturn netlink.LinkSetUp(link)\n}\nfunc (h *NetworkUtilsHandler) LinkAdd(link netlink.Link) error {\n\treturn netlink.LinkAdd(link)\n}\nfunc (h *NetworkUtilsHandler) ParseAddr(s string) (*netlink.Addr, error) {\n\treturn netlink.ParseAddr(s)\n}\nfunc (h *NetworkUtilsHandler) AddrAdd(link netlink.Link, addr *netlink.Addr) error {\n\treturn netlink.AddrAdd(link, addr)\n}\n\n\/\/ GetMacDetails from an interface\nfunc (h *NetworkUtilsHandler) GetMacDetails(iface string) (net.HardwareAddr, error) {\n\tcurrentMac, err := lmf.GetCurrentMac(iface)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to get mac information for interface: %s\", iface)\n\t\treturn nil, err\n\t}\n\treturn currentMac, nil\n}\n\n\/\/ ChangeMacAddr changes the MAC address for a given interface\nfunc (h *NetworkUtilsHandler) ChangeMacAddr(iface string) (net.HardwareAddr, error) {\n\tvar mac net.HardwareAddr\n\n\tcurrentMac, err := Handler.GetMacDetails(iface)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tchanged, err := lmf.SpoofMacRandom(iface, false)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to spoof MAC for iface: %s\", iface)\n\t\treturn nil, err\n\t}\n\n\tif changed {\n\t\tmac, err = Handler.GetMacDetails(iface)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlog.Log.Reason(err).Errorf(\"Updated Mac for iface: %s - %s\", iface, mac)\n\t}\n\treturn currentMac, nil\n}\n\nfunc (h *NetworkUtilsHandler) StartDHCP(nic *VIF, serverAddr *netlink.Addr) {\n\t\/\/ panic in case the DHCP server failed during the vm creation\n\t\/\/ but ignore dhcp errors when the vm is destroyed or shutting down\n\tif err := DHCPServer(\n\t\tnic.MAC,\n\t\tnic.IP.IP,\n\t\tnic.IP.Mask,\n\t\tapi.DefaultBridgeName,\n\t\tserverAddr.IP,\n\t\tnic.Gateway,\n\t\tnet.ParseIP(guestDNS),\n\t); err != nil {\n\t\tlog.Log.Errorf(\"failed to run DHCP: %v\", 
err)\n\t\tpanic(err)\n\t}\n}\n\n\/\/ Allow mocking for tests\nvar SetupPodNetwork = SetupDefaultPodNetwork\nvar DHCPServer = dhcp.SingleClientDHCPServer\n\nfunc initHandler() {\n\tif Handler == nil {\n\t\tHandler = &NetworkUtilsHandler{}\n\t}\n}\n\n\/\/ SetupDefaultPodNetwork will prepare the pod management network to be used by a virtual machine\n\/\/ which will own the pod network IP and MAC. The pod's MAC address will be changed to a\n\/\/ random address and its IP will be deleted. This will also create a bridge device with a fake IP.\n\/\/ A DHCP server will be started and bound to the bridge interface to serve the original pod IP\n\/\/ to the guest OS\nfunc SetupDefaultPodNetwork(domain *api.Domain) error {\n\tprecond.MustNotBeNil(domain)\n\tinitHandler()\n\n\t\/\/ There should always be a pre-configured interface for the default pod interface.\n\tdefaultIconf := domain.Spec.Devices.Interfaces[0]\n\n\tifconf, err := getCachedInterface()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif ifconf == nil {\n\t\tvif := &VIF{Name: podInterface}\n\t\tpodNicLink, err := discoverPodNetworkInterface(vif)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := preparePodNetworkInterfaces(vif, podNicLink); err != nil {\n\t\t\tlog.Log.Reason(err).Critical(\"failed to prepare pod networking\")\n\t\t\tpanic(err)\n\t\t}\n\n\t\t\/\/ Start DHCP Server\n\t\tfakeServerAddr, _ := netlink.ParseAddr(bridgeFakeIP)\n\t\tgo Handler.StartDHCP(vif, fakeServerAddr)\n\n\t\t\/\/ After the network is configured, cache the result\n\t\t\/\/ in case this function is called again.\n\t\tdecorateInterfaceConfig(vif, &defaultIconf)\n\t\terr = setCachedInterface(&defaultIconf)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\t\/\/ TODO:(vladikr) Currently we support only one interface per vm.\n\t\/\/ Improve this once we'll start supporting more.\n\tif len(domain.Spec.Devices.Interfaces) == 0 {\n\t\tdomain.Spec.Devices.Interfaces = append(domain.Spec.Devices.Interfaces, defaultIconf)\n\t} else {\n\t\tdomain.Spec.Devices.Interfaces[0] = defaultIconf\n\t}\n\n\treturn nil\n}\n\nfunc setCachedInterface(ifconf *api.Interface) error {\n\tbuf, err := json.MarshalIndent(&ifconf, \"\", \" \")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error marshaling interface cache: %v\", err)\n\t}\n\terr = ioutil.WriteFile(interfaceCacheFile, buf, 0644)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error writing interface cache %v\", err)\n\t}\n\treturn nil\n}\n\nfunc getCachedInterface() (*api.Interface, error) {\n\tbuf, err := ioutil.ReadFile(interfaceCacheFile)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\tifconf := api.Interface{}\n\terr = json.Unmarshal(buf, &ifconf)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error unmarshaling interface: %v\", err)\n\t}\n\treturn &ifconf, nil\n}\n\nfunc discoverPodNetworkInterface(nic *VIF) (netlink.Link, error) {\n\tnicLink, err := Handler.LinkByName(podInterface)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to get a link for interface: %s\", podInterface)\n\t\treturn nil, err\n\t}\n\n\t\/\/ get IP address\n\taddrList, err := Handler.AddrList(nicLink, netlink.FAMILY_V4)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to get an ip address for %s\", podInterface)\n\t\treturn nil, err\n\t}\n\tif len(addrList) == 0 {\n\t\treturn nil, fmt.Errorf(\"No IP address found on %s\", podInterface)\n\t}\n\tnic.IP = addrList[0]\n\n\t\/\/ Get interface gateway\n\troutes, err := Handler.RouteList(nicLink, 
netlink.FAMILY_V4)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to get routes for %s\", podInterface)\n\t\treturn nil, err\n\t}\n\tif len(routes) == 0 {\n\t\treturn nil, fmt.Errorf(\"No gateway address found in routes for %s\", podInterface)\n\t}\n\tnic.Gateway = routes[0].Gw\n\n\t\/\/ Get interface MAC address\n\tmac, err := Handler.GetMacDetails(podInterface)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to get MAC for %s\", podInterface)\n\t\treturn nil, err\n\t}\n\tnic.MAC = mac\n\treturn nicLink, nil\n}\n\nfunc preparePodNetworkInterfaces(nic *VIF, nicLink netlink.Link) error {\n\t\/\/ Remove IP from POD interface\n\terr := Handler.AddrDel(nicLink, &nic.IP)\n\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to delete address for interface: %s\", podInterface)\n\t\treturn err\n\t}\n\n\t\/\/ Set interface link to down to change its MAC address\n\terr = Handler.LinkSetDown(nicLink)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to bring link down for interface: %s\", podInterface)\n\t\treturn err\n\t}\n\n\t_, err = Handler.ChangeMacAddr(podInterface)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = Handler.LinkSetUp(nicLink)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to bring link up for interface: %s\", podInterface)\n\t\treturn err\n\t}\n\n\t\/\/ Create a bridge\n\tbridge := &netlink.Bridge{\n\t\tLinkAttrs: netlink.LinkAttrs{\n\t\t\tName: api.DefaultBridgeName,\n\t\t},\n\t}\n\terr = Handler.LinkAdd(bridge)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to create a bridge\")\n\t\treturn err\n\t}\n\tnetlink.LinkSetMaster(nicLink, bridge)\n\n\terr = Handler.LinkSetUp(bridge)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to bring link up for interface: %s\", api.DefaultBridgeName)\n\t\treturn err\n\t}\n\n\t\/\/ set fake ip on a bridge\n\tfakeaddr, err := Handler.ParseAddr(bridgeFakeIP)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to parse fake address for interface: %s\", api.DefaultBridgeName)\n\t\treturn err\n\t}\n\n\tif err := Handler.AddrAdd(bridge, fakeaddr); err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to set bridge IP\")\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc decorateInterfaceConfig(vif *VIF, ifconf *api.Interface) {\n\n\tifconf.MAC = &api.MAC{MAC: vif.MAC.String()}\n}\n<commit_msg>Provide all relevant POD routes to the guest<commit_after>\/*\n * This file is part of the KubeVirt project\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * Copyright 2018 Red Hat, Inc.\n *\n *\/\n\npackage network\n\n\/\/go:generate mockgen -source $GOFILE -package=$GOPACKAGE -destination=generated_mock_$GOFILE\n\n\/*\n ATTENTION: Rerun code generators when interface signatures are modified.\n*\/\n\nimport 
(\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\n\t\"github.com\/vishvananda\/netlink\"\n\n\t\"kubevirt.io\/kubevirt\/pkg\/log\"\n\t\"kubevirt.io\/kubevirt\/pkg\/precond\"\n\t\"kubevirt.io\/kubevirt\/pkg\/virt-launcher\/virtwrap\/api\"\n\t\"kubevirt.io\/kubevirt\/pkg\/virt-launcher\/virtwrap\/network\/dhcp\"\n\n\tlmf \"github.com\/subgraph\/libmacouflage\"\n)\n\nconst (\n\tpodInterface = \"eth0\"\n\tguestDNS = \"8.8.8.8\"\n)\n\nvar interfaceCacheFile = \"\/var\/run\/kubevirt-private\/interface-cache.json\"\nvar bridgeFakeIP = \"10.11.12.13\/24\"\n\n\/\/ only used by unit test suite\nfunc setInterfaceCacheFile(path string) {\n\tinterfaceCacheFile = path\n}\n\ntype VIF struct {\n\tName string\n\tIP netlink.Addr\n\tMAC net.HardwareAddr\n\tGateway net.IP\n\tRoutes *[]netlink.Route\n}\n\ntype NetworkHandler interface {\n\tLinkByName(name string) (netlink.Link, error)\n\tAddrList(link netlink.Link, family int) ([]netlink.Addr, error)\n\tRouteList(link netlink.Link, family int) ([]netlink.Route, error)\n\tAddrDel(link netlink.Link, addr *netlink.Addr) error\n\tAddrAdd(link netlink.Link, addr *netlink.Addr) error\n\tLinkSetDown(link netlink.Link) error\n\tLinkSetUp(link netlink.Link) error\n\tLinkAdd(link netlink.Link) error\n\tParseAddr(s string) (*netlink.Addr, error)\n\tChangeMacAddr(iface string) (net.HardwareAddr, error)\n\tGetMacDetails(iface string) (net.HardwareAddr, error)\n\tStartDHCP(nic *VIF, serverAddr *netlink.Addr)\n}\n\ntype NetworkUtilsHandler struct{}\n\nvar Handler NetworkHandler\n\nfunc (h *NetworkUtilsHandler) LinkByName(name string) (netlink.Link, error) {\n\treturn netlink.LinkByName(name)\n}\nfunc (h *NetworkUtilsHandler) AddrList(link netlink.Link, family int) ([]netlink.Addr, error) {\n\treturn netlink.AddrList(link, family)\n}\nfunc (h *NetworkUtilsHandler) RouteList(link netlink.Link, family int) ([]netlink.Route, error) {\n\treturn netlink.RouteList(link, family)\n}\nfunc (h *NetworkUtilsHandler) AddrDel(link netlink.Link, addr *netlink.Addr) error {\n\treturn netlink.AddrDel(link, addr)\n}\nfunc (h *NetworkUtilsHandler) LinkSetDown(link netlink.Link) error {\n\treturn netlink.LinkSetDown(link)\n}\nfunc (h *NetworkUtilsHandler) LinkSetUp(link netlink.Link) error {\n\treturn netlink.LinkSetUp(link)\n}\nfunc (h *NetworkUtilsHandler) LinkAdd(link netlink.Link) error {\n\treturn netlink.LinkAdd(link)\n}\nfunc (h *NetworkUtilsHandler) ParseAddr(s string) (*netlink.Addr, error) {\n\treturn netlink.ParseAddr(s)\n}\nfunc (h *NetworkUtilsHandler) AddrAdd(link netlink.Link, addr *netlink.Addr) error {\n\treturn netlink.AddrAdd(link, addr)\n}\n\n\/\/ GetMacDetails from an interface\nfunc (h *NetworkUtilsHandler) GetMacDetails(iface string) (net.HardwareAddr, error) {\n\tcurrentMac, err := lmf.GetCurrentMac(iface)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to get mac information for interface: %s\", iface)\n\t\treturn nil, err\n\t}\n\treturn currentMac, nil\n}\n\n\/\/ ChangeMacAddr changes the MAC address for a agiven interface\nfunc (h *NetworkUtilsHandler) ChangeMacAddr(iface string) (net.HardwareAddr, error) {\n\tvar mac net.HardwareAddr\n\n\tcurrentMac, err := Handler.GetMacDetails(iface)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tchanged, err := lmf.SpoofMacRandom(iface, false)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to spoof MAC for iface: %s\", iface)\n\t\treturn nil, err\n\t}\n\n\tif changed {\n\t\tmac, err = Handler.GetMacDetails(iface)\n\t\tif err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\tlog.Log.Reason(err).Errorf(\"Updated Mac for iface: %s - %s\", iface, mac)\n\t}\n\treturn currentMac, nil\n}\n\nfunc (h *NetworkUtilsHandler) StartDHCP(nic *VIF, serverAddr *netlink.Addr) {\n\t\/\/ panic in case the DHCP server failed during the vm creation\n\t\/\/ but ignore dhcp errors when the vm is destroyed or shutting down\n\tif err := DHCPServer(\n\t\tnic.MAC,\n\t\tnic.IP.IP,\n\t\tnic.IP.Mask,\n\t\tapi.DefaultBridgeName,\n\t\tserverAddr.IP,\n\t\tnic.Gateway,\n\t\tnet.ParseIP(guestDNS),\n\t\tnic.Routes,\n\t); err != nil {\n\t\tlog.Log.Errorf(\"failed to run DHCP: %v\", err)\n\t\tpanic(err)\n\t}\n}\n\n\/\/ Allow mocking for tests\nvar SetupPodNetwork = SetupDefaultPodNetwork\nvar DHCPServer = dhcp.SingleClientDHCPServer\n\nfunc initHandler() {\n\tif Handler == nil {\n\t\tHandler = &NetworkUtilsHandler{}\n\t}\n}\n\n\/\/ SetupDefaultPodNetwork will prepare the pod management network to be used by a virtual machine\n\/\/ which will own the pod network IP and MAC. The pod's MAC address will be changed to a\n\/\/ random address and its IP will be deleted. This will also create a bridge device with a fake IP.\n\/\/ A DHCP server will be started and bound to the bridge interface to serve the original pod IP\n\/\/ to the guest OS\nfunc SetupDefaultPodNetwork(domain *api.Domain) error {\n\tprecond.MustNotBeNil(domain)\n\tinitHandler()\n\n\t\/\/ There should always be a pre-configured interface for the default pod interface.\n\tdefaultIconf := domain.Spec.Devices.Interfaces[0]\n\n\tifconf, err := getCachedInterface()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif ifconf == nil {\n\t\tvif := &VIF{Name: podInterface}\n\t\tpodNicLink, err := discoverPodNetworkInterface(vif)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := preparePodNetworkInterfaces(vif, podNicLink); err != nil {\n\t\t\tlog.Log.Reason(err).Critical(\"failed to prepare pod networking\")\n\t\t\tpanic(err)\n\t\t}\n\n\t\t\/\/ Start DHCP Server\n\t\tfakeServerAddr, _ := netlink.ParseAddr(bridgeFakeIP)\n\t\tgo Handler.StartDHCP(vif, fakeServerAddr)\n\n\t\t\/\/ After the network is configured, cache the result\n\t\t\/\/ in case this function is called again.\n\t\tdecorateInterfaceConfig(vif, &defaultIconf)\n\t\terr = setCachedInterface(&defaultIconf)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\t\/\/ TODO:(vladikr) Currently we support only one interface per vm.\n\t\/\/ Improve this once we'll start supporting more.\n\tif len(domain.Spec.Devices.Interfaces) == 0 {\n\t\tdomain.Spec.Devices.Interfaces = append(domain.Spec.Devices.Interfaces, defaultIconf)\n\t} else {\n\t\tdomain.Spec.Devices.Interfaces[0] = defaultIconf\n\t}\n\n\treturn nil\n}\n\nfunc setCachedInterface(ifconf *api.Interface) error {\n\tbuf, err := json.MarshalIndent(&ifconf, \"\", \" \")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error marshaling interface cache: %v\", err)\n\t}\n\terr = ioutil.WriteFile(interfaceCacheFile, buf, 0644)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error writing interface cache %v\", err)\n\t}\n\treturn nil\n}\n\nfunc getCachedInterface() (*api.Interface, error) {\n\tbuf, err := ioutil.ReadFile(interfaceCacheFile)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\tifconf := api.Interface{}\n\terr = json.Unmarshal(buf, &ifconf)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error unmarshaling interface: %v\", err)\n\t}\n\treturn &ifconf, nil\n}\n\nfunc discoverPodNetworkInterface(nic *VIF) (netlink.Link, error) {\n\tnicLink, err := 
Handler.LinkByName(podInterface)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to get a link for interface: %s\", podInterface)\n\t\treturn nil, err\n\t}\n\n\t\/\/ get IP address\n\taddrList, err := Handler.AddrList(nicLink, netlink.FAMILY_V4)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to get an ip address for %s\", podInterface)\n\t\treturn nil, err\n\t}\n\tif len(addrList) == 0 {\n\t\treturn nil, fmt.Errorf(\"No IP address found on %s\", podInterface)\n\t}\n\tnic.IP = addrList[0]\n\n\t\/\/ Get interface gateway\n\troutes, err := Handler.RouteList(nicLink, netlink.FAMILY_V4)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to get routes for %s\", podInterface)\n\t\treturn nil, err\n\t}\n\tif len(routes) == 0 {\n\t\treturn nil, fmt.Errorf(\"No gateway address found in routes for %s\", podInterface)\n\t}\n\tnic.Gateway = routes[0].Gw\n\tvar dhcpRoutes []netlink.Route\n\tif len(routes) > 1 {\n\t\t\/\/ Filter out irrelevant routes\n\t\tfor _, route := range routes[1:] {\n\t\t\tif !route.Src.Equal(nic.IP.IP) {\n\t\t\t\tdhcpRoutes = append(dhcpRoutes, route)\n\t\t\t}\n\t\t}\n\t\tnic.Routes = &dhcpRoutes\n\t}\n\n\t\/\/ Get interface MAC address\n\tmac, err := Handler.GetMacDetails(podInterface)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to get MAC for %s\", podInterface)\n\t\treturn nil, err\n\t}\n\tnic.MAC = mac\n\treturn nicLink, nil\n}\n\nfunc preparePodNetworkInterfaces(nic *VIF, nicLink netlink.Link) error {\n\t\/\/ Remove IP from POD interface\n\terr := Handler.AddrDel(nicLink, &nic.IP)\n\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to delete address for interface: %s\", podInterface)\n\t\treturn err\n\t}\n\n\t\/\/ Set interface link to down to change its MAC address\n\terr = Handler.LinkSetDown(nicLink)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to bring link down for interface: %s\", podInterface)\n\t\treturn err\n\t}\n\n\t_, err = Handler.ChangeMacAddr(podInterface)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = Handler.LinkSetUp(nicLink)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to bring link up for interface: %s\", podInterface)\n\t\treturn err\n\t}\n\n\t\/\/ Create a bridge\n\tbridge := &netlink.Bridge{\n\t\tLinkAttrs: netlink.LinkAttrs{\n\t\t\tName: api.DefaultBridgeName,\n\t\t},\n\t}\n\terr = Handler.LinkAdd(bridge)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to create a bridge\")\n\t\treturn err\n\t}\n\tnetlink.LinkSetMaster(nicLink, bridge)\n\n\terr = Handler.LinkSetUp(bridge)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to bring link up for interface: %s\", api.DefaultBridgeName)\n\t\treturn err\n\t}\n\n\t\/\/ set fake ip on a bridge\n\tfakeaddr, err := Handler.ParseAddr(bridgeFakeIP)\n\tif err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to parse fake address for interface: %s\", api.DefaultBridgeName)\n\t\treturn err\n\t}\n\n\tif err := Handler.AddrAdd(bridge, fakeaddr); err != nil {\n\t\tlog.Log.Reason(err).Errorf(\"failed to set bridge IP\")\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc decorateInterfaceConfig(vif *VIF, ifconf *api.Interface) {\n\n\tifconf.MAC = &api.MAC{MAC: vif.MAC.String()}\n}\n<|endoftext|>"} {"text":"<commit_before>package render\n\nimport (\n\t\"io\"\n\t\"testing\"\n\n\t\"github.com\/cweill\/gotests\/models\"\n)\n\nfunc TestTestCases(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tw io.Writer\n\t\tf *models.Function\n\t\twantErr bool\n\t}{\n\t\/\/ TODO: Add test cases.\n\t}\n\tfor _, tt := range 
tests {\n\t\tif err := TestCases(tt.w, tt.f); (err != nil) != tt.wantErr {\n\t\t\tt.Errorf(\"%v. TestCases() error = %v, wantErr: %v\", tt.name, err, tt.wantErr)\n\t\t}\n\t}\n}\n<commit_msg>Remove accidentally generated test.<commit_after><|endoftext|>"} {"text":"<commit_before>package render\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n\t\"testing\"\n\n\t\"github.com\/200sc\/go-dist\/colorrange\"\n\t\"github.com\/200sc\/go-dist\/intrange\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nvar (\n\t\/\/ this is excessive for a lot of tests\n\t\/\/ but it takes away some decision making\n\t\/\/ and could reveal problems that probably aren't there\n\t\/\/ but hey you never know\n\twidths = intrange.NewLinear(1, 10)\n\theights = intrange.NewLinear(1, 10)\n\tcolors = colorrange.NewLinear(color.RGBA{0, 0, 0, 0}, color.RGBA{255, 255, 255, 255})\n\tseeds = intrange.NewLinear(0, 10000)\n)\n\nconst (\n\tfuzzCt = 10\n)\n\n\/\/ Todo for color boxes, and things that take w\/h --\n\/\/ return an error for negative (or 0 in some cases) w \/ h. The engine assumes\n\/\/ right now that the inputs will be valid, which is a mistake\n\/\/ this is a breaking change for 2.0\nfunc TestColorBoxFuzz(t *testing.T) {\n\tfor i := 0; i < fuzzCt; i++ {\n\t\tw := widths.Poll()\n\t\th := heights.Poll()\n\t\tc := colors.Poll()\n\t\tr, g, b, a := c.RGBA()\n\t\tcb := NewColorBox(w, h, c)\n\t\trgba := cb.GetRGBA()\n\t\tfor x := 0; x < w; x++ {\n\t\t\tfor y := 0; y < h; y++ {\n\t\t\t\tc2 := rgba.At(x, y)\n\t\t\t\tr2, g2, b2, a2 := c2.RGBA()\n\t\t\t\tassert.Equal(t, r, r2)\n\t\t\t\tassert.Equal(t, g, g2)\n\t\t\t\tassert.Equal(t, b, b2)\n\t\t\t\tassert.Equal(t, a, a2)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ GradientBoxes should use color ranges internally\n\nfunc TestNoiseBoxFuzz(t *testing.T) {\n\tfor i := 0; i < fuzzCt; i++ {\n\t\tw := widths.Poll()\n\t\th := heights.Poll()\n\t\tseed := int64(seeds.Poll())\n\t\tnb := NewSeededNoiseBox(w, h, seed)\n\t\tnb2 := NewSeededNoiseBox(w, h, seed+1)\n\t\t\/\/ This is a little awkward test, we could predict what a given seed\n\t\t\/\/ will give us but this just confirms that adjacent seeds won't give\n\t\t\/\/ us the same rgba.\n\t\tassert.NotEqual(t, nb.GetRGBA(), nb2.GetRGBA())\n\t}\n}\n\nfunc TestNoiseBox(t *testing.T) {\n\t\/\/ I'm not sure what exactly we would test about these.\n\tNewNoiseBox(10, 10)\n\tNewNoiseSequence(10, 10, 10, 10)\n\n}\n\nfunc TestEmptySpriteFuzz(t *testing.T) {\n\tfor i := 0; i < fuzzCt; i++ {\n\t\tw := widths.Poll()\n\t\th := heights.Poll()\n\t\ts := NewEmptySprite(0, 0, w, h)\n\t\trgba := s.GetRGBA()\n\t\tvar zero uint32\n\t\tfor x := 0; x < w; x++ {\n\t\t\tfor y := 0; y < h; y++ {\n\t\t\t\tc := rgba.At(x, y)\n\t\t\t\tr, g, b, a := c.RGBA()\n\t\t\t\tassert.Equal(t, r, zero)\n\t\t\t\tassert.Equal(t, g, zero)\n\t\t\t\tassert.Equal(t, b, zero)\n\t\t\t\tassert.Equal(t, a, zero)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestSpriteFuncs(t *testing.T) {\n\ts := NewEmptySprite(0, 0, 1, 1)\n\ts2 := Sprite{}\n\ts3 := s.Copy()\n\n\t\/\/ Dims\n\n\tw, h := s.GetDims()\n\tassert.Equal(t, w, 1)\n\tassert.Equal(t, h, 1)\n\n\tw, h = s2.GetDims()\n\tassert.Equal(t, w, 6)\n\tassert.Equal(t, h, 6)\n\n\tw, h = s3.GetDims()\n\tassert.Equal(t, w, 1)\n\tassert.Equal(t, h, 1)\n\n\t\/\/ IsNil\n\n\tassert.Equal(t, false, s.IsNil())\n\tassert.Equal(t, true, s2.IsNil())\n\tassert.Equal(t, false, s3.(*Sprite).IsNil())\n\n\t\/\/ Set\/GetRGBA\n\n\trgba := image.NewRGBA(image.Rect(0, 0, 4, 4))\n\ts.SetRGBA(rgba)\n\trgba2 := s.GetRGBA()\n\tassert.Equal(t, rgba, rgba2)\n}\n\nfunc TestOverlaySprites(t 
*testing.T) {\n\t\/\/ This makes me wonder if overlay is easy enough to use\n\trgba := image.NewRGBA(image.Rect(0, 0, 2, 2))\n\trgba.Set(0, 0, color.RGBA{255, 0, 0, 255})\n\t\/\/ It should probably take in pointers\n\tsprites := []Sprite{\n\t\t*NewColorBox(2, 2, color.RGBA{0, 255, 0, 255}),\n\t\t*NewSprite(0, 0, rgba),\n\t}\n\toverlay := OverlaySprites(sprites)\n\trgba = overlay.GetRGBA()\n\tshouldRed := rgba.At(0, 0)\n\tshouldGreen := rgba.At(0, 1)\n\tassert.Equal(t, shouldRed, color.RGBA{255, 0, 0, 255})\n\tassert.Equal(t, shouldGreen, color.RGBA{0, 255, 0, 255})\n}\n\n\/\/ Can't test ParseSubSprite without loading in something for it to return,\n\/\/ ParseSubSprite also ignores an error for no good reason?\nfunc TestParseSubSprite(t *testing.T) {\n\tloadedImages[\"test\"] = NewColorBox(100, 100, color.RGBA{255, 0, 0, 255}).GetRGBA()\n\tsp := ParseSubSprite(\"test\", 0, 0, 25, 25, 0)\n\trgba := sp.GetRGBA()\n\tfor x := 0; x < 25; x++ {\n\t\tfor y := 0; y < 25; y++ {\n\t\t\tc := rgba.At(x, y)\n\t\t\tr, g, b, a := c.RGBA()\n\t\t\tassert.Equal(t, r, uint32(65535))\n\t\t\tassert.Equal(t, g, uint32(0))\n\t\t\tassert.Equal(t, b, uint32(0))\n\t\t\tassert.Equal(t, a, uint32(65535))\n\t\t}\n\t}\n\n}\n\nfunc TestModifySprite(t *testing.T) {\n\ts := NewColorBox(10, 10, color.RGBA{255, 0, 0, 255})\n\ts2 := s.Modify(Cut(5, 5))\n\tw, h := s2.GetDims()\n\tassert.Equal(t, 5, w)\n\tassert.Equal(t, 5, h)\n}\n\n\/\/ We'll cover drawing elsewhere\n<commit_msg>Added gradient box tests<commit_after>package render\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n\t\"testing\"\n\n\t\"github.com\/200sc\/go-dist\/colorrange\"\n\t\"github.com\/200sc\/go-dist\/intrange\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nvar (\n\t\/\/ this is excessive for a lot of tests\n\t\/\/ but it takes away some decision making\n\t\/\/ and could reveal problems that probably aren't there\n\t\/\/ but hey you never know\n\twidths = intrange.NewLinear(1, 10)\n\theights = intrange.NewLinear(1, 10)\n\tcolors = colorrange.NewLinear(color.RGBA{0, 0, 0, 0}, color.RGBA{255, 255, 255, 255})\n\tseeds = intrange.NewLinear(0, 10000)\n)\n\nconst (\n\tfuzzCt = 10\n)\n\n\/\/ Todo for color boxes, and things that take w\/h --\n\/\/ return an error for negative (or 0 in some cases) w \/ h. 
The engine assumes\n\/\/ right now that the inputs will be valid, which is a mistake\n\/\/ this is a breaking change for 2.0\nfunc TestColorBoxFuzz(t *testing.T) {\n\tfor i := 0; i < fuzzCt; i++ {\n\t\tw := widths.Poll()\n\t\th := heights.Poll()\n\t\tc := colors.Poll()\n\t\tr, g, b, a := c.RGBA()\n\t\tcb := NewColorBox(w, h, c)\n\t\trgba := cb.GetRGBA()\n\t\tfor x := 0; x < w; x++ {\n\t\t\tfor y := 0; y < h; y++ {\n\t\t\t\tc2 := rgba.At(x, y)\n\t\t\t\tr2, g2, b2, a2 := c2.RGBA()\n\t\t\t\tassert.Equal(t, r, r2)\n\t\t\t\tassert.Equal(t, g, g2)\n\t\t\t\tassert.Equal(t, b, b2)\n\t\t\t\tassert.Equal(t, a, a2)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ GradientBoxes should use color ranges internally?\nfunc TestGradientBoxFuzz(t *testing.T) {\n\tfor i := 0; i < fuzzCt; i++ {\n\t\tw := widths.Poll()\n\t\th := heights.Poll()\n\t\tc1 := colors.Poll()\n\t\tc2 := colors.Poll()\n\t\tr, g, b, a := c1.RGBA()\n\t\tr2, g2, b2, a2 := c2.RGBA()\n\t\tcb := NewHorizontalGradientBox(w, h, c1, c2)\n\t\trgba := cb.GetRGBA()\n\t\tfor x := 0; x < w; x++ {\n\t\t\tc3 := rgba.At(x, 0)\n\t\t\tr3, g3, b3, a3 := c3.RGBA()\n\t\t\tprogress := float64(x) \/ float64(w)\n\t\t\t\/\/ This sort of color math is frustrating\n\t\t\tc4 := color.RGBA{\n\t\t\t\tuint8(uint16OnScale(r, r2, progress) \/ 256),\n\t\t\t\tuint8(uint16OnScale(g, g2, progress) \/ 256),\n\t\t\t\tuint8(uint16OnScale(b, b2, progress) \/ 256),\n\t\t\t\tuint8(uint16OnScale(a, a2, progress) \/ 256),\n\t\t\t}\n\t\t\tr4, g4, b4, a4 := c4.RGBA()\n\t\t\tassert.Equal(t, r3, r4)\n\t\t\tassert.Equal(t, g3, g4)\n\t\t\tassert.Equal(t, b3, b4)\n\t\t\tassert.Equal(t, a3, a4)\n\t\t}\n\t\tcb = NewVerticalGradientBox(w, h, c1, c2)\n\t\trgba = cb.GetRGBA()\n\t\tfor y := 0; y < h; y++ {\n\t\t\tc3 := rgba.At(0, y)\n\t\t\tr3, g3, b3, a3 := c3.RGBA()\n\t\t\tprogress := float64(y) \/ float64(h)\n\t\t\t\/\/ This sort of color math is frustrating\n\t\t\tc4 := color.RGBA{\n\t\t\t\tuint8(uint16OnScale(r, r2, progress) \/ 256),\n\t\t\t\tuint8(uint16OnScale(g, g2, progress) \/ 256),\n\t\t\t\tuint8(uint16OnScale(b, b2, progress) \/ 256),\n\t\t\t\tuint8(uint16OnScale(a, a2, progress) \/ 256),\n\t\t\t}\n\t\t\tr4, g4, b4, a4 := c4.RGBA()\n\t\t\tassert.Equal(t, r3, r4)\n\t\t\tassert.Equal(t, g3, g4)\n\t\t\tassert.Equal(t, b3, b4)\n\t\t\tassert.Equal(t, a3, a4)\n\t\t}\n\t\tcb = NewCircularGradientBox(w, h, c1, c2)\n\t\trgba = cb.GetRGBA()\n\t\tfor x := 0; x < w; x++ {\n\t\t\tfor y := 0; y < h; y++ {\n\t\t\t\tc3 := rgba.At(x, y)\n\t\t\t\tr3, g3, b3, a3 := c3.RGBA()\n\t\t\t\tprogress := CircularProgress(x, y, w, h)\n\t\t\t\t\/\/ This sort of color math is frustrating\n\t\t\t\tc4 := color.RGBA{\n\t\t\t\t\tuint8(uint16OnScale(r, r2, progress) \/ 256),\n\t\t\t\t\tuint8(uint16OnScale(g, g2, progress) \/ 256),\n\t\t\t\t\tuint8(uint16OnScale(b, b2, progress) \/ 256),\n\t\t\t\t\tuint8(uint16OnScale(a, a2, progress) \/ 256),\n\t\t\t\t}\n\t\t\t\tr4, g4, b4, a4 := c4.RGBA()\n\t\t\t\tassert.Equal(t, r3, r4)\n\t\t\t\tassert.Equal(t, g3, g4)\n\t\t\t\tassert.Equal(t, b3, b4)\n\t\t\t\tassert.Equal(t, a3, a4)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestNoiseBoxFuzz(t *testing.T) {\n\tfor i := 0; i < fuzzCt; i++ {\n\t\tw := widths.Poll()\n\t\th := heights.Poll()\n\t\tseed := int64(seeds.Poll())\n\t\tnb := NewSeededNoiseBox(w, h, seed)\n\t\tnb2 := NewSeededNoiseBox(w, h, seed+1)\n\t\t\/\/ This is a little awkward test, we could predict what a given seed\n\t\t\/\/ will give us but this just confirms that adjacent seeds won't give\n\t\t\/\/ us the same rgba.\n\t\tassert.NotEqual(t, nb.GetRGBA(), nb2.GetRGBA())\n\t}\n}\n\nfunc 
TestNoiseBox(t *testing.T) {\n\t\/\/ I'm not sure what exactly we would test about these.\n\tNewNoiseBox(10, 10)\n\tNewNoiseSequence(10, 10, 10, 10)\n\n}\n\nfunc TestEmptySpriteFuzz(t *testing.T) {\n\tfor i := 0; i < fuzzCt; i++ {\n\t\tw := widths.Poll()\n\t\th := heights.Poll()\n\t\ts := NewEmptySprite(0, 0, w, h)\n\t\trgba := s.GetRGBA()\n\t\tvar zero uint32\n\t\tfor x := 0; x < w; x++ {\n\t\t\tfor y := 0; y < h; y++ {\n\t\t\t\tc := rgba.At(x, y)\n\t\t\t\tr, g, b, a := c.RGBA()\n\t\t\t\tassert.Equal(t, r, zero)\n\t\t\t\tassert.Equal(t, g, zero)\n\t\t\t\tassert.Equal(t, b, zero)\n\t\t\t\tassert.Equal(t, a, zero)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestSpriteFuncs(t *testing.T) {\n\ts := NewEmptySprite(0, 0, 1, 1)\n\ts2 := Sprite{}\n\ts3 := s.Copy()\n\n\t\/\/ Dims\n\n\tw, h := s.GetDims()\n\tassert.Equal(t, w, 1)\n\tassert.Equal(t, h, 1)\n\n\tw, h = s2.GetDims()\n\tassert.Equal(t, w, 6)\n\tassert.Equal(t, h, 6)\n\n\tw, h = s3.GetDims()\n\tassert.Equal(t, w, 1)\n\tassert.Equal(t, h, 1)\n\n\t\/\/ IsNil\n\n\tassert.Equal(t, false, s.IsNil())\n\tassert.Equal(t, true, s2.IsNil())\n\tassert.Equal(t, false, s3.(*Sprite).IsNil())\n\n\t\/\/ Set\/GetRGBA\n\n\trgba := image.NewRGBA(image.Rect(0, 0, 4, 4))\n\ts.SetRGBA(rgba)\n\trgba2 := s.GetRGBA()\n\tassert.Equal(t, rgba, rgba2)\n}\n\nfunc TestOverlaySprites(t *testing.T) {\n\t\/\/ This makes me wonder if overlay is easy enough to use\n\trgba := image.NewRGBA(image.Rect(0, 0, 2, 2))\n\trgba.Set(0, 0, color.RGBA{255, 0, 0, 255})\n\t\/\/ It should probably take in pointers\n\tsprites := []Sprite{\n\t\t*NewColorBox(2, 2, color.RGBA{0, 255, 0, 255}),\n\t\t*NewSprite(0, 0, rgba),\n\t}\n\toverlay := OverlaySprites(sprites)\n\trgba = overlay.GetRGBA()\n\tshouldRed := rgba.At(0, 0)\n\tshouldGreen := rgba.At(0, 1)\n\tassert.Equal(t, shouldRed, color.RGBA{255, 0, 0, 255})\n\tassert.Equal(t, shouldGreen, color.RGBA{0, 255, 0, 255})\n}\n\n\/\/ Can't test ParseSubSprite without loading in something for it to return,\n\/\/ ParseSubSprite also ignores an error for no good reason?\nfunc TestParseSubSprite(t *testing.T) {\n\tloadedImages[\"test\"] = NewColorBox(100, 100, color.RGBA{255, 0, 0, 255}).GetRGBA()\n\tsp := ParseSubSprite(\"test\", 0, 0, 25, 25, 0)\n\trgba := sp.GetRGBA()\n\tfor x := 0; x < 25; x++ {\n\t\tfor y := 0; y < 25; y++ {\n\t\t\tc := rgba.At(x, y)\n\t\t\tr, g, b, a := c.RGBA()\n\t\t\tassert.Equal(t, r, uint32(65535))\n\t\t\tassert.Equal(t, g, uint32(0))\n\t\t\tassert.Equal(t, b, uint32(0))\n\t\t\tassert.Equal(t, a, uint32(65535))\n\t\t}\n\t}\n\n}\n\nfunc TestModifySprite(t *testing.T) {\n\ts := NewColorBox(10, 10, color.RGBA{255, 0, 0, 255})\n\ts2 := s.Modify(Cut(5, 5))\n\tw, h := s2.GetDims()\n\tassert.Equal(t, 5, w)\n\tassert.Equal(t, 5, h)\n}\n\n\/\/ We'll cover drawing elsewhere\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The etcd Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage grpcproxy\n\nimport (\n\t\"github.com\/coreos\/etcd\/clientv3\"\n\tpb 
\"github.com\/coreos\/etcd\/etcdserver\/etcdserverpb\"\n\t\"github.com\/coreos\/etcd\/proxy\/grpcproxy\/cache\"\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype kvProxy struct {\n\tc *clientv3.Client\n\tcache cache.Cache\n}\n\nfunc NewKvProxy(c *clientv3.Client) *kvProxy {\n\treturn &kvProxy{\n\t\tc: c,\n\t\tcache: cache.NewCache(cache.DefaultMaxEntries),\n\t}\n}\n\nfunc (p *kvProxy) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {\n\t\/\/ if request set Serializable, serve it from local cache first\n\tif r.Serializable {\n\t\tif resp, err := p.cache.Get(r); err == nil || err == cache.ErrCompacted {\n\t\t\treturn resp, err\n\t\t}\n\t}\n\n\tresp, err := p.c.Do(ctx, RangeRequestToOp(r))\n\tif err != nil {\n\t\tp.cache.Add(r, (*pb.RangeResponse)(resp.Get()))\n\t}\n\n\treturn (*pb.RangeResponse)(resp.Get()), err\n}\n\nfunc (p *kvProxy) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {\n\tresp, err := p.c.Do(ctx, PutRequestToOp(r))\n\treturn (*pb.PutResponse)(resp.Put()), err\n}\n\nfunc (p *kvProxy) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {\n\tresp, err := p.c.Do(ctx, DelRequestToOp(r))\n\treturn (*pb.DeleteRangeResponse)(resp.Del()), err\n}\n\nfunc (p *kvProxy) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {\n\ttxn := p.c.Txn(ctx)\n\tcmps := make([]clientv3.Cmp, len(r.Compare))\n\tthenops := make([]clientv3.Op, len(r.Success))\n\telseops := make([]clientv3.Op, len(r.Failure))\n\n\tfor i := range r.Compare {\n\t\tcmps[i] = (clientv3.Cmp)(*r.Compare[i])\n\t}\n\n\tfor i := range r.Success {\n\t\tthenops[i] = requestOpToOp(r.Success[i])\n\t}\n\n\tfor i := range r.Failure {\n\t\telseops[i] = requestOpToOp(r.Failure[i])\n\t}\n\n\tresp, err := txn.If(cmps...).Then(thenops...).Else(elseops...).Commit()\n\treturn (*pb.TxnResponse)(resp), err\n}\n\nfunc (p *kvProxy) Close() error {\n\treturn p.c.Close()\n}\n\nfunc requestOpToOp(union *pb.RequestOp) clientv3.Op {\n\tswitch tv := union.Request.(type) {\n\tcase *pb.RequestOp_RequestRange:\n\t\tif tv.RequestRange != nil {\n\t\t\treturn RangeRequestToOp(tv.RequestRange)\n\t\t}\n\tcase *pb.RequestOp_RequestPut:\n\t\tif tv.RequestPut != nil {\n\t\t\treturn PutRequestToOp(tv.RequestPut)\n\t\t}\n\tcase *pb.RequestOp_RequestDeleteRange:\n\t\tif tv.RequestDeleteRange != nil {\n\t\t\treturn DelRequestToOp(tv.RequestDeleteRange)\n\t\t}\n\t}\n\tpanic(\"unknown request\")\n}\n\nfunc RangeRequestToOp(r *pb.RangeRequest) clientv3.Op {\n\topts := []clientv3.OpOption{}\n\tif len(r.RangeEnd) != 0 {\n\t\topts = append(opts, clientv3.WithRange(string(r.RangeEnd)))\n\t}\n\topts = append(opts, clientv3.WithRev(r.Revision))\n\topts = append(opts, clientv3.WithLimit(r.Limit))\n\topts = append(opts, clientv3.WithSort(\n\t\tclientv3.SortTarget(r.SortTarget),\n\t\tclientv3.SortOrder(r.SortOrder)),\n\t)\n\n\tif r.Serializable {\n\t\topts = append(opts, clientv3.WithSerializable())\n\t}\n\n\treturn clientv3.OpGet(string(r.Key), opts...)\n}\n\nfunc PutRequestToOp(r *pb.PutRequest) clientv3.Op {\n\topts := []clientv3.OpOption{}\n\topts = append(opts, clientv3.WithLease(clientv3.LeaseID(r.Lease)))\n\n\treturn clientv3.OpPut(string(r.Key), string(r.Value), opts...)\n}\n\nfunc DelRequestToOp(r *pb.DeleteRangeRequest) clientv3.Op {\n\topts := []clientv3.OpOption{}\n\tif len(r.RangeEnd) != 0 {\n\t\topts = append(opts, clientv3.WithRange(string(r.RangeEnd)))\n\t}\n\n\treturn clientv3.OpDelete(string(r.Key), opts...)\n}\n\nfunc (p *kvProxy) Compact(ctx context.Context, r 
*pb.CompactionRequest) (*pb.CompactionResponse, error) {\n\tpanic(\"unimplemented\")\n}\n<commit_msg>proxy: implement compaction<commit_after>\/\/ Copyright 2016 The etcd Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage grpcproxy\n\nimport (\n\t\"github.com\/coreos\/etcd\/clientv3\"\n\tpb \"github.com\/coreos\/etcd\/etcdserver\/etcdserverpb\"\n\t\"github.com\/coreos\/etcd\/proxy\/grpcproxy\/cache\"\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype kvProxy struct {\n\tc *clientv3.Client\n\tcache cache.Cache\n}\n\nfunc NewKvProxy(c *clientv3.Client) *kvProxy {\n\treturn &kvProxy{\n\t\tc: c,\n\t\tcache: cache.NewCache(cache.DefaultMaxEntries),\n\t}\n}\n\nfunc (p *kvProxy) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {\n\t\/\/ if request set Serializable, serve it from local cache first\n\tif r.Serializable {\n\t\tif resp, err := p.cache.Get(r); err == nil || err == cache.ErrCompacted {\n\t\t\treturn resp, err\n\t\t}\n\t}\n\n\tresp, err := p.c.Do(ctx, RangeRequestToOp(r))\n\t\/\/ only cache the response when the request succeeded\n\tif err == nil {\n\t\tp.cache.Add(r, (*pb.RangeResponse)(resp.Get()))\n\t}\n\n\treturn (*pb.RangeResponse)(resp.Get()), err\n}\n\nfunc (p *kvProxy) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {\n\tresp, err := p.c.Do(ctx, PutRequestToOp(r))\n\treturn (*pb.PutResponse)(resp.Put()), err\n}\n\nfunc (p *kvProxy) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {\n\tresp, err := p.c.Do(ctx, DelRequestToOp(r))\n\treturn (*pb.DeleteRangeResponse)(resp.Del()), err\n}\n\nfunc (p *kvProxy) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {\n\ttxn := p.c.Txn(ctx)\n\tcmps := make([]clientv3.Cmp, len(r.Compare))\n\tthenops := make([]clientv3.Op, len(r.Success))\n\telseops := make([]clientv3.Op, len(r.Failure))\n\n\tfor i := range r.Compare {\n\t\tcmps[i] = (clientv3.Cmp)(*r.Compare[i])\n\t}\n\n\tfor i := range r.Success {\n\t\tthenops[i] = requestOpToOp(r.Success[i])\n\t}\n\n\tfor i := range r.Failure {\n\t\telseops[i] = requestOpToOp(r.Failure[i])\n\t}\n\n\tresp, err := txn.If(cmps...).Then(thenops...).Else(elseops...).Commit()\n\treturn (*pb.TxnResponse)(resp), err\n}\n\nfunc (p *kvProxy) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {\n\tvar opts []clientv3.CompactOption\n\tif r.Physical {\n\t\topts = append(opts, clientv3.WithCompactPhysical())\n\t}\n\n\tresp, err := p.c.KV.Compact(ctx, r.Revision, opts...)\n\tif err == nil {\n\t\tp.cache.Compact(r.Revision)\n\t}\n\n\treturn (*pb.CompactionResponse)(resp), err\n}\n\nfunc (p *kvProxy) Close() error {\n\treturn p.c.Close()\n}\n\nfunc requestOpToOp(union *pb.RequestOp) clientv3.Op {\n\tswitch tv := union.Request.(type) {\n\tcase *pb.RequestOp_RequestRange:\n\t\tif tv.RequestRange != nil {\n\t\t\treturn RangeRequestToOp(tv.RequestRange)\n\t\t}\n\tcase *pb.RequestOp_RequestPut:\n\t\tif tv.RequestPut != nil {\n\t\t\treturn PutRequestToOp(tv.RequestPut)\n\t\t}\n\tcase 
*pb.RequestOp_RequestDeleteRange:\n\t\tif tv.RequestDeleteRange != nil {\n\t\t\treturn DelRequestToOp(tv.RequestDeleteRange)\n\t\t}\n\t}\n\tpanic(\"unknown request\")\n}\n\nfunc RangeRequestToOp(r *pb.RangeRequest) clientv3.Op {\n\topts := []clientv3.OpOption{}\n\tif len(r.RangeEnd) != 0 {\n\t\topts = append(opts, clientv3.WithRange(string(r.RangeEnd)))\n\t}\n\topts = append(opts, clientv3.WithRev(r.Revision))\n\topts = append(opts, clientv3.WithLimit(r.Limit))\n\topts = append(opts, clientv3.WithSort(\n\t\tclientv3.SortTarget(r.SortTarget),\n\t\tclientv3.SortOrder(r.SortOrder)),\n\t)\n\n\tif r.Serializable {\n\t\topts = append(opts, clientv3.WithSerializable())\n\t}\n\n\treturn clientv3.OpGet(string(r.Key), opts...)\n}\n\nfunc PutRequestToOp(r *pb.PutRequest) clientv3.Op {\n\topts := []clientv3.OpOption{}\n\topts = append(opts, clientv3.WithLease(clientv3.LeaseID(r.Lease)))\n\n\treturn clientv3.OpPut(string(r.Key), string(r.Value), opts...)\n}\n\nfunc DelRequestToOp(r *pb.DeleteRangeRequest) clientv3.Op {\n\topts := []clientv3.OpOption{}\n\tif len(r.RangeEnd) != 0 {\n\t\topts = append(opts, clientv3.WithRange(string(r.RangeEnd)))\n\t}\n\n\treturn clientv3.OpDelete(string(r.Key), opts...)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage proxy\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/cloudsql-proxy\/logging\"\n)\n\nconst (\n\tDefaultRefreshCfgThrottle = time.Minute\n\tkeepAlivePeriod = time.Minute\n)\n\n\/\/ errNotCached is returned when the instance was not found in the Client's\n\/\/ cache. It is an internal detail and is not actually ever returned to the\n\/\/ user.\nvar errNotCached = errors.New(\"instance was not found in cache\")\n\n\/\/ Conn represents a connection from a client to a specific instance.\ntype Conn struct {\n\tInstance string\n\tConn net.Conn\n}\n\n\/\/ CertSource is how a Client obtains various certificates required for operation.\ntype CertSource interface {\n\t\/\/ Local returns a certificate that can be used to authenticate with the\n\t\/\/ provided instance.\n\tLocal(instance string) (tls.Certificate, error)\n\t\/\/ Remote returns the instance's CA certificate, address, and name.\n\tRemote(instance string) (cert *x509.Certificate, addr, name string, err error)\n}\n\n\/\/ Client is a type to handle connecting to a Server. All fields are required\n\/\/ unless otherwise specified.\ntype Client struct {\n\t\/\/ Port designates which remote port should be used when connecting to\n\t\/\/ instances. This value is defined by the server-side code, but for now it\n\t\/\/ should always be 3307.\n\tPort int\n\t\/\/ Required; specifies how certificates are obtained.\n\tCerts CertSource\n\t\/\/ Optionally tracks connections through this client. 
If nil, connections\n\t\/\/ are not tracked and will not be closed before method Run exits.\n\tConns *ConnSet\n\t\/\/ Dialer should return a new connection to the provided address. It is\n\t\/\/ called on each new connection to an instance. net.Dial will be used if\n\t\/\/ left nil.\n\tDialer func(net, addr string) (net.Conn, error)\n\n\t\/\/ RefreshCfgThrottle is the amount of time to wait between configuration\n\t\/\/ refreshes. If not set, it defaults to 1 minute.\n\t\/\/\n\t\/\/ This is to prevent quota exhaustion in the case of client-side\n\t\/\/ malfunction.\n\tRefreshCfgThrottle time.Duration\n\n\t\/\/ The cfgCache holds the most recent connection configuration keyed by\n\t\/\/ instance. Relevant functions are refreshCfg and cachedCfg. It is\n\t\/\/ protected by cfgL.\n\tcfgCache map[string]cacheEntry\n\tcfgL sync.RWMutex\n\n\t\/\/ MaxConnections is the maximum number of connections to establish\n\t\/\/ before refusing new connections. 0 means no limit.\n\tMaxConnections uint64\n\n\t\/\/ ConnectionsCounter is used to enforce the optional maxConnections limit\n\tConnectionsCounter uint64\n\tConnectionsCounterL sync.RWMutex\n}\n\ntype cacheEntry struct {\n\tlastRefreshed time.Time\n\t\/\/ If err is not nil, the addr and cfg are not valid.\n\terr error\n\taddr string\n\tcfg *tls.Config\n}\n\n\/\/ Run causes the client to start waiting for new connections to connSrc and\n\/\/ proxy them to the destination instance. It blocks until connSrc is closed.\nfunc (c *Client) Run(connSrc <-chan Conn) {\n\tfor conn := range connSrc {\n\t\tgo c.handleConn(conn)\n\t}\n\n\tif err := c.Conns.Close(); err != nil {\n\t\tlogging.Errorf(\"closing client had error: %v\", err)\n\t}\n}\n\nfunc (c *Client) handleConn(conn Conn) {\n\t\/\/ Track connections count only if a maximum connections limit is set to avoid useless overhead\n\tif c.MaxConnections > 0 {\n\t\tactive := atomic.AddUint64(&c.ConnectionsCounter, 1)\n\n\t\t\/\/ Deferred decrement of ConnectionsCounter upon connection closing\n\t\tdefer atomic.AddUint64(&c.ConnectionsCounter, ^uint64(0))\n\n\t\tif active > c.MaxConnections {\n\t\t\tlogging.Errorf(\"too many open connections (max %d)\", c.MaxConnections)\n\t\t\tconn.Conn.Close()\n\t\t\treturn\n\t\t}\n\t}\n\n\tserver, err := c.Dial(conn.Instance)\n\tif err != nil {\n\t\tlogging.Errorf(\"couldn't connect to %q: %v\", conn.Instance, err)\n\t\tconn.Conn.Close()\n\t\treturn\n\t}\n\n\tif false {\n\t\t\/\/ Log the connection's traffic via the debug connection if we're in a\n\t\t\/\/ verbose mode. Note that this is the unencrypted traffic stream.\n\t\tconn.Conn = dbgConn{conn.Conn}\n\t}\n\n\tc.Conns.Add(conn.Instance, conn.Conn)\n\tcopyThenClose(server, conn.Conn, conn.Instance, \"local connection on \"+conn.Conn.LocalAddr().String())\n\n\tif err := c.Conns.Remove(conn.Instance, conn.Conn); err != nil {\n\t\tlogging.Errorf(\"%s\", err)\n\t}\n}\n\n\/\/ refreshCfg uses the CertSource inside the Client to find the instance's\n\/\/ address as well as construct a new tls.Config to connect to the instance. 
It\n\/\/ caches the result.\nfunc (c *Client) refreshCfg(instance string) (addr string, cfg *tls.Config, err error) {\n\tc.cfgL.Lock()\n\tdefer c.cfgL.Unlock()\n\n\tthrottle := c.RefreshCfgThrottle\n\tif throttle == 0 {\n\t\tthrottle = DefaultRefreshCfgThrottle\n\t}\n\n\tif old := c.cfgCache[instance]; time.Since(old.lastRefreshed) < throttle {\n\t\tlogging.Errorf(\"Throttling refreshCfg(%s): it was only called %v ago\", instance, time.Since(old.lastRefreshed))\n\t\t\/\/ Refresh was called too recently, just reuse the result.\n\t\treturn old.addr, old.cfg, old.err\n\t}\n\n\tif c.cfgCache == nil {\n\t\tc.cfgCache = make(map[string]cacheEntry)\n\t}\n\n\tdefer func() {\n\t\tc.cfgCache[instance] = cacheEntry{\n\t\t\tlastRefreshed: time.Now(),\n\n\t\t\terr: err,\n\t\t\taddr: addr,\n\t\t\tcfg: cfg,\n\t\t}\n\t}()\n\n\tmycert, err := c.Certs.Local(instance)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\tscert, addr, name, err := c.Certs.Remote(instance)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tcerts := x509.NewCertPool()\n\tcerts.AddCert(scert)\n\n\tcfg = &tls.Config{\n\t\tServerName: name,\n\t\tCertificates: []tls.Certificate{mycert},\n\t\tRootCAs: certs,\n\t}\n\treturn fmt.Sprintf(\"%s:%d\", addr, c.Port), cfg, nil\n}\n\nfunc (c *Client) cachedCfg(instance string) (string, *tls.Config) {\n\tc.cfgL.RLock()\n\tret, ok := c.cfgCache[instance]\n\tc.cfgL.RUnlock()\n\n\t\/\/ Don't waste time returning an expired\/invalid cert.\n\tif !ok || ret.err != nil || time.Now().After(ret.cfg.Certificates[0].Leaf.NotAfter) {\n\t\treturn \"\", nil\n\t}\n\treturn ret.addr, ret.cfg\n}\n\n\/\/ Dial uses the configuration stored in the client to connect to an instance.\n\/\/ If this func returns a nil error the connection is correctly authenticated\n\/\/ to connect to the instance.\nfunc (c *Client) Dial(instance string) (net.Conn, error) {\n\tif addr, cfg := c.cachedCfg(instance); cfg != nil {\n\t\tret, err := c.tryConnect(addr, cfg)\n\t\tif err == nil {\n\t\t\treturn ret, err\n\t\t}\n\t}\n\n\taddr, cfg, err := c.refreshCfg(instance)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.tryConnect(addr, cfg)\n}\n\nfunc (c *Client) tryConnect(addr string, cfg *tls.Config) (net.Conn, error) {\n\td := c.Dialer\n\tif d == nil {\n\t\td = net.Dial\n\t}\n\tconn, err := d(\"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttype setKeepAliver interface {\n\t\tSetKeepAlive(keepalive bool) error\n\t\tSetKeepAlivePeriod(d time.Duration) error\n\t}\n\n\tif s, ok := conn.(setKeepAliver); ok {\n\t\tif err := s.SetKeepAlive(true); err != nil {\n\t\t\tlogging.Verbosef(\"Couldn't set KeepAlive to true: %v\", err)\n\t\t} else if err := s.SetKeepAlivePeriod(keepAlivePeriod); err != nil {\n\t\t\tlogging.Verbosef(\"Couldn't set KeepAlivePeriod to %v\", keepAlivePeriod)\n\t\t}\n\t} else {\n\t\tlogging.Verbosef(\"KeepAlive not supported: long-running tcp connections may be killed by the OS.\")\n\t}\n\n\tret := tls.Client(conn, cfg)\n\tif err := ret.Handshake(); err != nil {\n\t\tret.Close()\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n}\n\n\/\/ NewConnSrc returns a chan which can be used to receive connections\n\/\/ on the passed Listener. All requests sent to the returned chan will have the\n\/\/ instance name provided here. 
The chan will be closed if the Listener returns\n\/\/ an error.\nfunc NewConnSrc(instance string, l net.Listener) <-chan Conn {\n\tch := make(chan Conn)\n\tgo func() {\n\t\tfor {\n\t\t\tc, err := l.Accept()\n\t\t\tif err != nil {\n\t\t\t\tlogging.Errorf(\"listener (%#v) had error: %v\", l, err)\n\t\t\t\tl.Close()\n\t\t\t\tclose(ch)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tch <- Conn{instance, c}\n\t\t}\n\t}()\n\treturn ch\n}\n<commit_msg>Remove unused mutex<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage proxy\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/cloudsql-proxy\/logging\"\n)\n\nconst (\n\tDefaultRefreshCfgThrottle = time.Minute\n\tkeepAlivePeriod = time.Minute\n)\n\n\/\/ errNotCached is returned when the instance was not found in the Client's\n\/\/ cache. It is an internal detail and is not actually ever returned to the\n\/\/ user.\nvar errNotCached = errors.New(\"instance was not found in cache\")\n\n\/\/ Conn represents a connection from a client to a specific instance.\ntype Conn struct {\n\tInstance string\n\tConn net.Conn\n}\n\n\/\/ CertSource is how a Client obtains various certificates required for operation.\ntype CertSource interface {\n\t\/\/ Local returns a certificate that can be used to authenticate with the\n\t\/\/ provided instance.\n\tLocal(instance string) (tls.Certificate, error)\n\t\/\/ Remote returns the instance's CA certificate, address, and name.\n\tRemote(instance string) (cert *x509.Certificate, addr, name string, err error)\n}\n\n\/\/ Client is a type to handle connecting to a Server. All fields are required\n\/\/ unless otherwise specified.\ntype Client struct {\n\t\/\/ Port designates which remote port should be used when connecting to\n\t\/\/ instances. This value is defined by the server-side code, but for now it\n\t\/\/ should always be 3307.\n\tPort int\n\t\/\/ Required; specifies how certificates are obtained.\n\tCerts CertSource\n\t\/\/ Optionally tracks connections through this client. If nil, connections\n\t\/\/ are not tracked and will not be closed before method Run exits.\n\tConns *ConnSet\n\t\/\/ Dialer should return a new connection to the provided address. It is\n\t\/\/ called on each new connection to an instance. net.Dial will be used if\n\t\/\/ left nil.\n\tDialer func(net, addr string) (net.Conn, error)\n\n\t\/\/ RefreshCfgThrottle is the amount of time to wait between configuration\n\t\/\/ refreshes. If not set, it defaults to 1 minute.\n\t\/\/\n\t\/\/ This is to prevent quota exhaustion in the case of client-side\n\t\/\/ malfunction.\n\tRefreshCfgThrottle time.Duration\n\n\t\/\/ The cfgCache holds the most recent connection configuration keyed by\n\t\/\/ instance. Relevant functions are refreshCfg and cachedCfg. 
It is\n\t\/\/ protected by cfgL.\n\tcfgCache map[string]cacheEntry\n\tcfgL sync.RWMutex\n\n\t\/\/ MaxConnections is the maximum number of connections to establish\n\t\/\/ before refusing new connections. 0 means no limit.\n\tMaxConnections uint64\n\n\t\/\/ ConnectionsCounter is used to enforce the optional maxConnections limit\n\tConnectionsCounter uint64\n}\n\ntype cacheEntry struct {\n\tlastRefreshed time.Time\n\t\/\/ If err is not nil, the addr and cfg are not valid.\n\terr error\n\taddr string\n\tcfg *tls.Config\n}\n\n\/\/ Run causes the client to start waiting for new connections to connSrc and\n\/\/ proxy them to the destination instance. It blocks until connSrc is closed.\nfunc (c *Client) Run(connSrc <-chan Conn) {\n\tfor conn := range connSrc {\n\t\tgo c.handleConn(conn)\n\t}\n\n\tif err := c.Conns.Close(); err != nil {\n\t\tlogging.Errorf(\"closing client had error: %v\", err)\n\t}\n}\n\nfunc (c *Client) handleConn(conn Conn) {\n\t\/\/ Track connections count only if a maximum connections limit is set to avoid useless overhead\n\tif c.MaxConnections > 0 {\n\t\tactive := atomic.AddUint64(&c.ConnectionsCounter, 1)\n\n\t\t\/\/ Deferred decrement of ConnectionsCounter upon connection closing\n\t\tdefer atomic.AddUint64(&c.ConnectionsCounter, ^uint64(0))\n\n\t\tif active > c.MaxConnections {\n\t\t\tlogging.Errorf(\"too many open connections (max %d)\", c.MaxConnections)\n\t\t\tconn.Conn.Close()\n\t\t\treturn\n\t\t}\n\t}\n\n\tserver, err := c.Dial(conn.Instance)\n\tif err != nil {\n\t\tlogging.Errorf(\"couldn't connect to %q: %v\", conn.Instance, err)\n\t\tconn.Conn.Close()\n\t\treturn\n\t}\n\n\tif false {\n\t\t\/\/ Log the connection's traffic via the debug connection if we're in a\n\t\t\/\/ verbose mode. Note that this is the unencrypted traffic stream.\n\t\tconn.Conn = dbgConn{conn.Conn}\n\t}\n\n\tc.Conns.Add(conn.Instance, conn.Conn)\n\tcopyThenClose(server, conn.Conn, conn.Instance, \"local connection on \"+conn.Conn.LocalAddr().String())\n\n\tif err := c.Conns.Remove(conn.Instance, conn.Conn); err != nil {\n\t\tlogging.Errorf(\"%s\", err)\n\t}\n}\n\n\/\/ refreshCfg uses the CertSource inside the Client to find the instance's\n\/\/ address as well as construct a new tls.Config to connect to the instance. 
It\n\/\/ caches the result.\nfunc (c *Client) refreshCfg(instance string) (addr string, cfg *tls.Config, err error) {\n\tc.cfgL.Lock()\n\tdefer c.cfgL.Unlock()\n\n\tthrottle := c.RefreshCfgThrottle\n\tif throttle == 0 {\n\t\tthrottle = DefaultRefreshCfgThrottle\n\t}\n\n\tif old := c.cfgCache[instance]; time.Since(old.lastRefreshed) < throttle {\n\t\tlogging.Errorf(\"Throttling refreshCfg(%s): it was only called %v ago\", instance, time.Since(old.lastRefreshed))\n\t\t\/\/ Refresh was called too recently, just reuse the result.\n\t\treturn old.addr, old.cfg, old.err\n\t}\n\n\tif c.cfgCache == nil {\n\t\tc.cfgCache = make(map[string]cacheEntry)\n\t}\n\n\tdefer func() {\n\t\tc.cfgCache[instance] = cacheEntry{\n\t\t\tlastRefreshed: time.Now(),\n\n\t\t\terr: err,\n\t\t\taddr: addr,\n\t\t\tcfg: cfg,\n\t\t}\n\t}()\n\n\tmycert, err := c.Certs.Local(instance)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\tscert, addr, name, err := c.Certs.Remote(instance)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tcerts := x509.NewCertPool()\n\tcerts.AddCert(scert)\n\n\tcfg = &tls.Config{\n\t\tServerName: name,\n\t\tCertificates: []tls.Certificate{mycert},\n\t\tRootCAs: certs,\n\t}\n\treturn fmt.Sprintf(\"%s:%d\", addr, c.Port), cfg, nil\n}\n\nfunc (c *Client) cachedCfg(instance string) (string, *tls.Config) {\n\tc.cfgL.RLock()\n\tret, ok := c.cfgCache[instance]\n\tc.cfgL.RUnlock()\n\n\t\/\/ Don't waste time returning an expired\/invalid cert.\n\tif !ok || ret.err != nil || time.Now().After(ret.cfg.Certificates[0].Leaf.NotAfter) {\n\t\treturn \"\", nil\n\t}\n\treturn ret.addr, ret.cfg\n}\n\n\/\/ Dial uses the configuration stored in the client to connect to an instance.\n\/\/ If this func returns a nil error the connection is correctly authenticated\n\/\/ to connect to the instance.\nfunc (c *Client) Dial(instance string) (net.Conn, error) {\n\tif addr, cfg := c.cachedCfg(instance); cfg != nil {\n\t\tret, err := c.tryConnect(addr, cfg)\n\t\tif err == nil {\n\t\t\treturn ret, err\n\t\t}\n\t}\n\n\taddr, cfg, err := c.refreshCfg(instance)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.tryConnect(addr, cfg)\n}\n\nfunc (c *Client) tryConnect(addr string, cfg *tls.Config) (net.Conn, error) {\n\td := c.Dialer\n\tif d == nil {\n\t\td = net.Dial\n\t}\n\tconn, err := d(\"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttype setKeepAliver interface {\n\t\tSetKeepAlive(keepalive bool) error\n\t\tSetKeepAlivePeriod(d time.Duration) error\n\t}\n\n\tif s, ok := conn.(setKeepAliver); ok {\n\t\tif err := s.SetKeepAlive(true); err != nil {\n\t\t\tlogging.Verbosef(\"Couldn't set KeepAlive to true: %v\", err)\n\t\t} else if err := s.SetKeepAlivePeriod(keepAlivePeriod); err != nil {\n\t\t\tlogging.Verbosef(\"Couldn't set KeepAlivePeriod to %v\", keepAlivePeriod)\n\t\t}\n\t} else {\n\t\tlogging.Verbosef(\"KeepAlive not supported: long-running tcp connections may be killed by the OS.\")\n\t}\n\n\tret := tls.Client(conn, cfg)\n\tif err := ret.Handshake(); err != nil {\n\t\tret.Close()\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n}\n\n\/\/ NewConnSrc returns a chan which can be used to receive connections\n\/\/ on the passed Listener. All requests sent to the returned chan will have the\n\/\/ instance name provided here. 
The chan will be closed if the Listener returns\n\/\/ an error.\nfunc NewConnSrc(instance string, l net.Listener) <-chan Conn {\n\tch := make(chan Conn)\n\tgo func() {\n\t\tfor {\n\t\t\tc, err := l.Accept()\n\t\t\tif err != nil {\n\t\t\t\tlogging.Errorf(\"listener (%#v) had error: %v\", l, err)\n\t\t\t\tl.Close()\n\t\t\t\tclose(ch)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tch <- Conn{instance, c}\n\t\t}\n\t}()\n\treturn ch\n}\n<|endoftext|>"} {"text":"<commit_before>package HarborAPItest\n\nimport (\n\t\"fmt\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"testing\"\n \"github.com\/vmware\/harbor\/tests\/apitests\/apilib\"\n)\n\nfunc TestSearch(t *testing.T) {\n fmt.Println(\"Test for Search (SearchGet) API\\n\")\n\tassert := assert.New(t)\n\n\tapiTest := HarborAPI.NewHarborAPI()\n\tvar resault HarborAPI.Search\n resault, err := apiTest.SearchGet(\"library\")\n\t\/\/fmt.Printf(\"%+v\\n\", resault)\n\tif err != nil {\n\t\tt.Error(\"Error while search project or repository\", err.Error())\n\t\tt.Log(err)\n\t} else {\n\t\tassert.Equal(resault.Projects[0].ProjectId, int32(1), \"Project id should be equal\")\n\t\tassert.Equal(resault.Projects[0].ProjectName, \"library\", \"Project name should be library\")\n\t\tassert.Equal(resault.Projects[0].Public, int32(1), \"Project public status should be 1 (true)\")\n\t\t\/\/t.Log(resault)\n\t}\n\t\/\/if resault.Response.StatusCode != 200 {\n\t\/\/\tt.Log(resault.Response)\n\t\/\/}\n\n}\n<commit_msg>Update hbapisearch_test.go<commit_after>package HarborAPItest\n\nimport (\n\t\"fmt\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"testing\"\n \"github.com\/vmware\/harbor\/tests\/apitests\/apilib\"\n)\n\nfunc TestSearch(t *testing.T) {\n fmt.Println(\"Test for Search (SearchGet) API\")\n\tassert := assert.New(t)\n\n\tapiTest := HarborAPI.NewHarborAPI()\n\tvar resault HarborAPI.Search\n resault, err := apiTest.SearchGet(\"library\")\n\t\/\/fmt.Printf(\"%+v\\n\", resault)\n\tif err != nil {\n\t\tt.Error(\"Error while search project or repository\", err.Error())\n\t\tt.Log(err)\n\t} else {\n\t\tassert.Equal(resault.Projects[0].ProjectId, int32(1), \"Project id should be equal\")\n\t\tassert.Equal(resault.Projects[0].ProjectName, \"library\", \"Project name should be library\")\n\t\tassert.Equal(resault.Projects[0].Public, int32(1), \"Project public status should be 1 (true)\")\n\t\t\/\/t.Log(resault)\n\t}\n\t\/\/if resault.Response.StatusCode != 200 {\n\t\/\/\tt.Log(resault.Response)\n\t\/\/}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package impl\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/apple\/foundationdb\/bindings\/go\/src\/fdb\"\n\t\"github.com\/apple\/foundationdb\/bindings\/go\/src\/fdb\/directory\"\n\t\"github.com\/apple\/foundationdb\/bindings\/go\/src\/fdb\/subspace\"\n\t\"github.com\/apple\/foundationdb\/bindings\/go\/src\/fdb\/tuple\"\n\t\"hash\/crc32\"\n\t\"strings\"\n\t\"vitessedata\/plugin\"\n)\n\ntype fdbctxt struct {\n\tdb fdb.Database\n\tdir directory.DirectorySubspace\n\tsubs [256]subspace.Subspace\n\tclusterFile string\n\tnhk int\n}\n\nfunc opendb(path []string) *fdbctxt {\n\tvar ctxt fdbctxt\n\n\tcf := flag.String(\"clusterfile\", \"\", \"fdb cluster file.\")\n\tnhk := flag.Int(\"nh\", 1, \"number of hash bucket column\")\n\tflag.Parse()\n\n\tctxt.clusterFile = *cf\n\tctxt.nhk = *nhk\n\n\tfdb.MustAPIVersion(510)\n\n\tif ctxt.clusterFile != \"\" {\n\t\t\/\/ For now, fdb only support one database \"DB\"\n\t\tctxt.db = fdb.MustOpen(ctxt.clusterFile, []byte(\"DB\"))\n\t} else {\n\t\tctxt.db = fdb.MustOpenDefault()\n\t}\n\n\tvar err 
error\n\tctxt.dir, err = directory.CreateOrOpen(ctxt.db, path, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfor i := 0; i < 256; i++ {\n\t\tctxt.subs[i] = ctxt.dir.Sub([]byte{byte(i)})\n\t}\n\treturn &ctxt\n}\n\nfunc buildTuple(vs []interface{}) tuple.Tuple {\n\ttup := make([]tuple.TupleElement, len(vs))\n\tfor idx, v := range vs {\n\t\ttup[idx] = v.(tuple.TupleElement)\n\t}\n\treturn tup\n}\n\nfunc (ctxt *fdbctxt) buildKey(t tuple.Tuple) (fdb.Key, byte) {\n\tnhk := ctxt.nhk\n\tif nhk == 0 {\n\t\tnhk = len(t)\n\t}\n\n\tkb := t[:nhk].Pack()\n\tbkt := byte(crc32.ChecksumIEEE(kb))\n\tkey := ctxt.subs[bkt].Pack(t)\n\n\tplugin.DbgLog(\"Build key: %v -> %v, at bkt %d.\", t, key, bkt)\n\treturn key, bkt\n}\n\nfunc (ctxt *fdbctxt) buildBktKey(bkt byte, t tuple.Tuple) fdb.Key {\n\tif t == nil || len(t) == 0 {\n\t\treturn ctxt.subs[bkt].FDBKey()\n\t}\n\treturn ctxt.subs[bkt].Pack(t)\n}\n\nfunc (ctxt *fdbctxt) parseKeyValue(bkt byte, kv fdb.KeyValue) (tuple.Tuple, tuple.Tuple, error) {\n\tkt, err := ctxt.subs[bkt].Unpack(kv.Key)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvt, err := tuple.Unpack(kv.Value)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn kt, vt, nil\n}\n\nfunc (ctxt *fdbctxt) buildRange(bkt byte, ta, tz tuple.Tuple) fdb.KeyRange {\n\tka := ctxt.buildBktKey(bkt, ta)\n\tkra, _ := fdb.PrefixRange(ka)\n\tkz := ctxt.buildBktKey(bkt, tz)\n\tkrz, _ := fdb.PrefixRange(kz)\n\treturn fdb.KeyRange{kra.Begin, krz.End}\n}\n\nfunc (ctxt *fdbctxt) ins(tr fdb.Transaction, kt, vt tuple.Tuple) {\n\tk, _ := ctxt.buildKey(kt)\n\tv := vt.Pack()\n\ttr.Set(k, v)\n}\n\nfunc (ctxt *fdbctxt) del(tr fdb.Transaction, kt tuple.Tuple) {\n\tk, _ := ctxt.buildKey(kt)\n\ttr.Clear(k)\n}\n\nfunc (ctxt *fdbctxt) get(tr fdb.Transaction, kt tuple.Tuple) (tuple.Tuple, error) {\n\tk, _ := ctxt.buildKey(kt)\n\tba := tr.Get(k).MustGet()\n\treturn tuple.Unpack(ba)\n}\n\nfunc decodeReqPath(path string) ([]string, []string, []string, error) {\n\t\/\/ path from request should be format mountpoint\/dir\/dir\/key1,key2:val1,val2,val3\n\t\/\/ remove mount point, then return path splited by \"\/\"\n\tidx := strings.Index(path[1:], \"\/\")\n\tstrs := strings.Split(path[idx+1:], \"\/\")\n\n\tif len(strs) < 2 {\n\t\treturn nil, nil, nil, fmt.Errorf(\"FDB path %v is not a valid format.\", path)\n\t}\n\n\tdirpath := strs[:len(strs)-1]\n\tkvstrs := strings.Split(strs[len(strs)-1], \":\")\n\tif len(kvstrs) != 2 {\n\t\treturn nil, nil, nil, fmt.Errorf(\"FDB path %v is not a valid format.\", path)\n\t}\n\treturn dirpath, strings.Split(kvstrs[0], \",\"), strings.Split(kvstrs[1], \",\"), nil\n}\n<commit_msg>Reduce log verbosity.<commit_after>package impl\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/apple\/foundationdb\/bindings\/go\/src\/fdb\"\n\t\"github.com\/apple\/foundationdb\/bindings\/go\/src\/fdb\/directory\"\n\t\"github.com\/apple\/foundationdb\/bindings\/go\/src\/fdb\/subspace\"\n\t\"github.com\/apple\/foundationdb\/bindings\/go\/src\/fdb\/tuple\"\n\t\"hash\/crc32\"\n\t\"strings\"\n\t\"vitessedata\/plugin\"\n)\n\ntype fdbctxt struct {\n\tdb fdb.Database\n\tdir directory.DirectorySubspace\n\tsubs [256]subspace.Subspace\n\tclusterFile string\n\tnhk int\n}\n\nfunc opendb(path []string) *fdbctxt {\n\tvar ctxt fdbctxt\n\n\tcf := flag.String(\"clusterfile\", \"\", \"fdb cluster file.\")\n\tnhk := flag.Int(\"nh\", 1, \"number of hash bucket column\")\n\tflag.Parse()\n\tctxt.clusterFile = *cf\n\tctxt.nhk = *nhk\n\tplugin.DbgLog(\"Opening database with cf %s, nhk %d.\", *cf, 
*nhk)\n\n\tfdb.MustAPIVersion(510)\n\n\tif ctxt.clusterFile != \"\" {\n\t\t\/\/ For now, fdb only support one database \"DB\"\n\t\tctxt.db = fdb.MustOpen(ctxt.clusterFile, []byte(\"DB\"))\n\t} else {\n\t\tctxt.db = fdb.MustOpenDefault()\n\t}\n\n\tvar err error\n\tctxt.dir, err = directory.CreateOrOpen(ctxt.db, path, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfor i := 0; i < 256; i++ {\n\t\tctxt.subs[i] = ctxt.dir.Sub([]byte{byte(i)})\n\t}\n\treturn &ctxt\n}\n\nfunc buildTuple(vs []interface{}) tuple.Tuple {\n\ttup := make([]tuple.TupleElement, len(vs))\n\tfor idx, v := range vs {\n\t\ttup[idx] = v.(tuple.TupleElement)\n\t}\n\treturn tup\n}\n\nfunc (ctxt *fdbctxt) buildKey(t tuple.Tuple) (fdb.Key, byte) {\n\tnhk := ctxt.nhk\n\tif nhk == 0 {\n\t\tnhk = len(t)\n\t}\n\n\tkb := t[:nhk].Pack()\n\tbkt := byte(crc32.ChecksumIEEE(kb))\n\tkey := ctxt.subs[bkt].Pack(t)\n\n\t\/\/ plugin.DbgLog(\"Build key: %v -> %v, at bkt %d.\", t, key, bkt)\n\treturn key, bkt\n}\n\nfunc (ctxt *fdbctxt) buildBktKey(bkt byte, t tuple.Tuple) fdb.Key {\n\tif t == nil || len(t) == 0 {\n\t\treturn ctxt.subs[bkt].FDBKey()\n\t}\n\treturn ctxt.subs[bkt].Pack(t)\n}\n\nfunc (ctxt *fdbctxt) parseKeyValue(bkt byte, kv fdb.KeyValue) (tuple.Tuple, tuple.Tuple, error) {\n\tkt, err := ctxt.subs[bkt].Unpack(kv.Key)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvt, err := tuple.Unpack(kv.Value)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn kt, vt, nil\n}\n\nfunc (ctxt *fdbctxt) buildRange(bkt byte, ta, tz tuple.Tuple) fdb.KeyRange {\n\tka := ctxt.buildBktKey(bkt, ta)\n\tkra, _ := fdb.PrefixRange(ka)\n\tkz := ctxt.buildBktKey(bkt, tz)\n\tkrz, _ := fdb.PrefixRange(kz)\n\treturn fdb.KeyRange{kra.Begin, krz.End}\n}\n\nfunc (ctxt *fdbctxt) ins(tr fdb.Transaction, kt, vt tuple.Tuple) {\n\tk, _ := ctxt.buildKey(kt)\n\tv := vt.Pack()\n\ttr.Set(k, v)\n}\n\nfunc (ctxt *fdbctxt) del(tr fdb.Transaction, kt tuple.Tuple) {\n\tk, _ := ctxt.buildKey(kt)\n\ttr.Clear(k)\n}\n\nfunc (ctxt *fdbctxt) get(tr fdb.Transaction, kt tuple.Tuple) (tuple.Tuple, error) {\n\tk, _ := ctxt.buildKey(kt)\n\tba := tr.Get(k).MustGet()\n\treturn tuple.Unpack(ba)\n}\n\nfunc decodeReqPath(path string) ([]string, []string, []string, error) {\n\t\/\/ path from request should be format mountpoint\/dir\/dir\/key1,key2:val1,val2,val3\n\t\/\/ remove mount point, then return path splited by \"\/\"\n\tidx := strings.Index(path[1:], \"\/\")\n\tstrs := strings.Split(path[idx+1:], \"\/\")\n\n\tif len(strs) < 2 {\n\t\treturn nil, nil, nil, fmt.Errorf(\"FDB path %v is not a valid format.\", path)\n\t}\n\n\tdirpath := strs[:len(strs)-1]\n\tkvstrs := strings.Split(strs[len(strs)-1], \":\")\n\tif len(kvstrs) != 2 {\n\t\treturn nil, nil, nil, fmt.Errorf(\"FDB path %v is not a valid format.\", path)\n\t}\n\treturn dirpath, strings.Split(kvstrs[0], \",\"), strings.Split(kvstrs[1], \",\"), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package defaults\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/volatiletech\/authboss\"\n)\n\nconst (\n\t\/\/ RedirectFormValueName is the name of the form field\n\t\/\/ in the http request that will be used when redirecting\n\tRedirectFormValueName = \"redir\"\n)\n\n\/\/ Responder helps respond to http requests\ntype Responder struct {\n\tRenderer authboss.Renderer\n}\n\n\/\/ NewResponder constructor\nfunc NewResponder(renderer authboss.Renderer) *Responder {\n\treturn &Responder{Renderer: renderer}\n}\n\n\/\/ Respond to an HTTP request. 
It's main job is to merge data that comes in from\n\/\/ various middlewares via the context with the data sent by the controller and render that.\nfunc (r *Responder) Respond(w http.ResponseWriter, req *http.Request, code int, page string, data authboss.HTMLData) error {\n\tctxData := req.Context().Value(authboss.CTXKeyData)\n\tif ctxData != nil {\n\t\tdata.Merge(ctxData.(authboss.HTMLData))\n\t}\n\n\trendered, mime, err := r.Renderer.Render(req.Context(), page, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw.Header().Set(\"Content-Type\", mime)\n\tw.WriteHeader(code)\n\n\t_, err = w.Write(rendered)\n\treturn err\n}\n\nfunc isAPIRequest(r *http.Request) bool {\n\treturn r.Header.Get(\"Content-Type\") == \"application\/json\"\n}\n\n\/\/ Redirector for http requests\ntype Redirector struct {\n\tRenderer authboss.Renderer\n\n\t\/\/ FormValueName for the redirection\n\tFormValueName string\n}\n\n\/\/ NewRedirector constructor\nfunc NewRedirector(renderer authboss.Renderer, formValueName string) *Redirector {\n\treturn &Redirector{FormValueName: formValueName, Renderer: renderer}\n}\n\n\/\/ Redirect the client elsewhere. If it's an API request it will simply render\n\/\/ a JSON response with information that should help a client to decide what\n\/\/ to do.\nfunc (r *Redirector) Redirect(w http.ResponseWriter, req *http.Request, ro authboss.RedirectOptions) error {\n\tvar redirectFunction = r.redirectNonAPI\n\tif isAPIRequest(req) {\n\t\tredirectFunction = r.redirectAPI\n\t}\n\n\treturn redirectFunction(w, req, ro)\n}\n\nfunc (r Redirector) redirectAPI(w http.ResponseWriter, req *http.Request, ro authboss.RedirectOptions) error {\n\tpath := ro.RedirectPath\n\tredir := req.FormValue(r.FormValueName)\n\tif len(redir) != 0 && ro.FollowRedirParam {\n\t\tpath = redir\n\t}\n\n\tvar status, message string\n\tif len(ro.Success) != 0 {\n\t\tstatus = \"success\"\n\t\tmessage = ro.Success\n\t}\n\tif len(ro.Failure) != 0 {\n\t\tstatus = \"failure\"\n\t\tmessage = ro.Failure\n\t}\n\n\tdata := authboss.HTMLData{\n\t\t\"location\": path,\n\t}\n\n\tif len(status) != 0 {\n\t\tdata[\"status\"] = status\n\t\tdata[\"message\"] = message\n\t}\n\n\tbody, mime, err := r.Renderer.Render(req.Context(), \"redirect\", data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(body) != 0 {\n\t\tw.Header().Set(\"Content-Type\", mime)\n\t}\n\n\tif ro.Code != 0 {\n\t\tw.WriteHeader(ro.Code)\n\t}\n\t_, err = w.Write(body)\n\treturn err\n}\n\nfunc (r Redirector) redirectNonAPI(w http.ResponseWriter, req *http.Request, ro authboss.RedirectOptions) error {\n\tpath := ro.RedirectPath\n\tredir := req.FormValue(r.FormValueName)\n\tif len(redir) != 0 && ro.FollowRedirParam {\n\t\tpath = redir\n\t}\n\n\tif len(ro.Success) != 0 {\n\t\tauthboss.PutSession(w, authboss.FlashSuccessKey, ro.Success)\n\t}\n\tif len(ro.Failure) != 0 {\n\t\tauthboss.PutSession(w, authboss.FlashErrorKey, ro.Failure)\n\t}\n\n\thttp.Redirect(w, req, path, http.StatusFound)\n\treturn nil\n}\n<commit_msg>Fix nil bug in responder<commit_after>package defaults\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/volatiletech\/authboss\"\n)\n\nconst (\n\t\/\/ RedirectFormValueName is the name of the form field\n\t\/\/ in the http request that will be used when redirecting\n\tRedirectFormValueName = \"redir\"\n)\n\n\/\/ Responder helps respond to http requests\ntype Responder struct {\n\tRenderer authboss.Renderer\n}\n\n\/\/ NewResponder constructor\nfunc NewResponder(renderer authboss.Renderer) *Responder {\n\treturn &Responder{Renderer: renderer}\n}\n\n\/\/ Respond to 
an HTTP request. It's main job is to merge data that comes in from\n\/\/ various middlewares via the context with the data sent by the controller and render that.\nfunc (r *Responder) Respond(w http.ResponseWriter, req *http.Request, code int, page string, data authboss.HTMLData) error {\n\tctxData := req.Context().Value(authboss.CTXKeyData)\n\tif ctxData != nil {\n\t\tif data == nil {\n\t\t\tdata = authboss.HTMLData{}\n\t\t}\n\t\tdata.Merge(ctxData.(authboss.HTMLData))\n\t}\n\n\trendered, mime, err := r.Renderer.Render(req.Context(), page, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw.Header().Set(\"Content-Type\", mime)\n\tw.WriteHeader(code)\n\n\t_, err = w.Write(rendered)\n\treturn err\n}\n\nfunc isAPIRequest(r *http.Request) bool {\n\treturn r.Header.Get(\"Content-Type\") == \"application\/json\"\n}\n\n\/\/ Redirector for http requests\ntype Redirector struct {\n\tRenderer authboss.Renderer\n\n\t\/\/ FormValueName for the redirection\n\tFormValueName string\n}\n\n\/\/ NewRedirector constructor\nfunc NewRedirector(renderer authboss.Renderer, formValueName string) *Redirector {\n\treturn &Redirector{FormValueName: formValueName, Renderer: renderer}\n}\n\n\/\/ Redirect the client elsewhere. If it's an API request it will simply render\n\/\/ a JSON response with information that should help a client to decide what\n\/\/ to do.\nfunc (r *Redirector) Redirect(w http.ResponseWriter, req *http.Request, ro authboss.RedirectOptions) error {\n\tvar redirectFunction = r.redirectNonAPI\n\tif isAPIRequest(req) {\n\t\tredirectFunction = r.redirectAPI\n\t}\n\n\treturn redirectFunction(w, req, ro)\n}\n\nfunc (r Redirector) redirectAPI(w http.ResponseWriter, req *http.Request, ro authboss.RedirectOptions) error {\n\tpath := ro.RedirectPath\n\tredir := req.FormValue(r.FormValueName)\n\tif len(redir) != 0 && ro.FollowRedirParam {\n\t\tpath = redir\n\t}\n\n\tvar status, message string\n\tif len(ro.Success) != 0 {\n\t\tstatus = \"success\"\n\t\tmessage = ro.Success\n\t}\n\tif len(ro.Failure) != 0 {\n\t\tstatus = \"failure\"\n\t\tmessage = ro.Failure\n\t}\n\n\tdata := authboss.HTMLData{\n\t\t\"location\": path,\n\t}\n\n\tif len(status) != 0 {\n\t\tdata[\"status\"] = status\n\t\tdata[\"message\"] = message\n\t}\n\n\tbody, mime, err := r.Renderer.Render(req.Context(), \"redirect\", data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(body) != 0 {\n\t\tw.Header().Set(\"Content-Type\", mime)\n\t}\n\n\tif ro.Code != 0 {\n\t\tw.WriteHeader(ro.Code)\n\t}\n\t_, err = w.Write(body)\n\treturn err\n}\n\nfunc (r Redirector) redirectNonAPI(w http.ResponseWriter, req *http.Request, ro authboss.RedirectOptions) error {\n\tpath := ro.RedirectPath\n\tredir := req.FormValue(r.FormValueName)\n\tif len(redir) != 0 && ro.FollowRedirParam {\n\t\tpath = redir\n\t}\n\n\tif len(ro.Success) != 0 {\n\t\tauthboss.PutSession(w, authboss.FlashSuccessKey, ro.Success)\n\t}\n\tif len(ro.Failure) != 0 {\n\t\tauthboss.PutSession(w, authboss.FlashErrorKey, ro.Failure)\n\t}\n\n\thttp.Redirect(w, req, path, http.StatusFound)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package phi_test\n\nimport (\n\tlibphi \"github.com\/fate-lovely\/phi\"\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"github.com\/ulule\/limiter\/v3\"\n\t\"github.com\/ulule\/limiter\/v3\/drivers\/middleware\/phi\"\n\t\"github.com\/ulule\/limiter\/v3\/drivers\/store\/memory\"\n\t\"github.com\/valyala\/fasthttp\"\n\t\"github.com\/valyala\/fasthttp\/fasthttputil\"\n\t\"net\"\n\t\"strconv\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"testing\"\n)\n\nfunc 
TestFasthttpMiddleware(t *testing.T) {\n\tis := require.New(t)\n\treq := fasthttp.AcquireRequest()\n\treq.Header.SetHost(\"localhost:8080\")\n\treq.Header.SetRequestURI(\"\/\")\n\n\tstore := memory.NewStore()\n\tis.NotZero(store)\n\n\trate, err := limiter.NewRateFromFormatted(\"10-M\")\n\tis.NoError(err)\n\tis.NotZero(rate)\n\n\tmiddleware := phi.NewMiddleware(limiter.New(store, rate))\n\n\trouter := libphi.NewRouter()\n\trouter.Use(middleware)\n\trouter.Get(\"\/\", func(ctx *fasthttp.RequestCtx) {\n\t\tctx.SetStatusCode(fasthttp.StatusOK)\n\t\tctx.SetBodyString(\"hello\")\n\t})\n\n\tsuccess := int64(10)\n\tclients := int64(100)\n\n\t\/\/\n\t\/\/ Sequential\n\t\/\/\n\n\tfor i := int64(1); i <= clients; i++ {\n\t\tresp := fasthttp.AcquireResponse()\n\t\terr := serve(router.ServeFastHTTP, req, resp)\n\t\tis.Nil(err)\n\n\t\tif i <= success {\n\t\t\tis.Equal(resp.StatusCode(), fasthttp.StatusOK)\n\t\t} else {\n\t\t\tis.Equal(resp.StatusCode(), fasthttp.StatusTooManyRequests)\n\t\t}\n\t}\n\n\t\/\/\n\t\/\/ Concurrent\n\t\/\/\n\n\tstore = memory.NewStore()\n\tis.NotZero(store)\n\n\tmiddleware = phi.NewMiddleware(limiter.New(store, rate))\n\n\trouter = libphi.NewRouter()\n\trouter.Use(middleware)\n\trouter.Get(\"\/\", func(ctx *fasthttp.RequestCtx) {\n\t\tctx.SetStatusCode(fasthttp.StatusOK)\n\t\tctx.SetBodyString(\"hello\")\n\t})\n\n\twg := &sync.WaitGroup{}\n\tcounter := int64(0)\n\n\tfor i := int64(1); i <= clients; i++ {\n\t\twg.Add(1)\n\n\t\tgo func() {\n\t\t\tresp := fasthttp.AcquireResponse()\n\t\t\terr := serve(router.ServeFastHTTP, req, resp)\n\t\t\tis.Nil(err)\n\n\t\t\tif resp.StatusCode() == fasthttp.StatusOK {\n\t\t\t\tatomic.AddInt64(&counter, 1)\n\t\t\t}\n\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\twg.Wait()\n\tis.Equal(success, atomic.LoadInt64(&counter))\n\n\t\/\/\n\t\/\/ Custom KeyGetter\n\t\/\/\n\n\tstore = memory.NewStore()\n\tis.NotZero(store)\n\n\tj := 0\n\tKeyGetter := func(ctx *fasthttp.RequestCtx) string {\n\t\tj++\n\t\treturn strconv.Itoa(j)\n\t}\n\tmiddleware = phi.NewMiddleware(limiter.New(store, rate), phi.WithKeyGetter(KeyGetter))\n\n\tis.NotZero(middleware)\n\n\trouter = libphi.NewRouter()\n\trouter.Use(middleware)\n\trouter.Get(\"\/\", func(ctx *fasthttp.RequestCtx) {\n\t\tctx.SetStatusCode(fasthttp.StatusOK)\n\t\tctx.SetBodyString(\"hello\")\n\t})\n\n\tfor i := int64(1); i <= clients; i++ {\n\t\tresp := fasthttp.AcquireResponse()\n\t\terr := serve(router.ServeFastHTTP, req, resp)\n\t\tis.Nil(err)\n\t\tis.Equal(fasthttp.StatusOK, resp.StatusCode(), strconv.Itoa(int(i)))\n\t}\n}\n\nfunc serve(handler fasthttp.RequestHandler, req *fasthttp.Request, res *fasthttp.Response) error {\n\tln := fasthttputil.NewInmemoryListener()\n\tdefer ln.Close()\n\n\tgo func() {\n\t\terr := fasthttp.Serve(ln, handler)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\tclient := fasthttp.Client{\n\t\tDial: func(addr string) (net.Conn, error) {\n\t\t\treturn ln.Dial()\n\t\t},\n\t}\n\n\treturn client.Do(req, res)\n}\n<commit_msg>Fix request host on phi middleware test<commit_after>package phi_test\n\nimport (\n\tlibphi \"github.com\/fate-lovely\/phi\"\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"github.com\/ulule\/limiter\/v3\"\n\t\"github.com\/ulule\/limiter\/v3\/drivers\/middleware\/phi\"\n\t\"github.com\/ulule\/limiter\/v3\/drivers\/store\/memory\"\n\t\"github.com\/valyala\/fasthttp\"\n\t\"github.com\/valyala\/fasthttp\/fasthttputil\"\n\t\"net\"\n\t\"strconv\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"testing\"\n)\n\nfunc TestFasthttpMiddleware(t *testing.T) {\n\tis := 
require.New(t)\n\n\tstore := memory.NewStore()\n\tis.NotZero(store)\n\n\trate, err := limiter.NewRateFromFormatted(\"10-M\")\n\tis.NoError(err)\n\tis.NotZero(rate)\n\n\tmiddleware := phi.NewMiddleware(limiter.New(store, rate))\n\n\trouter := libphi.NewRouter()\n\trouter.Use(middleware)\n\trouter.Get(\"\/\", func(ctx *fasthttp.RequestCtx) {\n\t\tctx.SetStatusCode(fasthttp.StatusOK)\n\t\tctx.SetBodyString(\"hello\")\n\t})\n\n\tsuccess := int64(10)\n\tclients := int64(100)\n\n\t\/\/\n\t\/\/ Sequential\n\t\/\/\n\n\tfor i := int64(1); i <= clients; i++ {\n\t\tresp := fasthttp.AcquireResponse()\n\t\treq := fasthttp.AcquireRequest()\n\t\treq.Header.SetHost(\"localhost:8081\")\n\t\treq.Header.SetRequestURI(\"\/\")\n\t\terr := serve(router.ServeFastHTTP, req, resp)\n\t\tis.Nil(err)\n\n\t\tif i <= success {\n\t\t\tis.Equal(resp.StatusCode(), fasthttp.StatusOK)\n\t\t} else {\n\t\t\tis.Equal(resp.StatusCode(), fasthttp.StatusTooManyRequests)\n\t\t}\n\t}\n\n\t\/\/\n\t\/\/ Concurrent\n\t\/\/\n\n\tstore = memory.NewStore()\n\tis.NotZero(store)\n\n\tmiddleware = phi.NewMiddleware(limiter.New(store, rate))\n\n\trouter = libphi.NewRouter()\n\trouter.Use(middleware)\n\trouter.Get(\"\/\", func(ctx *fasthttp.RequestCtx) {\n\t\tctx.SetStatusCode(fasthttp.StatusOK)\n\t\tctx.SetBodyString(\"hello\")\n\t})\n\n\twg := &sync.WaitGroup{}\n\tcounter := int64(0)\n\n\tfor i := int64(1); i <= clients; i++ {\n\t\twg.Add(1)\n\n\t\tgo func() {\n\t\t\tresp := fasthttp.AcquireResponse()\n\t\t\treq := fasthttp.AcquireRequest()\n\t\t\treq.Header.SetHost(\"localhost:8081\")\n\t\t\treq.Header.SetRequestURI(\"\/\")\n\t\t\terr := serve(router.ServeFastHTTP, req, resp)\n\t\t\tis.Nil(err)\n\n\t\t\tif resp.StatusCode() == fasthttp.StatusOK {\n\t\t\t\tatomic.AddInt64(&counter, 1)\n\t\t\t}\n\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\twg.Wait()\n\tis.Equal(success, atomic.LoadInt64(&counter))\n\n\t\/\/\n\t\/\/ Custom KeyGetter\n\t\/\/\n\n\tstore = memory.NewStore()\n\tis.NotZero(store)\n\n\tj := 0\n\tKeyGetter := func(ctx *fasthttp.RequestCtx) string {\n\t\tj++\n\t\treturn strconv.Itoa(j)\n\t}\n\tmiddleware = phi.NewMiddleware(limiter.New(store, rate), phi.WithKeyGetter(KeyGetter))\n\n\tis.NotZero(middleware)\n\n\trouter = libphi.NewRouter()\n\trouter.Use(middleware)\n\trouter.Get(\"\/\", func(ctx *fasthttp.RequestCtx) {\n\t\tctx.SetStatusCode(fasthttp.StatusOK)\n\t\tctx.SetBodyString(\"hello\")\n\t})\n\n\tfor i := int64(1); i <= clients; i++ {\n\t\tresp := fasthttp.AcquireResponse()\n\t\treq := fasthttp.AcquireRequest()\n\t\treq.Header.SetHost(\"localhost:8081\")\n\t\treq.Header.SetRequestURI(\"\/\")\n\t\terr := serve(router.ServeFastHTTP, req, resp)\n\t\tis.Nil(err)\n\t\tis.Equal(fasthttp.StatusOK, resp.StatusCode(), strconv.Itoa(int(i)))\n\t}\n}\n\nfunc serve(handler fasthttp.RequestHandler, req *fasthttp.Request, res *fasthttp.Response) error {\n\tln := fasthttputil.NewInmemoryListener()\n\tdefer ln.Close()\n\n\tgo func() {\n\t\terr := fasthttp.Serve(ln, handler)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\tclient := fasthttp.Client{\n\t\tDial: func(addr string) (net.Conn, error) {\n\t\t\treturn ln.Dial()\n\t\t},\n\t}\n\n\treturn client.Do(req, res)\n}\n<|endoftext|>"} {"text":"<commit_before>package router\n\nimport (\n\twt \"github.com\/weaveworks\/weave\/testing\"\n\t\"testing\"\n\t\"time\"\n)\n\n\/\/ TODO test gossip unicast and broadcast; atm we only test topology\n\/\/ gossip, which does not employ unicast or broadcast.\n\ntype mockChannelConnection struct {\n\tRemoteConnection\n\tdest *Router\n}\n\n\/\/ Construct a 
\"passive\" Router, i.e. without any goroutines, except\n\/\/ for Routes and GossipSenders.\nfunc NewTestRouter(name PeerName) *Router {\n\trouter := NewRouter(RouterConfig{Name: name})\n\t\/\/ need to create a dummy channel otherwise tests hang on nil\n\t\/\/ channels when the Router invoked ConnectionMaker.Refresh\n\trouter.ConnectionMaker.actionChan = make(chan ConnectionMakerAction, ChannelSize)\n\trouter.Routes.Start()\n\treturn router\n}\n\nfunc (conn *mockChannelConnection) SendProtocolMsg(protocolMsg ProtocolMsg) {\n\tif err := conn.dest.handleGossip(protocolMsg.tag, protocolMsg.msg); err != nil {\n\t\tpanic(err)\n\t}\n\tconn.dest.sendPendingGossip()\n}\n\nfunc (router *Router) AddTestChannelConnection(r *Router) {\n\tfromName := router.Ourself.Peer.Name\n\ttoName := r.Ourself.Peer.Name\n\n\tfromPeer := NewPeer(fromName, \"\", router.Ourself.Peer.UID, 0)\n\ttoPeer := NewPeer(toName, \"\", r.Ourself.Peer.UID, 0)\n\n\tr.Peers.FetchWithDefault(fromPeer) \/\/ Has side-effect of incrementing refcount\n\trouter.Peers.FetchWithDefault(toPeer) \/\/\n\n\tconn := &mockChannelConnection{RemoteConnection{router.Ourself.Peer, toPeer, \"\", false, true}, r}\n\trouter.Ourself.handleAddConnection(conn)\n\trouter.Ourself.handleConnectionEstablished(conn)\n\trouter.sendPendingGossip()\n}\n\nfunc (router *Router) DeleteTestChannelConnection(r *Router) {\n\tfromName := router.Ourself.Peer.Name\n\ttoName := r.Ourself.Peer.Name\n\n\tfromPeer, _ := r.Peers.Fetch(fromName)\n\ttoPeer, _ := router.Peers.Fetch(toName)\n\n\tfromPeer.DecrementLocalRefCount()\n\ttoPeer.DecrementLocalRefCount()\n\n\tconn, _ := router.Ourself.ConnectionTo(toName)\n\trouter.Ourself.handleDeleteConnection(conn)\n\trouter.sendPendingGossip()\n}\n\nfunc TestGossipTopology(t *testing.T) {\n\twt.RunWithTimeout(t, 1*time.Second, func() {\n\t\timplTestGossipTopology(t)\n\t})\n}\n\n\/\/ Create a Peer representing the receiver router, with connections to\n\/\/ the routers supplied as arguments, carrying across all UID and\n\/\/ version information.\nfunc (router *Router) tp(routers ...*Router) *Peer {\n\tpeer := NewPeer(router.Ourself.Peer.Name, \"\", router.Ourself.Peer.UID, 0)\n\tconnections := make(map[PeerName]Connection)\n\tfor _, r := range routers {\n\t\tp := NewPeer(r.Ourself.Peer.Name, \"\", r.Ourself.Peer.UID, r.Ourself.Peer.version)\n\t\tconnections[r.Ourself.Peer.Name] = newMockConnection(peer, p)\n\t}\n\tpeer.SetVersionAndConnections(router.Ourself.Peer.version, connections)\n\treturn peer\n}\n\n\/\/ Check that the topology of router matches the peers and all of their connections\nfunc checkTopology(t *testing.T, router *Router, wantedPeers ...*Peer) {\n\tcheckTopologyPeers(t, true, router.Peers.allPeers(), wantedPeers...)\n}\n\nfunc implTestGossipTopology(t *testing.T) {\n\t\/\/ Create some peers that will talk to each other\n\tpeer1Name, _ := PeerNameFromString(\"01:00:00:01:00:00\")\n\tpeer2Name, _ := PeerNameFromString(\"02:00:00:02:00:00\")\n\tpeer3Name, _ := PeerNameFromString(\"03:00:00:03:00:00\")\n\tr1 := NewTestRouter(peer1Name)\n\tr2 := NewTestRouter(peer2Name)\n\tr3 := NewTestRouter(peer3Name)\n\n\t\/\/ Check state when they have no connections\n\tcheckTopology(t, r1, r1.tp())\n\tcheckTopology(t, r2, r2.tp())\n\n\t\/\/ Now try adding some connections\n\tr1.AddTestChannelConnection(r2)\n\tcheckTopology(t, r1, r1.tp(r2), r2.tp())\n\tcheckTopology(t, r2, r1.tp(r2), r2.tp())\n\tr2.AddTestChannelConnection(r1)\n\tcheckTopology(t, r1, r1.tp(r2), r2.tp(r1))\n\tcheckTopology(t, r2, r1.tp(r2), r2.tp(r1))\n\n\t\/\/ 
Currently, the connection from 2 to 3 is one-way only\n\tr2.AddTestChannelConnection(r3)\n\tcheckTopology(t, r1, r1.tp(r2), r2.tp(r1, r3), r3.tp())\n\tcheckTopology(t, r2, r1.tp(r2), r2.tp(r1, r3), r3.tp())\n\t\/\/ When r2 gossiped to r3, 1 was unreachable from r3 so it got removed from the\n\t\/\/ list of peers, but remains referenced in the connection from 1 to 3.\n\tcheckTopology(t, r3, r2.tp(r1, r3), r3.tp())\n\n\t\/\/ Add a connection from 3 to 1 and now r1 is reachable.\n\tr3.AddTestChannelConnection(r1)\n\tcheckTopology(t, r1, r1.tp(r2), r2.tp(r1, r3), r3.tp(r1))\n\tcheckTopology(t, r2, r1.tp(r2), r2.tp(r1, r3), r3.tp(r1))\n\tcheckTopology(t, r3, r1.tp(), r2.tp(r1, r3), r3.tp(r1))\n\n\tr1.AddTestChannelConnection(r3)\n\tcheckTopology(t, r1, r1.tp(r2, r3), r2.tp(r1, r3), r3.tp(r1))\n\tcheckTopology(t, r2, r1.tp(r2, r3), r2.tp(r1, r3), r3.tp(r1))\n\tcheckTopology(t, r3, r1.tp(r2, r3), r2.tp(r1, r3), r3.tp(r1))\n\n\t\/\/ Drop the connection from 2 to 3\n\tr2.DeleteTestChannelConnection(r3)\n\tcheckTopology(t, r1, r1.tp(r2, r3), r2.tp(r1), r3.tp(r1))\n\tcheckTopology(t, r2, r1.tp(r2, r3), r2.tp(r1))\n\tcheckTopology(t, r3, r1.tp(r2, r3), r2.tp(r1), r3.tp(r1))\n\n\t\/\/ Drop the connection from 1 to 3\n\tr1.DeleteTestChannelConnection(r3)\n\tcheckTopology(t, r1, r1.tp(r2), r2.tp(r1), r3.tp(r1))\n\n\tcheckTopology(t, r1, r1.tp(r2), r2.tp(r1), r3.tp(r1))\n\tcheckTopology(t, r2, r1.tp(r2), r2.tp(r1))\n\t\/\/ r3 still thinks r1 has a connection to it\n\tcheckTopology(t, r3, r1.tp(r2, r3), r2.tp(r1), r3.tp(r1))\n\n\t\/\/ On a timer, r3 will gossip to r1\n\tr3.SendAllGossip()\n\tcheckTopology(t, r1, r1.tp(r2), r2.tp(r1), r3.tp(r1))\n}\n<commit_msg>fix bug<commit_after>package router\n\nimport (\n\twt \"github.com\/weaveworks\/weave\/testing\"\n\t\"testing\"\n\t\"time\"\n)\n\n\/\/ TODO test gossip unicast and broadcast; atm we only test topology\n\/\/ gossip, which does not employ unicast or broadcast.\n\ntype mockChannelConnection struct {\n\tRemoteConnection\n\tdest *Router\n}\n\n\/\/ Construct a \"passive\" Router, i.e. 
without any goroutines, except\n\/\/ for Routes and GossipSenders.\nfunc NewTestRouter(name PeerName) *Router {\n\trouter := NewRouter(RouterConfig{}, name, \"\")\n\t\/\/ need to create a dummy channel otherwise tests hang on nil\n\t\/\/ channels when the Router invoked ConnectionMaker.Refresh\n\trouter.ConnectionMaker.actionChan = make(chan ConnectionMakerAction, ChannelSize)\n\trouter.Routes.Start()\n\treturn router\n}\n\nfunc (conn *mockChannelConnection) SendProtocolMsg(protocolMsg ProtocolMsg) {\n\tif err := conn.dest.handleGossip(protocolMsg.tag, protocolMsg.msg); err != nil {\n\t\tpanic(err)\n\t}\n\tconn.dest.sendPendingGossip()\n}\n\nfunc (router *Router) AddTestChannelConnection(r *Router) {\n\tfromName := router.Ourself.Peer.Name\n\ttoName := r.Ourself.Peer.Name\n\n\tfromPeer := NewPeer(fromName, \"\", router.Ourself.Peer.UID, 0)\n\ttoPeer := NewPeer(toName, \"\", r.Ourself.Peer.UID, 0)\n\n\tr.Peers.FetchWithDefault(fromPeer) \/\/ Has side-effect of incrementing refcount\n\trouter.Peers.FetchWithDefault(toPeer) \/\/\n\n\tconn := &mockChannelConnection{RemoteConnection{router.Ourself.Peer, toPeer, \"\", false, true}, r}\n\trouter.Ourself.handleAddConnection(conn)\n\trouter.Ourself.handleConnectionEstablished(conn)\n\trouter.sendPendingGossip()\n}\n\nfunc (router *Router) DeleteTestChannelConnection(r *Router) {\n\tfromName := router.Ourself.Peer.Name\n\ttoName := r.Ourself.Peer.Name\n\n\tfromPeer, _ := r.Peers.Fetch(fromName)\n\ttoPeer, _ := router.Peers.Fetch(toName)\n\n\tfromPeer.DecrementLocalRefCount()\n\ttoPeer.DecrementLocalRefCount()\n\n\tconn, _ := router.Ourself.ConnectionTo(toName)\n\trouter.Ourself.handleDeleteConnection(conn)\n\trouter.sendPendingGossip()\n}\n\nfunc TestGossipTopology(t *testing.T) {\n\twt.RunWithTimeout(t, 1*time.Second, func() {\n\t\timplTestGossipTopology(t)\n\t})\n}\n\n\/\/ Create a Peer representing the receiver router, with connections to\n\/\/ the routers supplied as arguments, carrying across all UID and\n\/\/ version information.\nfunc (router *Router) tp(routers ...*Router) *Peer {\n\tpeer := NewPeer(router.Ourself.Peer.Name, \"\", router.Ourself.Peer.UID, 0)\n\tconnections := make(map[PeerName]Connection)\n\tfor _, r := range routers {\n\t\tp := NewPeer(r.Ourself.Peer.Name, \"\", r.Ourself.Peer.UID, r.Ourself.Peer.version)\n\t\tconnections[r.Ourself.Peer.Name] = newMockConnection(peer, p)\n\t}\n\tpeer.SetVersionAndConnections(router.Ourself.Peer.version, connections)\n\treturn peer\n}\n\n\/\/ Check that the topology of router matches the peers and all of their connections\nfunc checkTopology(t *testing.T, router *Router, wantedPeers ...*Peer) {\n\tcheckTopologyPeers(t, true, router.Peers.allPeers(), wantedPeers...)\n}\n\nfunc implTestGossipTopology(t *testing.T) {\n\t\/\/ Create some peers that will talk to each other\n\tpeer1Name, _ := PeerNameFromString(\"01:00:00:01:00:00\")\n\tpeer2Name, _ := PeerNameFromString(\"02:00:00:02:00:00\")\n\tpeer3Name, _ := PeerNameFromString(\"03:00:00:03:00:00\")\n\tr1 := NewTestRouter(peer1Name)\n\tr2 := NewTestRouter(peer2Name)\n\tr3 := NewTestRouter(peer3Name)\n\n\t\/\/ Check state when they have no connections\n\tcheckTopology(t, r1, r1.tp())\n\tcheckTopology(t, r2, r2.tp())\n\n\t\/\/ Now try adding some connections\n\tr1.AddTestChannelConnection(r2)\n\tcheckTopology(t, r1, r1.tp(r2), r2.tp())\n\tcheckTopology(t, r2, r1.tp(r2), r2.tp())\n\tr2.AddTestChannelConnection(r1)\n\tcheckTopology(t, r1, r1.tp(r2), r2.tp(r1))\n\tcheckTopology(t, r2, r1.tp(r2), r2.tp(r1))\n\n\t\/\/ Currently, the connection from 2 
to 3 is one-way only\n\tr2.AddTestChannelConnection(r3)\n\tcheckTopology(t, r1, r1.tp(r2), r2.tp(r1, r3), r3.tp())\n\tcheckTopology(t, r2, r1.tp(r2), r2.tp(r1, r3), r3.tp())\n\t\/\/ When r2 gossiped to r3, 1 was unreachable from r3 so it got removed from the\n\t\/\/ list of peers, but remains referenced in the connection from 1 to 3.\n\tcheckTopology(t, r3, r2.tp(r1, r3), r3.tp())\n\n\t\/\/ Add a connection from 3 to 1 and now r1 is reachable.\n\tr3.AddTestChannelConnection(r1)\n\tcheckTopology(t, r1, r1.tp(r2), r2.tp(r1, r3), r3.tp(r1))\n\tcheckTopology(t, r2, r1.tp(r2), r2.tp(r1, r3), r3.tp(r1))\n\tcheckTopology(t, r3, r1.tp(), r2.tp(r1, r3), r3.tp(r1))\n\n\tr1.AddTestChannelConnection(r3)\n\tcheckTopology(t, r1, r1.tp(r2, r3), r2.tp(r1, r3), r3.tp(r1))\n\tcheckTopology(t, r2, r1.tp(r2, r3), r2.tp(r1, r3), r3.tp(r1))\n\tcheckTopology(t, r3, r1.tp(r2, r3), r2.tp(r1, r3), r3.tp(r1))\n\n\t\/\/ Drop the connection from 2 to 3\n\tr2.DeleteTestChannelConnection(r3)\n\tcheckTopology(t, r1, r1.tp(r2, r3), r2.tp(r1), r3.tp(r1))\n\tcheckTopology(t, r2, r1.tp(r2, r3), r2.tp(r1))\n\tcheckTopology(t, r3, r1.tp(r2, r3), r2.tp(r1), r3.tp(r1))\n\n\t\/\/ Drop the connection from 1 to 3\n\tr1.DeleteTestChannelConnection(r3)\n\tcheckTopology(t, r1, r1.tp(r2), r2.tp(r1), r3.tp(r1))\n\n\tcheckTopology(t, r1, r1.tp(r2), r2.tp(r1), r3.tp(r1))\n\tcheckTopology(t, r2, r1.tp(r2), r2.tp(r1))\n\t\/\/ r3 still thinks r1 has a connection to it\n\tcheckTopology(t, r3, r1.tp(r2, r3), r2.tp(r1), r3.tp(r1))\n\n\t\/\/ On a timer, r3 will gossip to r1\n\tr3.SendAllGossip()\n\tcheckTopology(t, r1, r1.tp(r2), r2.tp(r1), r3.tp(r1))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2022 ASoulDocs. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage store\n\nimport (\n\t\"bytes\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\n\tgoldmarktoc \"github.com\/abhinav\/goldmark-toc\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/yuin\/goldmark\"\n\temoji \"github.com\/yuin\/goldmark-emoji\"\n\thighlighting \"github.com\/yuin\/goldmark-highlighting\"\n\tgoldmarkmeta \"github.com\/yuin\/goldmark-meta\"\n\t\"github.com\/yuin\/goldmark\/ast\"\n\t\"github.com\/yuin\/goldmark\/extension\"\n\t\"github.com\/yuin\/goldmark\/parser\"\n\tgoldmarkhtml \"github.com\/yuin\/goldmark\/renderer\/html\"\n\t\"github.com\/yuin\/goldmark\/text\"\n)\n\nfunc convertFile(pathPrefix, file string) (content []byte, meta map[string]interface{}, headings goldmarktoc.Items, err error) {\n\tbody, err := os.ReadFile(file)\n\tif err != nil {\n\t\treturn nil, nil, nil, errors.Wrap(err, \"read\")\n\t}\n\n\tmd := goldmark.New(\n\t\tgoldmark.WithParserOptions(\n\t\t\tparser.WithAutoHeadingID(),\n\t\t),\n\t\tgoldmark.WithRendererOptions(\n\t\t\tgoldmarkhtml.WithHardWraps(),\n\t\t\tgoldmarkhtml.WithXHTML(),\n\t\t\tgoldmarkhtml.WithUnsafe(),\n\t\t),\n\t\tgoldmark.WithExtensions(\n\t\t\textension.GFM,\n\t\t\tgoldmarkmeta.Meta,\n\t\t\temoji.Emoji,\n\t\t\thighlighting.NewHighlighting(\n\t\t\t\thighlighting.WithStyle(\"base16-snazzy\"),\n\t\t\t\thighlighting.WithGuessLanguage(true),\n\t\t\t),\n\t\t\textension.NewFootnote(),\n\t\t),\n\t)\n\n\tctx := parser.NewContext()\n\tdoc := md.Parser().Parse(text.NewReader(body), parser.WithContext(ctx))\n\n\t\/\/ Headings\n\ttree, err := goldmarktoc.Inspect(doc, body)\n\tif err != nil {\n\t\treturn nil, nil, nil, errors.Wrap(err, \"inspect headings\")\n\t}\n\theadings = tree.Items\n\tif len(headings) > 0 {\n\t\theadings = 
headings[0].Items\n\t}\n\n\t\/\/ Links\n\terr = inspectLinks(pathPrefix, doc)\n\tif err != nil {\n\t\treturn nil, nil, nil, errors.Wrap(err, \"inspect links\")\n\t}\n\n\tvar buf bytes.Buffer\n\terr = md.Renderer().Render(&buf, body, doc)\n\tif err != nil {\n\t\treturn nil, nil, nil, errors.Wrap(err, \"render\")\n\t}\n\n\treturn buf.Bytes(), goldmarkmeta.Get(ctx), headings, nil\n}\n\nfunc inspectLinks(pathPrefix string, doc ast.Node) error {\n\treturn ast.Walk(doc, func(n ast.Node, entering bool) (ast.WalkStatus, error) {\n\t\tif !entering {\n\t\t\treturn ast.WalkContinue, nil\n\t\t}\n\n\t\tlink, ok := n.(*ast.Link)\n\t\tif !ok {\n\t\t\treturn ast.WalkContinue, nil\n\t\t}\n\n\t\tdest, err := url.Parse(string(link.Destination))\n\t\tif err != nil {\n\t\t\treturn ast.WalkContinue, nil\n\t\t}\n\n\t\tif dest.Scheme == \"http\" || dest.Scheme == \"https\" {\n\t\t\t\/\/ TODO: external links adds an SVG\n\t\t\treturn ast.WalkSkipChildren, nil\n\t\t} else if dest.Scheme != \"\" {\n\t\t\treturn ast.WalkContinue, nil\n\t\t}\n\n\t\tif bytes.HasPrefix(link.Destination, []byte(\"#\")) {\n\t\t\treturn ast.WalkContinue, nil\n\t\t}\n\n\t\t\/\/ Example: README.md => \/docs\/introduction\n\t\tif bytes.EqualFold(link.Destination, []byte(readme+\".md\")) {\n\t\t\tlink.Destination = []byte(pathPrefix)\n\t\t\treturn ast.WalkSkipChildren, nil\n\t\t}\n\n\t\t\/\/ Example: \"installation.md\" => \"installation\"\n\t\tlink.Destination = bytes.TrimSuffix(link.Destination, []byte(\".md\"))\n\n\t\t\/\/ Example: \"..\/howto\/README\" => \"..\/howto\/\"\n\t\tlink.Destination = bytes.TrimSuffix(link.Destination, []byte(readme))\n\n\t\t\/\/ Example: (\"\/docs\", \"..\/howto\/\") => \"\/docs\/howto\"\n\t\tlink.Destination = []byte(path.Join(pathPrefix, string(link.Destination)))\n\t\treturn ast.WalkSkipChildren, nil\n\t})\n}\n<commit_msg>markdown: support anchors within links<commit_after>\/\/ Copyright 2022 ASoulDocs. 
All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage store\n\nimport (\n\t\"bytes\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\n\tgoldmarktoc \"github.com\/abhinav\/goldmark-toc\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/yuin\/goldmark\"\n\temoji \"github.com\/yuin\/goldmark-emoji\"\n\thighlighting \"github.com\/yuin\/goldmark-highlighting\"\n\tgoldmarkmeta \"github.com\/yuin\/goldmark-meta\"\n\t\"github.com\/yuin\/goldmark\/ast\"\n\t\"github.com\/yuin\/goldmark\/extension\"\n\t\"github.com\/yuin\/goldmark\/parser\"\n\tgoldmarkhtml \"github.com\/yuin\/goldmark\/renderer\/html\"\n\t\"github.com\/yuin\/goldmark\/text\"\n)\n\nfunc convertFile(pathPrefix, file string) (content []byte, meta map[string]interface{}, headings goldmarktoc.Items, err error) {\n\tbody, err := os.ReadFile(file)\n\tif err != nil {\n\t\treturn nil, nil, nil, errors.Wrap(err, \"read\")\n\t}\n\n\tmd := goldmark.New(\n\t\tgoldmark.WithParserOptions(\n\t\t\tparser.WithAutoHeadingID(),\n\t\t),\n\t\tgoldmark.WithRendererOptions(\n\t\t\tgoldmarkhtml.WithHardWraps(),\n\t\t\tgoldmarkhtml.WithXHTML(),\n\t\t\tgoldmarkhtml.WithUnsafe(),\n\t\t),\n\t\tgoldmark.WithExtensions(\n\t\t\textension.GFM,\n\t\t\tgoldmarkmeta.Meta,\n\t\t\temoji.Emoji,\n\t\t\thighlighting.NewHighlighting(\n\t\t\t\thighlighting.WithStyle(\"base16-snazzy\"),\n\t\t\t\thighlighting.WithGuessLanguage(true),\n\t\t\t),\n\t\t\textension.NewFootnote(),\n\t\t),\n\t)\n\n\tctx := parser.NewContext()\n\tdoc := md.Parser().Parse(text.NewReader(body), parser.WithContext(ctx))\n\n\t\/\/ Headings\n\ttree, err := goldmarktoc.Inspect(doc, body)\n\tif err != nil {\n\t\treturn nil, nil, nil, errors.Wrap(err, \"inspect headings\")\n\t}\n\theadings = tree.Items\n\tif len(headings) > 0 {\n\t\theadings = headings[0].Items\n\t}\n\n\t\/\/ Links\n\terr = inspectLinks(pathPrefix, doc)\n\tif err != nil {\n\t\treturn nil, nil, nil, errors.Wrap(err, \"inspect links\")\n\t}\n\n\tvar buf bytes.Buffer\n\terr = md.Renderer().Render(&buf, body, doc)\n\tif err != nil {\n\t\treturn nil, nil, nil, errors.Wrap(err, \"render\")\n\t}\n\n\treturn buf.Bytes(), goldmarkmeta.Get(ctx), headings, nil\n}\n\nfunc inspectLinks(pathPrefix string, doc ast.Node) error {\n\treturn ast.Walk(doc, func(n ast.Node, entering bool) (ast.WalkStatus, error) {\n\t\tif !entering {\n\t\t\treturn ast.WalkContinue, nil\n\t\t}\n\n\t\tlink, ok := n.(*ast.Link)\n\t\tif !ok {\n\t\t\treturn ast.WalkContinue, nil\n\t\t}\n\n\t\tdest, err := url.Parse(string(link.Destination))\n\t\tif err != nil {\n\t\t\treturn ast.WalkContinue, nil\n\t\t}\n\n\t\tif dest.Scheme == \"http\" || dest.Scheme == \"https\" {\n\t\t\t\/\/ TODO: external links adds an SVG\n\t\t\treturn ast.WalkSkipChildren, nil\n\t\t} else if dest.Scheme != \"\" {\n\t\t\treturn ast.WalkContinue, nil\n\t\t}\n\n\t\tvar anchor []byte\n\t\tif i := bytes.IndexByte(link.Destination, '#'); i > -1 {\n\t\t\tif i == 0 {\n\t\t\t\treturn ast.WalkContinue, nil\n\t\t\t}\n\n\t\t\tanchor = link.Destination[i:]\n\t\t\tlink.Destination = link.Destination[:i]\n\t\t}\n\n\t\t\/\/ Example: README.md => \/docs\/introduction\n\t\tif bytes.EqualFold(link.Destination, []byte(readme+\".md\")) {\n\t\t\tlink.Destination = append([]byte(pathPrefix), anchor...)\n\t\t\treturn ast.WalkSkipChildren, nil\n\t\t}\n\n\t\t\/\/ Example: \"installation.md\" => \"installation\"\n\t\tlink.Destination = bytes.TrimSuffix(link.Destination, []byte(\".md\"))\n\n\t\t\/\/ Example: \"..\/howto\/README\" => 
\"..\/howto\/\"\n\t\tlink.Destination = bytes.TrimSuffix(link.Destination, []byte(readme))\n\n\t\t\/\/ Example: (\"\/docs\", \"..\/howto\/\") => \"\/docs\/howto\"\n\t\tlink.Destination = []byte(path.Join(pathPrefix, string(link.Destination)))\n\n\t\tlink.Destination = append(link.Destination, anchor...)\n\t\treturn ast.WalkSkipChildren, nil\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package interpreter\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"sync\"\n\t\"syscall\"\n\n\t\"github.com\/zetamatta\/go-findfile\"\n\n\t\"..\/dos\"\n)\n\nconst FLAG_AMP2NEWCONSOLE = false\n\nvar WildCardExpansionAlways = false\n\nvar dbg = false\n\ntype CommandNotFound struct {\n\tName string\n\tErr error\n}\n\n\/\/ from \"TDM-GCC-64\/x86_64-w64-mingw32\/include\/winbase.h\"\nconst (\n\tCREATE_NEW_CONSOLE = 0x10\n\tCREATE_NEW_PROCESS_GROUP = 0x200\n)\n\nfunc (this CommandNotFound) Stringer() string {\n\treturn fmt.Sprintf(\"'%s' is not recognized as an internal or external command,\\noperable program or batch file\", this.Name)\n}\n\nfunc (this CommandNotFound) Error() string {\n\treturn this.Stringer()\n}\n\ntype ErrorLevel int\n\nconst (\n\tNOERROR ErrorLevel = 0\n\tTHROUGH ErrorLevel = -1\n\tSHUTDOWN ErrorLevel = -2\n)\n\nfunc (this ErrorLevel) HasValue() bool {\n\treturn this >= NOERROR\n}\n\nfunc (this ErrorLevel) HasError() bool {\n\treturn this > NOERROR\n}\n\nfunc (this ErrorLevel) String() string {\n\tswitch this {\n\tcase THROUGH:\n\t\treturn \"THROUGH\"\n\tcase SHUTDOWN:\n\t\treturn \"SHUTDOWN\"\n\tdefault:\n\t\treturn fmt.Sprintf(\"%d\", this)\n\t}\n}\n\ntype Interpreter struct {\n\texec.Cmd\n\tStdio [3]*os.File\n\tHookCount int\n\tTag interface{}\n\tPipeSeq [2]uint\n\tIsBackGround bool\n\tRawArgs []string\n\n\tOnClone func(*Interpreter) error\n\tClosers []io.Closer\n}\n\nfunc (this *Interpreter) closeAtEnd() {\n\tif this.Closers != nil {\n\t\tfor _, c := range this.Closers {\n\t\t\tc.Close()\n\t\t}\n\t\tthis.Closers = nil\n\t}\n}\n\nfunc (this *Interpreter) Close() {\n\tthis.closeAtEnd()\n}\n\nfunc New() *Interpreter {\n\tthis := Interpreter{\n\t\tStdio: [3]*os.File{os.Stdin, os.Stdout, os.Stderr},\n\t}\n\tthis.Stdin = os.Stdin\n\tthis.Stdout = os.Stdout\n\tthis.Stderr = os.Stderr\n\tthis.PipeSeq[0] = pipeSeq\n\tthis.PipeSeq[1] = 0\n\tthis.Tag = nil\n\treturn &this\n}\n\nfunc (this *Interpreter) SetStdin(f *os.File) {\n\tthis.Stdio[0] = f\n\tthis.Stdin = f\n}\nfunc (this *Interpreter) SetStdout(f *os.File) {\n\tthis.Stdio[1] = f\n\tthis.Stdout = f\n}\nfunc (this *Interpreter) SetStderr(f *os.File) {\n\tthis.Stdio[2] = f\n\tthis.Stderr = f\n}\n\nfunc (this *Interpreter) Clone() (*Interpreter, error) {\n\trv := new(Interpreter)\n\trv.Stdio[0] = this.Stdio[0]\n\trv.Stdio[1] = this.Stdio[1]\n\trv.Stdio[2] = this.Stdio[2]\n\trv.Stdin = this.Stdin\n\trv.Stdout = this.Stdout\n\trv.Stderr = this.Stderr\n\trv.HookCount = this.HookCount\n\trv.Tag = this.Tag\n\trv.PipeSeq = rv.PipeSeq\n\trv.Closers = nil\n\trv.OnClone = this.OnClone\n\tif this.OnClone != nil {\n\t\tif err := this.OnClone(rv); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn rv, nil\n}\n\ntype ArgsHookT func(it *Interpreter, args []string) ([]string, error)\n\nvar argsHook = func(it *Interpreter, args []string) ([]string, error) {\n\treturn args, nil\n}\n\nfunc SetArgsHook(argsHook_ ArgsHookT) (rv ArgsHookT) {\n\trv, argsHook = argsHook, argsHook_\n\treturn\n}\n\ntype HookT func(*Interpreter) (ErrorLevel, error)\n\nvar hook = func(*Interpreter) (ErrorLevel, error) {\n\treturn THROUGH, 
nil\n}\n\nfunc SetHook(hook_ HookT) (rv HookT) {\n\trv, hook = hook, hook_\n\treturn\n}\n\nvar OnCommandNotFound = func(this *Interpreter, err error) error {\n\terr = &CommandNotFound{this.Args[0], err}\n\treturn err\n}\n\nvar ErrorLevelStr string\n\nfunc nvl(a *os.File, b *os.File) *os.File {\n\tif a != nil {\n\t\treturn a\n\t} else {\n\t\treturn b\n\t}\n}\n\nfunc (this *Interpreter) spawnvp_noerrmsg() (ErrorLevel, error) {\n\t\/\/ command is empty.\n\tif len(this.Args) <= 0 {\n\t\treturn NOERROR, nil\n\t}\n\tif dbg {\n\t\tprint(\"spawnvp_noerrmsg('\", this.Args[0], \"')\\n\")\n\t}\n\n\t\/\/ aliases and lua-commands\n\tif errorlevel, err := hook(this); errorlevel != THROUGH || err != nil {\n\t\treturn errorlevel, err\n\t}\n\n\t\/\/ command not found hook\n\tvar err error\n\tthis.Path, err = exec.LookPath(this.Args[0])\n\tif err != nil {\n\t\treturn ErrorLevel(255), OnCommandNotFound(this, err)\n\t}\n\n\tif WildCardExpansionAlways {\n\t\tthis.Args = findfile.Globs(this.Args)\n\t}\n\n\t\/\/ executable-file\n\tif FLAG_AMP2NEWCONSOLE {\n\t\tif this.SysProcAttr != nil && (this.SysProcAttr.CreationFlags&CREATE_NEW_CONSOLE) != 0 {\n\t\t\terr = this.Start()\n\t\t\treturn ErrorLevel(0), err\n\t\t}\n\t}\n\terr = this.Run()\n\n\terrorlevel, errorlevelOk := dos.GetErrorLevel(&this.Cmd)\n\tif errorlevelOk {\n\t\treturn ErrorLevel(errorlevel), err\n\t} else {\n\t\treturn ErrorLevel(255), err\n\t}\n}\n\nfunc (this *Interpreter) Spawnvp() (ErrorLevel, error) {\n\terrorlevel, err := this.spawnvp_noerrmsg()\n\tif err != nil {\n\t\tfmt.Fprintln(this.Stderr, err.Error())\n\t}\n\treturn errorlevel, err\n}\n\ntype result_t struct {\n\tNextValue ErrorLevel\n\tError error\n}\n\nvar pipeSeq uint = 0\n\nfunc (this *Interpreter) Interpret(text string) (errorlevel ErrorLevel, err error) {\n\tif dbg {\n\t\tprint(\"Interpret('\", text, \"')\\n\")\n\t}\n\tif this == nil {\n\t\treturn ErrorLevel(255), errors.New(\"Fatal Error: Interpret: instance is nil\")\n\t}\n\terrorlevel = NOERROR\n\terr = nil\n\n\tstatements, statementsErr := Parse(text)\n\tif statementsErr != nil {\n\t\tif dbg {\n\t\t\tprint(\"Parse Error:\", statementsErr.Error(), \"\\n\")\n\t\t}\n\t\treturn NOERROR, statementsErr\n\t}\n\tif argsHook != nil {\n\t\tif dbg {\n\t\t\tprint(\"call argsHook\\n\")\n\t\t}\n\t\tfor _, pipeline := range statements {\n\t\t\tfor _, state := range pipeline {\n\t\t\t\tstate.Args, err = argsHook(this, state.Args)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn ErrorLevel(255), err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif dbg {\n\t\t\tprint(\"done argsHook\\n\")\n\t\t}\n\t}\n\tfor _, pipeline := range statements {\n\t\tfor i, state := range pipeline {\n\t\t\tif state.Term == \"|\" && (i+1 >= len(pipeline) || len(pipeline[i+1].Args) <= 0) {\n\t\t\t\treturn ErrorLevel(255), errors.New(\"The syntax of the command is incorrect.\")\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, pipeline := range statements {\n\n\t\tvar pipeIn *os.File = nil\n\t\tpipeSeq++\n\t\tisBackGround := this.IsBackGround\n\t\tfor _, state := range pipeline {\n\t\t\tif state.Term == \"&\" {\n\t\t\t\tisBackGround = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tvar wg sync.WaitGroup\n\t\tfor i, state := range pipeline {\n\t\t\tif dbg {\n\t\t\t\tprint(i, \": pipeline loop(\", state.Args[0], \")\\n\")\n\t\t\t}\n\t\t\tcmd := new(Interpreter)\n\t\t\tcmd.PipeSeq[0] = pipeSeq\n\t\t\tcmd.PipeSeq[1] = uint(1 + i)\n\t\t\tcmd.IsBackGround = isBackGround\n\t\t\tcmd.Tag = this.Tag\n\t\t\tcmd.HookCount = this.HookCount\n\t\t\tcmd.SetStdin(nvl(this.Stdio[0], 
os.Stdin))\n\t\t\tcmd.SetStdout(nvl(this.Stdio[1], os.Stdout))\n\t\t\tcmd.SetStderr(nvl(this.Stdio[2], os.Stderr))\n\t\t\tcmd.OnClone = this.OnClone\n\t\t\tif this.OnClone != nil {\n\t\t\t\tif err := this.OnClone(cmd); err != nil {\n\t\t\t\t\treturn ErrorLevel(255), err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar err error = nil\n\n\t\t\tif pipeIn != nil {\n\t\t\t\tcmd.SetStdin(pipeIn)\n\t\t\t\tcmd.Closers = append(cmd.Closers, pipeIn)\n\t\t\t\tpipeIn = nil\n\t\t\t}\n\n\t\t\tif state.Term[0] == '|' {\n\t\t\t\tvar pipeOut *os.File\n\t\t\t\tpipeIn, pipeOut, err = os.Pipe()\n\t\t\t\tcmd.SetStdout(pipeOut)\n\t\t\t\tif state.Term == \"|&\" {\n\t\t\t\t\tcmd.SetStderr(pipeOut)\n\t\t\t\t}\n\t\t\t\tcmd.Closers = append(cmd.Closers, pipeOut)\n\t\t\t}\n\n\t\t\tfor _, red := range state.Redirect {\n\t\t\t\tvar fd *os.File\n\t\t\t\tfd, err = red.OpenOn(cmd)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn NOERROR, err\n\t\t\t\t}\n\t\t\t\tdefer fd.Close()\n\t\t\t}\n\n\t\t\tcmd.Args = state.Args\n\t\t\tcmd.RawArgs = state.RawArgs\n\t\t\tif i > 0 {\n\t\t\t\tcmd.IsBackGround = true\n\t\t\t}\n\t\t\tif i == len(pipeline)-1 && state.Term != \"&\" {\n\t\t\t\terrorlevel, err = cmd.Spawnvp()\n\t\t\t\tcmd.closeAtEnd()\n\t\t\t\tErrorLevelStr = errorlevel.String()\n\t\t\t\tcmd.Close()\n\t\t\t} else {\n\t\t\t\tif !isBackGround {\n\t\t\t\t\twg.Add(1)\n\t\t\t\t}\n\t\t\t\tgo func(cmd1 *Interpreter) {\n\t\t\t\t\tif isBackGround {\n\t\t\t\t\t\tif FLAG_AMP2NEWCONSOLE {\n\t\t\t\t\t\t\tif len(pipeline) == 1 {\n\t\t\t\t\t\t\t\tcmd1.SysProcAttr = &syscall.SysProcAttr{\n\t\t\t\t\t\t\t\t\tCreationFlags: CREATE_NEW_CONSOLE |\n\t\t\t\t\t\t\t\t\t\tCREATE_NEW_PROCESS_GROUP,\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\t}\n\t\t\t\t\tcmd1.Spawnvp()\n\t\t\t\t\tcmd1.closeAtEnd()\n\t\t\t\t\tcmd1.Close()\n\t\t\t\t}(cmd)\n\t\t\t}\n\t\t}\n\t\tif !isBackGround {\n\t\t\twg.Wait()\n\t\t\tif len(pipeline) > 0 {\n\t\t\t\tswitch pipeline[len(pipeline)-1].Term {\n\t\t\t\tcase \"&&\":\n\t\t\t\t\tif errorlevel != 0 {\n\t\t\t\t\t\treturn errorlevel, nil\n\t\t\t\t\t}\n\t\t\t\tcase \"||\":\n\t\t\t\t\tif errorlevel == 0 {\n\t\t\t\t\t\treturn errorlevel, nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n<commit_msg>Removed Interpreter.closeAtEnd() & codes move into .Close()<commit_after>package interpreter\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"sync\"\n\t\"syscall\"\n\n\t\"github.com\/zetamatta\/go-findfile\"\n\n\t\"..\/dos\"\n)\n\nconst FLAG_AMP2NEWCONSOLE = false\n\nvar WildCardExpansionAlways = false\n\nvar dbg = false\n\ntype CommandNotFound struct {\n\tName string\n\tErr error\n}\n\n\/\/ from \"TDM-GCC-64\/x86_64-w64-mingw32\/include\/winbase.h\"\nconst (\n\tCREATE_NEW_CONSOLE = 0x10\n\tCREATE_NEW_PROCESS_GROUP = 0x200\n)\n\nfunc (this CommandNotFound) Stringer() string {\n\treturn fmt.Sprintf(\"'%s' is not recognized as an internal or external command,\\noperable program or batch file\", this.Name)\n}\n\nfunc (this CommandNotFound) Error() string {\n\treturn this.Stringer()\n}\n\ntype ErrorLevel int\n\nconst (\n\tNOERROR ErrorLevel = 0\n\tTHROUGH ErrorLevel = -1\n\tSHUTDOWN ErrorLevel = -2\n)\n\nfunc (this ErrorLevel) HasValue() bool {\n\treturn this >= NOERROR\n}\n\nfunc (this ErrorLevel) HasError() bool {\n\treturn this > NOERROR\n}\n\nfunc (this ErrorLevel) String() string {\n\tswitch this {\n\tcase THROUGH:\n\t\treturn \"THROUGH\"\n\tcase SHUTDOWN:\n\t\treturn \"SHUTDOWN\"\n\tdefault:\n\t\treturn fmt.Sprintf(\"%d\", this)\n\t}\n}\n\ntype 
Interpreter struct {\n\texec.Cmd\n\tStdio [3]*os.File\n\tHookCount int\n\tTag interface{}\n\tPipeSeq [2]uint\n\tIsBackGround bool\n\tRawArgs []string\n\n\tOnClone func(*Interpreter) error\n\tClosers []io.Closer\n}\n\nfunc (this *Interpreter) Close() {\n\tif this.Closers != nil {\n\t\tfor _, c := range this.Closers {\n\t\t\tc.Close()\n\t\t}\n\t\tthis.Closers = nil\n\t}\n}\n\nfunc New() *Interpreter {\n\tthis := Interpreter{\n\t\tStdio: [3]*os.File{os.Stdin, os.Stdout, os.Stderr},\n\t}\n\tthis.Stdin = os.Stdin\n\tthis.Stdout = os.Stdout\n\tthis.Stderr = os.Stderr\n\tthis.PipeSeq[0] = pipeSeq\n\tthis.PipeSeq[1] = 0\n\tthis.Tag = nil\n\treturn &this\n}\n\nfunc (this *Interpreter) SetStdin(f *os.File) {\n\tthis.Stdio[0] = f\n\tthis.Stdin = f\n}\nfunc (this *Interpreter) SetStdout(f *os.File) {\n\tthis.Stdio[1] = f\n\tthis.Stdout = f\n}\nfunc (this *Interpreter) SetStderr(f *os.File) {\n\tthis.Stdio[2] = f\n\tthis.Stderr = f\n}\n\nfunc (this *Interpreter) Clone() (*Interpreter, error) {\n\trv := new(Interpreter)\n\trv.Stdio[0] = this.Stdio[0]\n\trv.Stdio[1] = this.Stdio[1]\n\trv.Stdio[2] = this.Stdio[2]\n\trv.Stdin = this.Stdin\n\trv.Stdout = this.Stdout\n\trv.Stderr = this.Stderr\n\trv.HookCount = this.HookCount\n\trv.Tag = this.Tag\n\trv.PipeSeq = rv.PipeSeq\n\trv.Closers = nil\n\trv.OnClone = this.OnClone\n\tif this.OnClone != nil {\n\t\tif err := this.OnClone(rv); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn rv, nil\n}\n\ntype ArgsHookT func(it *Interpreter, args []string) ([]string, error)\n\nvar argsHook = func(it *Interpreter, args []string) ([]string, error) {\n\treturn args, nil\n}\n\nfunc SetArgsHook(argsHook_ ArgsHookT) (rv ArgsHookT) {\n\trv, argsHook = argsHook, argsHook_\n\treturn\n}\n\ntype HookT func(*Interpreter) (ErrorLevel, error)\n\nvar hook = func(*Interpreter) (ErrorLevel, error) {\n\treturn THROUGH, nil\n}\n\nfunc SetHook(hook_ HookT) (rv HookT) {\n\trv, hook = hook, hook_\n\treturn\n}\n\nvar OnCommandNotFound = func(this *Interpreter, err error) error {\n\terr = &CommandNotFound{this.Args[0], err}\n\treturn err\n}\n\nvar ErrorLevelStr string\n\nfunc nvl(a *os.File, b *os.File) *os.File {\n\tif a != nil {\n\t\treturn a\n\t} else {\n\t\treturn b\n\t}\n}\n\nfunc (this *Interpreter) spawnvp_noerrmsg() (ErrorLevel, error) {\n\t\/\/ command is empty.\n\tif len(this.Args) <= 0 {\n\t\treturn NOERROR, nil\n\t}\n\tif dbg {\n\t\tprint(\"spawnvp_noerrmsg('\", this.Args[0], \"')\\n\")\n\t}\n\n\t\/\/ aliases and lua-commands\n\tif errorlevel, err := hook(this); errorlevel != THROUGH || err != nil {\n\t\treturn errorlevel, err\n\t}\n\n\t\/\/ command not found hook\n\tvar err error\n\tthis.Path, err = exec.LookPath(this.Args[0])\n\tif err != nil {\n\t\treturn ErrorLevel(255), OnCommandNotFound(this, err)\n\t}\n\n\tif WildCardExpansionAlways {\n\t\tthis.Args = findfile.Globs(this.Args)\n\t}\n\n\t\/\/ executable-file\n\tif FLAG_AMP2NEWCONSOLE {\n\t\tif this.SysProcAttr != nil && (this.SysProcAttr.CreationFlags&CREATE_NEW_CONSOLE) != 0 {\n\t\t\terr = this.Start()\n\t\t\treturn ErrorLevel(0), err\n\t\t}\n\t}\n\terr = this.Run()\n\n\terrorlevel, errorlevelOk := dos.GetErrorLevel(&this.Cmd)\n\tif errorlevelOk {\n\t\treturn ErrorLevel(errorlevel), err\n\t} else {\n\t\treturn ErrorLevel(255), err\n\t}\n}\n\nfunc (this *Interpreter) Spawnvp() (ErrorLevel, error) {\n\terrorlevel, err := this.spawnvp_noerrmsg()\n\tif err != nil {\n\t\tfmt.Fprintln(this.Stderr, err.Error())\n\t}\n\treturn errorlevel, err\n}\n\ntype result_t struct {\n\tNextValue ErrorLevel\n\tError error\n}\n\nvar 
pipeSeq uint = 0\n\nfunc (this *Interpreter) Interpret(text string) (errorlevel ErrorLevel, err error) {\n\tif dbg {\n\t\tprint(\"Interpret('\", text, \"')\\n\")\n\t}\n\tif this == nil {\n\t\treturn ErrorLevel(255), errors.New(\"Fatal Error: Interpret: instance is nil\")\n\t}\n\terrorlevel = NOERROR\n\terr = nil\n\n\tstatements, statementsErr := Parse(text)\n\tif statementsErr != nil {\n\t\tif dbg {\n\t\t\tprint(\"Parse Error:\", statementsErr.Error(), \"\\n\")\n\t\t}\n\t\treturn NOERROR, statementsErr\n\t}\n\tif argsHook != nil {\n\t\tif dbg {\n\t\t\tprint(\"call argsHook\\n\")\n\t\t}\n\t\tfor _, pipeline := range statements {\n\t\t\tfor _, state := range pipeline {\n\t\t\t\tstate.Args, err = argsHook(this, state.Args)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn ErrorLevel(255), err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif dbg {\n\t\t\tprint(\"done argsHook\\n\")\n\t\t}\n\t}\n\tfor _, pipeline := range statements {\n\t\tfor i, state := range pipeline {\n\t\t\tif state.Term == \"|\" && (i+1 >= len(pipeline) || len(pipeline[i+1].Args) <= 0) {\n\t\t\t\treturn ErrorLevel(255), errors.New(\"The syntax of the command is incorrect.\")\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, pipeline := range statements {\n\n\t\tvar pipeIn *os.File = nil\n\t\tpipeSeq++\n\t\tisBackGround := this.IsBackGround\n\t\tfor _, state := range pipeline {\n\t\t\tif state.Term == \"&\" {\n\t\t\t\tisBackGround = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tvar wg sync.WaitGroup\n\t\tfor i, state := range pipeline {\n\t\t\tif dbg {\n\t\t\t\tprint(i, \": pipeline loop(\", state.Args[0], \")\\n\")\n\t\t\t}\n\t\t\tcmd := new(Interpreter)\n\t\t\tcmd.PipeSeq[0] = pipeSeq\n\t\t\tcmd.PipeSeq[1] = uint(1 + i)\n\t\t\tcmd.IsBackGround = isBackGround\n\t\t\tcmd.Tag = this.Tag\n\t\t\tcmd.HookCount = this.HookCount\n\t\t\tcmd.SetStdin(nvl(this.Stdio[0], os.Stdin))\n\t\t\tcmd.SetStdout(nvl(this.Stdio[1], os.Stdout))\n\t\t\tcmd.SetStderr(nvl(this.Stdio[2], os.Stderr))\n\t\t\tcmd.OnClone = this.OnClone\n\t\t\tif this.OnClone != nil {\n\t\t\t\tif err := this.OnClone(cmd); err != nil {\n\t\t\t\t\treturn ErrorLevel(255), err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar err error = nil\n\n\t\t\tif pipeIn != nil {\n\t\t\t\tcmd.SetStdin(pipeIn)\n\t\t\t\tcmd.Closers = append(cmd.Closers, pipeIn)\n\t\t\t\tpipeIn = nil\n\t\t\t}\n\n\t\t\tif state.Term[0] == '|' {\n\t\t\t\tvar pipeOut *os.File\n\t\t\t\tpipeIn, pipeOut, err = os.Pipe()\n\t\t\t\tcmd.SetStdout(pipeOut)\n\t\t\t\tif state.Term == \"|&\" {\n\t\t\t\t\tcmd.SetStderr(pipeOut)\n\t\t\t\t}\n\t\t\t\tcmd.Closers = append(cmd.Closers, pipeOut)\n\t\t\t}\n\n\t\t\tfor _, red := range state.Redirect {\n\t\t\t\tvar fd *os.File\n\t\t\t\tfd, err = red.OpenOn(cmd)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn NOERROR, err\n\t\t\t\t}\n\t\t\t\tdefer fd.Close()\n\t\t\t}\n\n\t\t\tcmd.Args = state.Args\n\t\t\tcmd.RawArgs = state.RawArgs\n\t\t\tif i > 0 {\n\t\t\t\tcmd.IsBackGround = true\n\t\t\t}\n\t\t\tif i == len(pipeline)-1 && state.Term != \"&\" {\n\t\t\t\terrorlevel, err = cmd.Spawnvp()\n\t\t\t\tErrorLevelStr = errorlevel.String()\n\t\t\t\tcmd.Close()\n\t\t\t} else {\n\t\t\t\tif !isBackGround {\n\t\t\t\t\twg.Add(1)\n\t\t\t\t}\n\t\t\t\tgo func(cmd1 *Interpreter) {\n\t\t\t\t\tif isBackGround {\n\t\t\t\t\t\tif FLAG_AMP2NEWCONSOLE {\n\t\t\t\t\t\t\tif len(pipeline) == 1 {\n\t\t\t\t\t\t\t\tcmd1.SysProcAttr = &syscall.SysProcAttr{\n\t\t\t\t\t\t\t\t\tCreationFlags: CREATE_NEW_CONSOLE |\n\t\t\t\t\t\t\t\t\t\tCREATE_NEW_PROCESS_GROUP,\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tdefer 
wg.Done()\n\t\t\t\t\t}\n\t\t\t\t\tcmd1.Spawnvp()\n\t\t\t\t\tcmd1.Close()\n\t\t\t\t}(cmd)\n\t\t\t}\n\t\t}\n\t\tif !isBackGround {\n\t\t\twg.Wait()\n\t\t\tif len(pipeline) > 0 {\n\t\t\t\tswitch pipeline[len(pipeline)-1].Term {\n\t\t\t\tcase \"&&\":\n\t\t\t\t\tif errorlevel != 0 {\n\t\t\t\t\t\treturn errorlevel, nil\n\t\t\t\t\t}\n\t\t\t\tcase \"||\":\n\t\t\t\t\tif errorlevel == 0 {\n\t\t\t\t\t\treturn errorlevel, nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package ircclient\n\n\/\/ Handles basic IRC protocol messages (like PING)\n\nimport (\n\t\"log\"\n\t\"time\"\n)\n\ntype basicProtocol struct {\n\tic *IRCClient\n\ttimer *time.Timer\n\tlastping int64\n\tdone chan bool\n}\n\nfunc (bp *basicProtocol) Register(cl *IRCClient) {\n\tbp.ic = cl\n\tbp.done = make(chan bool)\n\t\/\/ Send a PING message every few minutes to detect locked-up\n\t\/\/ server connection\n\tgo func() {\n\t\tfor {\n\t\t\tsleep := time.NewTimer(120e9)\n\t\t\tselect {\n\t\t\tcase _ = <-bp.done:\n\t\t\t\treturn\n\t\t\tcase _ = <-sleep.C:\n\t\t\t}\n\t\t\tif bp.lastping != 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbp.lastping = time.Seconds()\n\t\t\tbp.ic.conn.Output <- \"PING :client\"\n\t\t\tbp.timer = time.NewTimer(60e9) \/\/ TODO\n\t\t\tgo func() {\n\t\t\t\tselect {\n\t\t\t\tcase _ = <-bp.timer.C:\n\t\t\t\t\tbp.ic.Disconnect(\"(Client) timer expired\")\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}()\n}\nfunc (bp *basicProtocol) String() string {\n\treturn \"basic\"\n}\nfunc (bp *basicProtocol) ProcessLine(msg *IRCMessage) {\n\tswitch msg.Command {\n\tcase \"PING\":\n\t\tif len(msg.Args) != 1 {\n\t\t\tlog.Printf(\"WARNING: Invalid PING received\")\n\t\t}\n\t\tbp.ic.conn.Output <- \"PONG :\" + msg.Args[0]\n\tcase \"PONG\":\n\t\tbp.lastping = 0\n\t\tbp.timer.Stop()\n\t}\n}\nfunc (bp *basicProtocol) Unregister() {\n\tbp.done <- true\n}\n\nfunc (bp *basicProtocol) Info() string {\n\treturn \"basic irc protocol (e.g. 
PING), implemented as plugin.\"\n}\n\nfunc (bp *basicProtocol) ProcessCommand(cmd *IRCCommand) {\n\t\/\/ TODO\n}\n<commit_msg>ircclient\/basicprotocol: add Usage<commit_after>package ircclient\n\n\/\/ Handles basic IRC protocol messages (like PING)\n\nimport (\n\t\"log\"\n\t\"time\"\n)\n\ntype basicProtocol struct {\n\tic *IRCClient\n\ttimer *time.Timer\n\tlastping int64\n\tdone chan bool\n}\n\nfunc (bp *basicProtocol) Register(cl *IRCClient) {\n\tbp.ic = cl\n\tbp.done = make(chan bool)\n\t\/\/ Send a PING message every few minutes to detect locked-up\n\t\/\/ server connection\n\tgo func() {\n\t\tfor {\n\t\t\tsleep := time.NewTimer(120e9)\n\t\t\tselect {\n\t\t\tcase _ = <-bp.done:\n\t\t\t\treturn\n\t\t\tcase _ = <-sleep.C:\n\t\t\t}\n\t\t\tif bp.lastping != 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbp.lastping = time.Seconds()\n\t\t\tbp.ic.conn.Output <- \"PING :client\"\n\t\t\tbp.timer = time.NewTimer(60e9) \/\/ TODO\n\t\t\tgo func() {\n\t\t\t\tselect {\n\t\t\t\tcase _ = <-bp.timer.C:\n\t\t\t\t\tbp.ic.Disconnect(\"(Client) timer expired\")\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}()\n}\n\nfunc (bp *basicProtocol) String() string {\n\treturn \"basic\"\n}\n\nfunc (bp *basicProtocol) Usage(cmd string) string {\n\t\/\/ stub, no commands here\n\treturn \"\"\n}\n\nfunc (bp *basicProtocol) ProcessLine(msg *IRCMessage) {\n\tswitch msg.Command {\n\tcase \"PING\":\n\t\tif len(msg.Args) != 1 {\n\t\t\tlog.Printf(\"WARNING: Invalid PING received\")\n\t\t}\n\t\tbp.ic.conn.Output <- \"PONG :\" + msg.Args[0]\n\tcase \"PONG\":\n\t\tbp.lastping = 0\n\t\tbp.timer.Stop()\n\t}\n}\nfunc (bp *basicProtocol) Unregister() {\n\tbp.done <- true\n}\n\nfunc (bp *basicProtocol) Info() string {\n\treturn \"basic irc protocol (e.g. PING), implemented as plugin.\"\n}\n\nfunc (bp *basicProtocol) ProcessCommand(cmd *IRCCommand) {\n\t\/\/ TODO\n}\n<|endoftext|>"} {"text":"<commit_before>package redis_test\n\ntype CleanupFunc func()\ntype CleanupFuncs []CleanupFunc\n\nfunc (fns CleanupFuncs) Cleanup() {\n\tfor i := len(fns) - 1; i >= 0; i-- {\n\t\tfns[i]()\n\t}\n}\n\nfunc (fns *CleanupFuncs) Register(fn CleanupFunc) {\n\t(*fns) = append(*fns, fn)\n}\n<commit_msg>remove unused file<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Go Authors. 
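\n\/\/\n\/\/ [editor's aside; illustrative sketch, not part of any record in this dump]\n\/\/ The Register loop in the basicProtocol record above is a keepalive\/watchdog\n\/\/ pattern written against the pre-Go1 time API (time.NewTimer(120e9),\n\/\/ time.Seconds()). A minimal modern-Go sketch of the same idea, shown as\n\/\/ comments; the pong channel is a hypothetical signal the PONG handler would send:\n\/\/\n\/\/ \tfunc watchdog(out chan<- string, pong <-chan struct{}, dead func(), done <-chan struct{}) {\n\/\/ \t\tfor {\n\/\/ \t\t\tselect {\n\/\/ \t\t\tcase <-done:\n\/\/ \t\t\t\treturn\n\/\/ \t\t\tcase <-time.After(120 * time.Second): \/\/ keepalive interval\n\/\/ \t\t\t}\n\/\/ \t\t\tout <- \"PING :client\"\n\/\/ \t\t\tselect {\n\/\/ \t\t\tcase <-pong: \/\/ reply arrived in time; connection is alive\n\/\/ \t\t\tcase <-time.After(60 * time.Second):\n\/\/ \t\t\t\tdead() \/\/ no PONG within the deadline: treat as locked up\n\/\/ \t\t\tcase <-done:\n\/\/ \t\t\t\treturn\n\/\/ \t\t\t}\n\/\/ \t\t}\n\/\/ \t}\n\/\/\n\/\/ 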
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file defines a refactoring to rename variables, functions, methods,\n\/\/ structs, interfaces, and packages.\n\npackage refactoring\n\nimport (\n\t\"go\/ast\"\n\t\"regexp\"\n\t\"runtime\"\n\t\/\/\"fmt\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"code.google.com\/p\/go.tools\/go\/types\"\n\t\"golang-refactoring.org\/go-doctor\/analysis\/names\"\n\t\"golang-refactoring.org\/go-doctor\/filesystem\"\n\t\"golang-refactoring.org\/go-doctor\/text\"\n)\n\n\/\/ A Rename refactoring is used to rename identifiers in Go programs.\ntype Rename struct {\n\trefactoringBase\n\tnewName string\n\tsignature *types.Signature\n}\n\nfunc (r *Rename) Description() *Description {\n\treturn &Description{\n\t\tName: \"Rename\",\n\t\tSynopsis: \"Changes the name of an identifier\",\n\t\tUsage: \"<new_name>\",\n\t\tMultifile: true,\n\t\tParams: []Parameter{Parameter{\n\t\t\tLabel: \"New Name:\",\n\t\t\tPrompt: \"What to rename this identifier to.\",\n\t\t\tDefaultValue: \"\",\n\t\t}},\n\t\tQuality: Testing,\n\t}\n}\n\nfunc (r *Rename) Run(config *Config) *Result {\n\tr.refactoringBase.Run(config)\n\tif !validateArgs(config, r.Description(), r.Log) {\n\t\treturn &r.Result\n\t}\n\tr.Log.ChangeInitialErrorsToWarnings()\n\tif r.Log.ContainsErrors() {\n\t\treturn &r.Result\n\t}\n\n\tr.newName = config.Args[0].(string)\n\tif !r.isIdentifierValid(r.newName) {\n\t\tr.Log.Errorf(\"The new name %s is not a valid Go identifier\", r.newName)\n\t\treturn &r.Result\n\t}\n\n\tif r.selectedNode == nil {\n\t\tr.Log.Error(\"Please select an identifier to rename.\")\n\t\tr.Log.AssociatePos(r.selectionStart, r.selectionEnd)\n\t\treturn &r.Result\n\t}\n\n\tif r.newName == \"\" {\n\t\tr.Log.Error(\"newName cannot be empty\")\n\t\treturn &r.Result\n\t}\n\n\tswitch ident := r.selectedNode.(type) {\n\tcase *ast.Ident:\n\t\tif ast.IsExported(ident.Name) && !ast.IsExported(r.newName) {\n\t\t\tr.Log.Error(\"newName cannot be non Exportable if selected identifier name is Exportable\")\n\t\t\treturn &r.Result\n\t\t}\n\t\tif ident.Name == \"main\" && r.pkgInfo(r.fileContaining(ident)).Pkg.Name() == \"main\" {\n\t\t\tr.Log.Error(\"cannot rename main function inside main package ,it eliminates the program entry \t\t\t\t\t\t\tpoint\")\n\t\t\tr.Log.AssociateNode(ident)\n\t\t\treturn &r.Result\n\t\t}\n\n\t\tr.rename(ident)\n\t\tr.updateLog(config, false)\n\tcase *ast.BasicLit:\n\t\tfor pkg, _ := range r.program.AllPackages {\n\t\t\tif pkg.Name() == strings.Replace(ident.Value, \"\\\"\", \"\", 2) {\n\t\t\t\tsearch := names.NewSearchEngine(r.program)\n\t\t\t\tsearchResult := search.PackageRename(pkg.Name())\n\t\t\t\tr.addOccurrences(searchResult)\n\t\t\t\tr.addFileSystemChanges(searchResult, pkg.Name())\n\t\t\t}\n\t\t}\n\t\tr.updateLog(config, false)\n\tdefault:\n\t\tr.Log.Error(\"Please select an identifier to rename.\")\n\t\tr.Log.AssociatePos(r.selectionStart, r.selectionEnd)\n\t}\n\treturn &r.Result\n}\n\nfunc (r *Rename) isIdentifierValid(newName string) bool {\n\tmatched, err := regexp.MatchString(\"^\\\\p{L}[\\\\p{L}\\\\p{N}]*$\", newName)\n\tif matched && err == nil {\n\t\tkeyword, err := regexp.MatchString(\"^(break|case|chan|const|continue|default|defer|else|fallthrough|for|func|go|goto|if|import|interface|map|package|range|return|select|struct|switch|type|var)$\", newName)\n\t\treturn !keyword && err == nil\n\t}\n\treturn false\n}\n\nfunc (r 
*Rename) rename(ident *ast.Ident) {\n\tif !r.identExists(ident) {\n\t\tsearch := names.NewSearchEngine(r.program)\n\t\tsearchResult, err := search.FindOccurrences(ident)\n\t\tif err != nil {\n\t\t\tr.Log.Error(err)\n\t\t\treturn\n\t\t}\n\n\t\tr.addOccurrences(searchResult)\n\t\tif search.IsPackageName(ident) {\n\t\t\tr.addFileSystemChanges(searchResult, ident.Name)\n\t\t}\n\t\t\/\/TODO: r.checkForErrors()\n\t\treturn\n\t}\n\n}\n\n\/\/IdentifierExists checks if there already exists an Identifier with the newName,with in the scope of the oldname.\nfunc (r *Rename) identExists(ident *ast.Ident) bool {\n\n\tobj := r.pkgInfo(r.fileContaining(ident)).ObjectOf(ident)\n\tsearch := names.NewSearchEngine(r.program)\n\n\tif obj == nil && !search.IsPackageName(ident) {\n\n\t\tr.Log.Error(\"unable to find declaration of selected identifier\")\n\t\tr.Log.AssociateNode(ident)\n\t\treturn true\n\t}\n\n\tif search.IsPackageName(ident) {\n\t\treturn false\n\t}\n\tidentscope := obj.Parent()\n\n\tif names.IsMethod(obj) {\n\t\tobjfound, _, pointerindirections := types.LookupFieldOrMethod(names.MethodReceiver(obj).Type(), obj.Pkg(), r.newName)\n\t\tif names.IsMethod(objfound) && pointerindirections {\n\t\t\tr.Log.Error(\"newname already exists in scope,please select other value for the newname\")\n\t\t\tr.Log.AssociateNode(ident)\n\t\t\treturn true\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif identscope.LookupParent(r.newName) != nil {\n\t\tr.Log.Error(\"newname already exists in scope,please select other value for the newname\")\n\t\tr.Log.AssociateNode(ident)\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n\/\/addOccurrences adds all the Occurences to the editset\nfunc (r *Rename) addOccurrences(allOccurrences map[string][]text.Extent) {\n\tfor filename, occurrences := range allOccurrences {\n\t\tif isInGoRoot(filename) {\n\t\t\tr.Log.Warnf(\"Occurrences in %s will not be renamed\",\n\t\t\t\tfilename)\n\t\t} else {\n\t\t\tfor _, occurrence := range occurrences {\n\t\t\t\tif r.Edits[filename] == nil {\n\t\t\t\t\tr.Edits[filename] = text.NewEditSet()\n\t\t\t\t}\n\t\t\t\tr.Edits[filename].Add(occurrence, r.newName)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc isInGoRoot(absPath string) bool {\n\tgoRoot := runtime.GOROOT()\n\tif !strings.HasSuffix(goRoot, string(filepath.Separator)) {\n\t\tgoRoot += string(filepath.Separator)\n\t}\n\treturn strings.HasPrefix(absPath, goRoot)\n}\n\nfunc (r *Rename) addFileSystemChanges(allOccurrences map[string][]text.Extent, identName string) {\n\tfor filename, _ := range allOccurrences {\n\n\t\tif filepath.Base(filepath.Dir(filename)) == identName && allFilesinDirectoryhaveSamePkg(filepath.Dir(filename), identName) {\n\t\t\tchg := &filesystem.Rename{filepath.Dir(filename), r.newName}\n\t\t\tr.FSChanges = append(r.FSChanges, chg)\n\t\t}\n\t}\n}\n\nfunc allFilesinDirectoryhaveSamePkg(directorypath string, identName string) bool {\n\n\tvar renamefile bool = false\n\tfileInfos, _ := ioutil.ReadDir(directorypath)\n\n\tfor _, file := range fileInfos {\n\t\tif strings.HasSuffix(file.Name(), \".go\") {\n\t\t\tfset := token.NewFileSet()\n\t\t\tf, err := parser.ParseFile(fset, filepath.Join(directorypath, file.Name()), nil, 0)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tif f.Name.Name == identName {\n\t\t\t\trenamefile = true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn renamefile\n}\n<commit_msg>Updates for latest go.tools, see below<commit_after>\/\/ Copyright 2014 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file defines a refactoring to rename variables, functions, methods,\n\/\/ structs, interfaces, and packages.\n\npackage refactoring\n\nimport (\n\t\"go\/ast\"\n\t\"regexp\"\n\t\"runtime\"\n\t\/\/\"fmt\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"code.google.com\/p\/go.tools\/go\/types\"\n\t\"golang-refactoring.org\/go-doctor\/analysis\/names\"\n\t\"golang-refactoring.org\/go-doctor\/filesystem\"\n\t\"golang-refactoring.org\/go-doctor\/text\"\n)\n\n\/\/ A Rename refactoring is used to rename identifiers in Go programs.\ntype Rename struct {\n\trefactoringBase\n\tnewName string\n\tsignature *types.Signature\n}\n\nfunc (r *Rename) Description() *Description {\n\treturn &Description{\n\t\tName: \"Rename\",\n\t\tSynopsis: \"Changes the name of an identifier\",\n\t\tUsage: \"<new_name>\",\n\t\tMultifile: true,\n\t\tParams: []Parameter{Parameter{\n\t\t\tLabel: \"New Name:\",\n\t\t\tPrompt: \"What to rename this identifier to.\",\n\t\t\tDefaultValue: \"\",\n\t\t}},\n\t\tQuality: Testing,\n\t}\n}\n\nfunc (r *Rename) Run(config *Config) *Result {\n\tr.refactoringBase.Run(config)\n\tif !validateArgs(config, r.Description(), r.Log) {\n\t\treturn &r.Result\n\t}\n\tr.Log.ChangeInitialErrorsToWarnings()\n\tif r.Log.ContainsErrors() {\n\t\treturn &r.Result\n\t}\n\n\tr.newName = config.Args[0].(string)\n\tif !r.isIdentifierValid(r.newName) {\n\t\tr.Log.Errorf(\"The new name %s is not a valid Go identifier\", r.newName)\n\t\treturn &r.Result\n\t}\n\n\tif r.selectedNode == nil {\n\t\tr.Log.Error(\"Please select an identifier to rename.\")\n\t\tr.Log.AssociatePos(r.selectionStart, r.selectionEnd)\n\t\treturn &r.Result\n\t}\n\n\tif r.newName == \"\" {\n\t\tr.Log.Error(\"newName cannot be empty\")\n\t\treturn &r.Result\n\t}\n\n\tswitch ident := r.selectedNode.(type) {\n\tcase *ast.Ident:\n\t\tif ast.IsExported(ident.Name) && !ast.IsExported(r.newName) {\n\t\t\tr.Log.Error(\"newName cannot be non Exportable if selected identifier name is Exportable\")\n\t\t\treturn &r.Result\n\t\t}\n\t\tif ident.Name == \"main\" && r.pkgInfo(r.fileContaining(ident)).Pkg.Name() == \"main\" {\n\t\t\tr.Log.Error(\"cannot rename main function inside main package ,it eliminates the program entry \t\t\t\t\t\t\tpoint\")\n\t\t\tr.Log.AssociateNode(ident)\n\t\t\treturn &r.Result\n\t\t}\n\n\t\tr.rename(ident)\n\t\tr.updateLog(config, false)\n\tcase *ast.BasicLit:\n\t\tfor pkg, _ := range r.program.AllPackages {\n\t\t\tif pkg.Name() == strings.Replace(ident.Value, \"\\\"\", \"\", 2) {\n\t\t\t\tsearch := names.NewSearchEngine(r.program)\n\t\t\t\tsearchResult := search.PackageRename(pkg.Name())\n\t\t\t\tr.addOccurrences(searchResult)\n\t\t\t\tr.addFileSystemChanges(searchResult, pkg.Name())\n\t\t\t}\n\t\t}\n\t\tr.updateLog(config, false)\n\tdefault:\n\t\tr.Log.Error(\"Please select an identifier to rename.\")\n\t\tr.Log.AssociatePos(r.selectionStart, r.selectionEnd)\n\t}\n\treturn &r.Result\n}\n\nfunc (r *Rename) isIdentifierValid(newName string) bool {\n\tmatched, err := regexp.MatchString(\"^\\\\p{L}[\\\\p{L}\\\\p{N}]*$\", newName)\n\tif matched && err == nil {\n\t\tkeyword, err := regexp.MatchString(\"^(break|case|chan|const|continue|default|defer|else|fallthrough|for|func|go|goto|if|import|interface|map|package|range|return|select|struct|switch|type|var)$\", newName)\n\t\treturn !keyword && err == nil\n\t}\n\treturn false\n}\n\nfunc (r 
*Rename) rename(ident *ast.Ident) {\n\tif !r.identExists(ident) {\n\t\tsearch := names.NewSearchEngine(r.program)\n\t\tsearchResult, err := search.FindOccurrences(ident)\n\t\tif err != nil {\n\t\t\tr.Log.Error(err)\n\t\t\treturn\n\t\t}\n\n\t\tr.addOccurrences(searchResult)\n\t\tif search.IsPackageName(ident) {\n\t\t\tr.addFileSystemChanges(searchResult, ident.Name)\n\t\t}\n\t\t\/\/TODO: r.checkForErrors()\n\t\treturn\n\t}\n\n}\n\n\/\/IdentifierExists checks if there already exists an Identifier with the newName,with in the scope of the oldname.\nfunc (r *Rename) identExists(ident *ast.Ident) bool {\n\tobj := r.pkgInfo(r.fileContaining(ident)).ObjectOf(ident)\n\tsearch := names.NewSearchEngine(r.program)\n\n\tif obj == nil && !search.IsPackageName(ident) {\n\n\t\tr.Log.Error(\"unable to find declaration of selected identifier\")\n\t\tr.Log.AssociateNode(ident)\n\t\treturn true\n\t}\n\n\tif search.IsPackageName(ident) {\n\t\treturn false\n\t}\n\tidentscope := obj.Parent()\n\n\tif names.IsMethod(obj) {\n\t\tobjfound, _, pointerindirections := types.LookupFieldOrMethod(names.MethodReceiver(obj).Type(), true, obj.Pkg(), r.newName)\n\t\tif names.IsMethod(objfound) && pointerindirections {\n\t\t\tr.Log.Error(\"newname already exists in scope,please select other value for the newname\")\n\t\t\tr.Log.AssociateNode(ident)\n\t\t\treturn true\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif identscope.LookupParent(r.newName) != nil {\n\t\tr.Log.Error(\"newname already exists in scope,please select other value for the newname\")\n\t\tr.Log.AssociateNode(ident)\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n\/\/addOccurrences adds all the Occurences to the editset\nfunc (r *Rename) addOccurrences(allOccurrences map[string][]text.Extent) {\n\tfor filename, occurrences := range allOccurrences {\n\t\tif isInGoRoot(filename) {\n\t\t\tr.Log.Warnf(\"Occurrences in %s will not be renamed\",\n\t\t\t\tfilename)\n\t\t} else {\n\t\t\tfor _, occurrence := range occurrences {\n\t\t\t\tif r.Edits[filename] == nil {\n\t\t\t\t\tr.Edits[filename] = text.NewEditSet()\n\t\t\t\t}\n\t\t\t\tr.Edits[filename].Add(occurrence, r.newName)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc isInGoRoot(absPath string) bool {\n\tgoRoot := runtime.GOROOT()\n\tif !strings.HasSuffix(goRoot, string(filepath.Separator)) {\n\t\tgoRoot += string(filepath.Separator)\n\t}\n\treturn strings.HasPrefix(absPath, goRoot)\n}\n\nfunc (r *Rename) addFileSystemChanges(allOccurrences map[string][]text.Extent, identName string) {\n\tfor filename, _ := range allOccurrences {\n\n\t\tif filepath.Base(filepath.Dir(filename)) == identName && allFilesinDirectoryhaveSamePkg(filepath.Dir(filename), identName) {\n\t\t\tchg := &filesystem.Rename{filepath.Dir(filename), r.newName}\n\t\t\tr.FSChanges = append(r.FSChanges, chg)\n\t\t}\n\t}\n}\n\nfunc allFilesinDirectoryhaveSamePkg(directorypath string, identName string) bool {\n\n\tvar renamefile bool = false\n\tfileInfos, _ := ioutil.ReadDir(directorypath)\n\n\tfor _, file := range fileInfos {\n\t\tif strings.HasSuffix(file.Name(), \".go\") {\n\t\t\tfset := token.NewFileSet()\n\t\t\tf, err := parser.ParseFile(fset, filepath.Join(directorypath, file.Name()), nil, 0)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tif f.Name.Name == identName {\n\t\t\t\trenamefile = true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn renamefile\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Serviced Authors.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance 
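\n\/\/\n\/\/ [editor's aside; not part of any record in this dump] Context for the\n\/\/ \"Updates for latest go.tools\" commit that ends just above: the go\/types\n\/\/ lookup gained a parameter, changing from\n\/\/\n\/\/ \tLookupFieldOrMethod(T Type, pkg *Package, name string)\n\/\/\n\/\/ to\n\/\/\n\/\/ \tLookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string)\n\/\/\n\/\/ The new second argument (passed as true in the commit_after) reports whether\n\/\/ the receiver is addressable, which controls whether pointer-receiver methods\n\/\/ are included in the lookup.\n\/\/ 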
with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/control-center\/serviced\/utils\"\n)\n\ntype ThinPoolCreate struct {\n\tArgs struct {\n\t\tPurpose string `description:\"Purpose of the thin pool (docker|serviced)\"`\n\t\tDevices []string `description:\"Block devices to use\" required:\"1\"`\n\t} `positional-args:\"yes\" required:\"yes\"`\n}\n\ntype LogicalVolumeInfo struct {\n\tName string\n\tKernelMajor uint\n\tKernelMinor uint\n}\n\n\/\/ runCommand runs the command and returns the stdout, stderr, exit code, and error.\n\/\/ If the command ran but returned non-zero, the error is nil\nfunc runCommand(cmd *exec.Cmd) (stdout string, stderr string, exitCode int, err error) {\n\tvar stderrBuffer bytes.Buffer\n\tvar stdoutBuffer bytes.Buffer\n\tcmd.Stderr = &stderrBuffer\n\tcmd.Stdout = &stdoutBuffer\n\tcmdErr := cmd.Run()\n\texitCode, success := utils.GetExitStatus(cmdErr)\n\tif success {\n\t\tcmdErr = nil\n\t}\n\treturn stdoutBuffer.String(), stderrBuffer.String(), exitCode, cmdErr\n}\n\nfunc (c *ThinPoolCreate) Execute(args []string) error {\n\tApp.initializeLogging()\n\tpurpose := c.Args.Purpose\n\tdevices := c.Args.Devices\n\tlogger := log.WithFields(log.Fields{\n\t\t\"purpose\": purpose,\n\t\t\"devices\": devices,\n\t})\n\tif purpose != \"serviced\" && purpose != \"docker\" {\n\t\tlogger.Fatal(\"Purpose must be one of (docker, serviced)\")\n\t}\n\n\tlogger.Info(\"Creating thin-pool\")\n\tthinPoolName, err := createThinPool(purpose, devices)\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\n\tfmt.Printf(\"Created thin-pool device '%s'\\n\", thinPoolName)\n\n\treturn nil\n}\n\nfunc createThinPool(purpose string, devices []string) (string, error) {\n\tif err := ensurePhysicalDevices(devices); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvolumeGroup := purpose\n\tif err := createVolumeGroup(volumeGroup, devices); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tmetadataVolume, err := createMetadataVolume(volumeGroup)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdataVolume, err := createDataVolume(volumeGroup)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = convertToThinPool(volumeGroup, dataVolume, metadataVolume)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tthinPoolName, err := getThinPoolNameForLogicalVolume(volumeGroup, dataVolume)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn thinPoolName, nil\n}\n\nfunc ensurePhysicalDevices(devices []string) error {\n\tfor _, device := range devices {\n\t\tcmd := exec.Command(\"pvs\", device)\n\t\t_, _, exitCode, err := runCommand(cmd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif exitCode == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\targs := []string{\"pvcreate\", device}\n\t\tlog.Info(strings.Join(args, \" \"))\n\t\tcmd = exec.Command(args[0], args[1:]...)\n\t\tstdout, stderr, exitCode, err := runCommand(cmd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif exitCode != 0 {\n\t\t\treturn fmt.Errorf(\"Error(%d) running 
'%s':\\n%s\",\n\t\t\t\texitCode, strings.Join(args, \" \"), stderr)\n\t\t}\n\t\tlog.Info(stdout)\n\t}\n\treturn nil\n}\n\nfunc createVolumeGroup(volumeGroup string, devices []string) error {\n\targs := append([]string{\"vgcreate\", volumeGroup}, devices...)\n\tlog.Info(strings.Join(args, \" \"))\n\tcmd := exec.Command(args[0], args[1:]...)\n\tstdout, stderr, exitCode, err := runCommand(cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif exitCode != 0 {\n\t\treturn fmt.Errorf(\"Error(%d) running '%s':\\n%s\",\n\t\t\texitCode, strings.Join(args, \" \"), stderr)\n\t}\n\tlog.Info(stdout)\n\treturn nil\n}\n\nfunc createMetadataVolume(volumeGroup string) (string, error) {\n\tunits := \"s\" \/\/ volume size will be measured in sectors\n\ttotalSize, err := getVolumeGroupSize(volumeGroup, units)\n\tmetadataSize := (totalSize + 999) \/ 1000\n\tmetadataName := volumeGroup + \"-meta\"\n\n\targs := []string{\"lvcreate\",\n\t\t\"--size\", fmt.Sprintf(\"%d%s\", metadataSize, units),\n\t\t\"--name\", metadataName,\n\t\tvolumeGroup}\n\tlog.Info(strings.Join(args, \" \"))\n\tcmd := exec.Command(args[0], args[1:]...)\n\tstdout, stderr, exitCode, err := runCommand(cmd)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif exitCode != 0 {\n\t\treturn \"\", fmt.Errorf(\"Error(%d) running '%s':\\n%s\",\n\t\t\texitCode, strings.Join(args, \" \"), stderr)\n\t}\n\tlog.Info(stdout)\n\treturn metadataName, err\n}\n\nfunc createDataVolume(volumeGroup string) (string, error) {\n\tunits := \"b\" \/\/ volume size will be measured in bytes\n\ttotalSize, err := getVolumeGroupSize(volumeGroup, units)\n\tdataSize := (totalSize*90\/100 + 511) &^ 511\n\tdataName := volumeGroup + \"-pool\"\n\n\targs := []string{\"lvcreate\",\n\t\t\"--size\", fmt.Sprintf(\"%d%s\", dataSize, units),\n\t\t\"--name\", dataName,\n\t\tvolumeGroup}\n\tlog.Info(strings.Join(args, \" \"))\n\tcmd := exec.Command(args[0], args[1:]...)\n\tstdout, stderr, exitCode, err := runCommand(cmd)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif exitCode != 0 {\n\t\treturn \"\", fmt.Errorf(\"Error(%d) running '%s':\\n%s\",\n\t\t\texitCode, strings.Join(args, \" \"), stderr)\n\t}\n\tlog.Info(stdout)\n\treturn dataName, err\n}\n\nfunc convertToThinPool(volumeGroup, dataVolume string, metadataVolume string) error {\n\targs := []string{\"lvconvert\",\n\t\t\"--zero\", \"n\",\n\t\t\"--thinpool\", fmt.Sprintf(\"%s\/%s\", volumeGroup, dataVolume),\n\t\t\"--poolmetadata\", fmt.Sprintf(\"%s\/%s\", volumeGroup, metadataVolume),\n\t}\n\tlog.Info(strings.Join(args, \" \"))\n\tcmd := exec.Command(args[0], args[1:]...)\n\tstdout, stderr, exitCode, err := runCommand(cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif exitCode != 0 {\n\t\treturn fmt.Errorf(\"Error(%d) running '%s':\\n%s\",\n\t\t\texitCode, strings.Join(args, \" \"), stderr)\n\t}\n\tlog.Info(stdout)\n\treturn nil\n}\n\nfunc getVolumeGroupSize(volumeGroup string, units string) (uint64, error) {\n\targs := []string{\"vgs\",\n\t\t\"--noheadings\",\n\t\t\"--nosuffix\",\n\t\t\"--units\", units,\n\t\t\"--options\", \"vg_free\",\n\t\tvolumeGroup}\n\tcmd := exec.Command(args[0], args[1:]...)\n\tstdout, stderr, exitCode, err := runCommand(cmd)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif exitCode != 0 {\n\t\treturn 0, fmt.Errorf(\"Error(%d) running '%s':\\n%s\",\n\t\t\texitCode, strings.Join(args, \" \"), stderr)\n\t}\n\n\tsizeString := strings.Trim(stdout, \" \\n\")\n\tsize, err := strconv.ParseUint(sizeString, 10, 64)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn size, nil\n}\n\nfunc 
getInfoForLogicalVolume(volumeGroup string, logicalVolume string) (LogicalVolumeInfo, error) {\n\tlvi := LogicalVolumeInfo{}\n\targs := []string{\"lvs\",\n\t\t\"--noheadings\",\n\t\t\"--nameprefixes\",\n\t\t\"--options\", \"lv_name,lv_kernel_major,lv_kernel_minor\",\n\t\tvolumeGroup}\n\tcmd := exec.Command(args[0], args[1:]...)\n\tstdout, stderr, exitCode, err := runCommand(cmd)\n\tif err != nil {\n\t\treturn lvi, err\n\t}\n\tif exitCode != 0 {\n\t\treturn lvi, fmt.Errorf(\"Error(%d) running '%s':\\n%s\",\n\t\t\texitCode, strings.Join(args, \" \"), stderr)\n\t}\n\n\tparseError := fmt.Errorf(\"Failed to parse command output:\\n'%s'\\n%s\",\n\t\tstrings.Join(args, \" \"), stdout)\n\n\t\/\/ Example command output:\n\t\/\/ LVM2_LV_NAME='docker-pool' LVM2_LV_KERNEL_MAJOR='252' LVM2_LV_KERNEL_MINOR='4'\n\tregexName := regexp.MustCompile(\"LVM2_LV_NAME='(.+?)'\")\n\tregexMajor := regexp.MustCompile(\"LVM2_LV_KERNEL_MAJOR='(.+?)'\")\n\tregexMinor := regexp.MustCompile(\"LVM2_LV_KERNEL_MINOR='(.+?)'\")\n\tfor _, line := range strings.Split(stdout, \"\\n\") {\n\t\tmatch := regexName.FindStringSubmatch(line)\n\t\tif len(match) != 2 || match[1] != logicalVolume {\n\t\t\tcontinue\n\t\t}\n\n\t\tmatch = regexMajor.FindStringSubmatch(line)\n\t\tif len(match) != 2 {\n\t\t\treturn lvi, parseError\n\t\t}\n\t\tmajor, err := strconv.ParseUint(match[1], 10, 32)\n\t\tif err != nil {\n\t\t\treturn lvi, parseError\n\t\t}\n\n\t\tmatch = regexMinor.FindStringSubmatch(line)\n\t\tif len(match) != 2 {\n\t\t\treturn lvi, parseError\n\t\t}\n\t\tminor, err := strconv.ParseUint(match[1], 10, 32)\n\t\tif err != nil {\n\t\t\treturn lvi, parseError\n\t\t}\n\n\t\tlvi.Name = logicalVolume\n\t\tlvi.KernelMajor = uint(major)\n\t\tlvi.KernelMinor = uint(minor)\n\t\treturn lvi, nil\n\t}\n\n\treturn lvi, fmt.Errorf(\"Failed to find logical volume: '%s'\", logicalVolume)\n}\n\nfunc getThinPoolNameForLogicalVolume(volumeGroup string, logicalVolume string) (string, error) {\n\tinfo, err := getInfoForLogicalVolume(volumeGroup, logicalVolume)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfilename := fmt.Sprintf(\"\/sys\/dev\/block\/%d:%d\/dm\/name\",\n\t\tinfo.KernelMajor, info.KernelMinor)\n\tcontents, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error reading %s: %s\", filename, err)\n\t}\n\treturn strings.Trim(string(contents), \"\\n\"), nil\n}\n<commit_msg>Normalize exit code error handling<commit_after>\/\/ Copyright 2015 The Serviced Authors.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/control-center\/serviced\/utils\"\n)\n\ntype ThinPoolCreate struct {\n\tArgs struct {\n\t\tPurpose string `description:\"Purpose of the thin pool (docker|serviced)\"`\n\t\tDevices []string `description:\"Block devices to use\" required:\"1\"`\n\t} `positional-args:\"yes\" 
required:\"yes\"`\n}\n\ntype LogicalVolumeInfo struct {\n\tName string\n\tKernelMajor uint\n\tKernelMinor uint\n}\n\n\/\/ runCommand runs the command and returns the stdout, stderr, exit code, and error.\n\/\/ If the command ran but returned non-zero, the error is nil\nfunc runCommand(cmd *exec.Cmd) (stdout string, stderr string, exitCode int, err error) {\n\tvar stderrBuffer bytes.Buffer\n\tvar stdoutBuffer bytes.Buffer\n\tcmd.Stderr = &stderrBuffer\n\tcmd.Stdout = &stdoutBuffer\n\tcmdErr := cmd.Run()\n\texitCode, success := utils.GetExitStatus(cmdErr)\n\tif success {\n\t\tcmdErr = nil\n\t}\n\treturn stdoutBuffer.String(), stderrBuffer.String(), exitCode, cmdErr\n}\n\nfunc checkCommand(cmd *exec.Cmd) (stdout string, stderr string, err error) {\n\tstdout, stderr, exitCode, err := runCommand(cmd)\n\tif err != nil {\n\t\treturn stdout, stderr, err\n\t}\n\tif exitCode != 0 {\n\t\treturn stdout, stderr, fmt.Errorf(\"Error(%d) running command '%s':\\n%s\",\n\t\t\texitCode, strings.Join(cmd.Args, \" \"), stderr)\n\t}\n\treturn stdout, stderr, nil\n}\n\nfunc (c *ThinPoolCreate) Execute(args []string) error {\n\tApp.initializeLogging()\n\tpurpose := c.Args.Purpose\n\tdevices := c.Args.Devices\n\tlogger := log.WithFields(log.Fields{\n\t\t\"purpose\": purpose,\n\t\t\"devices\": devices,\n\t})\n\tif purpose != \"serviced\" && purpose != \"docker\" {\n\t\tlogger.Fatal(\"Purpose must be one of (docker, serviced)\")\n\t}\n\n\tlogger.Info(\"Creating thin-pool\")\n\tthinPoolName, err := createThinPool(purpose, devices)\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\n\tfmt.Printf(\"Created thin-pool device '%s'\\n\", thinPoolName)\n\n\treturn nil\n}\n\nfunc createThinPool(purpose string, devices []string) (string, error) {\n\tif err := ensurePhysicalDevices(devices); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvolumeGroup := purpose\n\tif err := createVolumeGroup(volumeGroup, devices); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tmetadataVolume, err := createMetadataVolume(volumeGroup)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdataVolume, err := createDataVolume(volumeGroup)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = convertToThinPool(volumeGroup, dataVolume, metadataVolume)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tthinPoolName, err := getThinPoolNameForLogicalVolume(volumeGroup, dataVolume)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn thinPoolName, nil\n}\n\nfunc ensurePhysicalDevices(devices []string) error {\n\tfor _, device := range devices {\n\t\tcmd := exec.Command(\"pvs\", device)\n\t\t_, _, exitCode, err := runCommand(cmd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif exitCode == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\targs := []string{\"pvcreate\", device}\n\t\tlog.Info(strings.Join(args, \" \"))\n\t\tcmd = exec.Command(args[0], args[1:]...)\n\t\tstdout, _, err := checkCommand(cmd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Info(stdout)\n\t}\n\treturn nil\n}\n\nfunc createVolumeGroup(volumeGroup string, devices []string) error {\n\targs := append([]string{\"vgcreate\", volumeGroup}, devices...)\n\tlog.Info(strings.Join(args, \" \"))\n\tcmd := exec.Command(args[0], args[1:]...)\n\tstdout, _, err := checkCommand(cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Info(stdout)\n\treturn nil\n}\n\nfunc createMetadataVolume(volumeGroup string) (string, error) {\n\tunits := \"s\" \/\/ volume size will be measured in sectors\n\ttotalSize, err := getVolumeGroupSize(volumeGroup, units)\n\tmetadataSize := (totalSize + 999) \/ 
1000\n\tmetadataName := volumeGroup + \"-meta\"\n\n\targs := []string{\"lvcreate\",\n\t\t\"--size\", fmt.Sprintf(\"%d%s\", metadataSize, units),\n\t\t\"--name\", metadataName,\n\t\tvolumeGroup}\n\tlog.Info(strings.Join(args, \" \"))\n\tcmd := exec.Command(args[0], args[1:]...)\n\tstdout, _, err := checkCommand(cmd)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tlog.Info(stdout)\n\treturn metadataName, err\n}\n\nfunc createDataVolume(volumeGroup string) (string, error) {\n\tunits := \"b\" \/\/ volume size will be measured in bytes\n\ttotalSize, err := getVolumeGroupSize(volumeGroup, units)\n\tdataSize := (totalSize*90\/100 + 511) &^ 511\n\tdataName := volumeGroup + \"-pool\"\n\n\targs := []string{\"lvcreate\",\n\t\t\"--size\", fmt.Sprintf(\"%d%s\", dataSize, units),\n\t\t\"--name\", dataName,\n\t\tvolumeGroup}\n\tlog.Info(strings.Join(args, \" \"))\n\tcmd := exec.Command(args[0], args[1:]...)\n\tstdout, _, err := checkCommand(cmd)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tlog.Info(stdout)\n\treturn dataName, err\n}\n\nfunc convertToThinPool(volumeGroup, dataVolume string, metadataVolume string) error {\n\targs := []string{\"lvconvert\",\n\t\t\"--zero\", \"n\",\n\t\t\"--thinpool\", fmt.Sprintf(\"%s\/%s\", volumeGroup, dataVolume),\n\t\t\"--poolmetadata\", fmt.Sprintf(\"%s\/%s\", volumeGroup, metadataVolume),\n\t}\n\tlog.Info(strings.Join(args, \" \"))\n\tcmd := exec.Command(args[0], args[1:]...)\n\tstdout, _, err := checkCommand(cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Info(stdout)\n\treturn nil\n}\n\nfunc getVolumeGroupSize(volumeGroup string, units string) (uint64, error) {\n\targs := []string{\"vgs\",\n\t\t\"--noheadings\",\n\t\t\"--nosuffix\",\n\t\t\"--units\", units,\n\t\t\"--options\", \"vg_free\",\n\t\tvolumeGroup}\n\tcmd := exec.Command(args[0], args[1:]...)\n\tstdout, _, err := checkCommand(cmd)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tsizeString := strings.Trim(stdout, \" \\n\")\n\tsize, err := strconv.ParseUint(sizeString, 10, 64)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn size, nil\n}\n\nfunc getInfoForLogicalVolume(volumeGroup string, logicalVolume string) (LogicalVolumeInfo, error) {\n\tlvi := LogicalVolumeInfo{}\n\targs := []string{\"lvs\",\n\t\t\"--noheadings\",\n\t\t\"--nameprefixes\",\n\t\t\"--options\", \"lv_name,lv_kernel_major,lv_kernel_minor\",\n\t\tvolumeGroup}\n\tcmd := exec.Command(args[0], args[1:]...)\n\tstdout, _, err := checkCommand(cmd)\n\tif err != nil {\n\t\treturn lvi, err\n\t}\n\n\tparseError := fmt.Errorf(\"Failed to parse command output:\\n'%s'\\n%s\",\n\t\tstrings.Join(args, \" \"), stdout)\n\n\t\/\/ Example command output:\n\t\/\/ LVM2_LV_NAME='docker-pool' LVM2_LV_KERNEL_MAJOR='252' LVM2_LV_KERNEL_MINOR='4'\n\tregexName := regexp.MustCompile(\"LVM2_LV_NAME='(.+?)'\")\n\tregexMajor := regexp.MustCompile(\"LVM2_LV_KERNEL_MAJOR='(.+?)'\")\n\tregexMinor := regexp.MustCompile(\"LVM2_LV_KERNEL_MINOR='(.+?)'\")\n\tfor _, line := range strings.Split(stdout, \"\\n\") {\n\t\tmatch := regexName.FindStringSubmatch(line)\n\t\tif len(match) != 2 || match[1] != logicalVolume {\n\t\t\tcontinue\n\t\t}\n\n\t\tmatch = regexMajor.FindStringSubmatch(line)\n\t\tif len(match) != 2 {\n\t\t\treturn lvi, parseError\n\t\t}\n\t\tmajor, err := strconv.ParseUint(match[1], 10, 32)\n\t\tif err != nil {\n\t\t\treturn lvi, parseError\n\t\t}\n\n\t\tmatch = regexMinor.FindStringSubmatch(line)\n\t\tif len(match) != 2 {\n\t\t\treturn lvi, parseError\n\t\t}\n\t\tminor, err := strconv.ParseUint(match[1], 10, 32)\n\t\tif err != nil {\n\t\t\treturn lvi, 
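\n\t\t\/\/\n\t\t\/\/ [editor's aside; not part of any record in this dump] Worked example for\n\t\t\/\/ the sizing arithmetic in createMetadataVolume and createDataVolume above:\n\t\t\/\/ metadataSize = (totalSize + 999) \/ 1000 is a ceiling division reserving\n\t\t\/\/ roughly 0.1% of the volume group, e.g. totalSize = 1000001 sectors gives\n\t\t\/\/ (1000001 + 999) \/ 1000 = 1001 sectors. dataSize = (totalSize*90\/100 + 511) &^ 511\n\t\t\/\/ takes 90% and rounds up to the next 512-byte multiple, e.g. totalSize = 1000\n\t\t\/\/ bytes gives (900 + 511) &^ 511 = 1411 &^ 511 = 1024.\n\t\t\/\/\n\t\t\t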
parseError\n\t\t}\n\n\t\tlvi.Name = logicalVolume\n\t\tlvi.KernelMajor = uint(major)\n\t\tlvi.KernelMinor = uint(minor)\n\t\treturn lvi, nil\n\t}\n\n\treturn lvi, fmt.Errorf(\"Failed to find logical volume: '%s'\", logicalVolume)\n}\n\nfunc getThinPoolNameForLogicalVolume(volumeGroup string, logicalVolume string) (string, error) {\n\tinfo, err := getInfoForLogicalVolume(volumeGroup, logicalVolume)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfilename := fmt.Sprintf(\"\/sys\/dev\/block\/%d:%d\/dm\/name\",\n\t\tinfo.KernelMajor, info.KernelMinor)\n\tcontents, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error reading %s: %s\", filename, err)\n\t}\n\treturn strings.Trim(string(contents), \"\\n\"), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package suite\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"qa\/runner\"\n\t\"qa\/runner\/server\"\n\t\"qa\/tapjio\"\n)\n\ntype testEventUnion struct {\n\ttestBegan *tapjio.TestStartedEvent\n\ttestEvent *tapjio.TestEvent\n\ttestError error\n}\n\ntype testSuiteRunner struct {\n\tseed int\n\trunners []runner.TestRunner\n\tcount int\n\tsrv *server.Server\n}\n\nfunc NewTestSuiteRunner(seed int,\n\tsrv *server.Server,\n\trunners []runner.TestRunner) *testSuiteRunner {\n\n\tcount := 0\n\tfor _, runner := range runners {\n\t\tcount += runner.TestCount()\n\t}\n\n\treturn &testSuiteRunner{\n\t\tseed: seed,\n\t\trunners: runners,\n\t\tcount: count,\n\t\tsrv: srv,\n\t}\n}\n\nfunc (self *testSuiteRunner) Run(\n\tworkerEnvs []map[string]string,\n\tvisitor tapjio.Visitor) (final tapjio.FinalEvent, err error) {\n\n\tnumWorkers := len(workerEnvs)\n\tstartTime := time.Now().UTC()\n\n\tsuite := tapjio.NewSuiteEvent(startTime, self.count, self.seed)\n\tfinal = *tapjio.NewFinalEvent(suite)\n\n\terr = visitor.SuiteStarted(*suite)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tfinal.Time = time.Now().UTC().Sub(startTime).Seconds()\n\n\t\tfinalErr := visitor.SuiteFinished(final)\n\t\tif err == nil {\n\t\t\terr = finalErr\n\t\t}\n\t}()\n\n\tvar testRunnerChan = make(chan runner.TestRunner, numWorkers)\n\n\t\/\/ Enqueue each testRunner on testRunnerChan\n\tgo func() {\n\t\t\/\/ Sort runners by test count. 
This heuristic helps our workers avoid being idle\n\t\t\/\/ near the end of the run by running testRunners with the most tests first, avoiding\n\t\t\/\/ scenarios where the last testRunner we run has many tests, causing the entire test\n\t\t\/\/ run to drag on needlessly while other workers are idle.\n\t\trunner.By(func(r1, r2 *runner.TestRunner) bool { return (*r2).TestCount() < (*r1).TestCount() }).Sort(self.runners)\n\n\t\tfor _, testRunner := range self.runners {\n\t\t\ttestRunnerChan <- testRunner\n\t\t}\n\t\tclose(testRunnerChan)\n\t}()\n\n\tvar abort = false\n\tvar traceChan = make(chan tapjio.TraceEvent, numWorkers)\n\tvar testChan = make(chan testEventUnion, numWorkers)\n\n\tvar awaitJobs sync.WaitGroup\n\tawaitJobs.Add(numWorkers)\n\n\tfor _, workerEnv := range workerEnvs {\n\t\tenv := workerEnv\n\t\tgo func() {\n\t\t\tdefer awaitJobs.Done()\n\t\t\tfor testRunner := range testRunnerChan {\n\t\t\t\tif abort {\n\t\t\t\t\tfor i := testRunner.TestCount(); i > 0; i-- {\n\t\t\t\t\t\ttestChan <- testEventUnion{testError: errors.New(\"already aborted\")}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tvar awaitRun sync.WaitGroup\n\t\t\t\t\tawaitRun.Add(1)\n\t\t\t\t\ttestRunner.Run(\n\t\t\t\t\t\tenv,\n\t\t\t\t\t\ttapjio.DecodingCallbacks{\n\t\t\t\t\t\t\tOnTestBegin: func(test tapjio.TestStartedEvent) error {\n\t\t\t\t\t\t\t\ttestChan <- testEventUnion{&test, nil, nil}\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tOnTest: func(test tapjio.TestEvent) error {\n\t\t\t\t\t\t\t\ttestChan <- testEventUnion{nil, &test, nil}\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tOnTrace: func(trace tapjio.TraceEvent) error {\n\t\t\t\t\t\t\t\ttraceChan <- trace\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tOnEnd: func(reason error) error {\n\t\t\t\t\t\t\t\tawaitRun.Done()\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t})\n\t\t\t\t\tawaitRun.Wait()\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\tgo func() {\n\t\tawaitJobs.Wait()\n\t\tclose(traceChan)\n\t}()\n\n\tfor traceChan != nil {\n\t\tselect {\n\t\tcase trace, ok := <-traceChan:\n\t\t\tif !ok {\n\t\t\t\ttraceChan = nil\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\terr = visitor.TraceEvent(trace)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase testEventUnion := <-testChan:\n\t\t\tbegin := testEventUnion.testBegan\n\t\t\tif begin != nil {\n\t\t\t\terr = visitor.TestStarted(*begin)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terr = testEventUnion.testError\n\t\t\t\tif err == nil {\n\t\t\t\t\ttest := testEventUnion.testEvent\n\t\t\t\t\tfinal.Counts.Increment(test.Status)\n\n\t\t\t\t\terr = visitor.TestFinished(*test)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tabort = true\n\t\t\t\t\tfinal.Counts.Increment(tapjio.Error)\n\t\t\t\t\tfmt.Fprintln(os.Stderr, \"Error:\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n<commit_msg>Fix another race in worker result processing<commit_after>package suite\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"qa\/runner\"\n\t\"qa\/runner\/server\"\n\t\"qa\/tapjio\"\n)\n\ntype testEventUnion struct {\n\ttrace *tapjio.TraceEvent\n\ttestBegan *tapjio.TestStartedEvent\n\ttestEvent *tapjio.TestEvent\n\ttestError error\n}\n\ntype testSuiteRunner struct {\n\tseed int\n\trunners []runner.TestRunner\n\tcount int\n\tsrv *server.Server\n}\n\nfunc NewTestSuiteRunner(seed int,\n\tsrv *server.Server,\n\trunners []runner.TestRunner) 
*testSuiteRunner {\n\n\tcount := 0\n\tfor _, runner := range runners {\n\t\tcount += runner.TestCount()\n\t}\n\n\treturn &testSuiteRunner{\n\t\tseed: seed,\n\t\trunners: runners,\n\t\tcount: count,\n\t\tsrv: srv,\n\t}\n}\n\nfunc (self *testSuiteRunner) Run(\n\tworkerEnvs []map[string]string,\n\tvisitor tapjio.Visitor) (final tapjio.FinalEvent, err error) {\n\n\tnumWorkers := len(workerEnvs)\n\tstartTime := time.Now().UTC()\n\n\tsuite := tapjio.NewSuiteEvent(startTime, self.count, self.seed)\n\tfinal = *tapjio.NewFinalEvent(suite)\n\n\terr = visitor.SuiteStarted(*suite)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tfinal.Time = time.Now().UTC().Sub(startTime).Seconds()\n\n\t\tfinalErr := visitor.SuiteFinished(final)\n\t\tif err == nil {\n\t\t\terr = finalErr\n\t\t}\n\t}()\n\n\tvar testRunnerChan = make(chan runner.TestRunner, numWorkers)\n\n\t\/\/ Enqueue each testRunner on testRunnerChan\n\tgo func() {\n\t\t\/\/ Sort runners by test count. This heuristic helps our workers avoid being idle\n\t\t\/\/ near the end of the run by running testRunners with the most tests first, avoiding\n\t\t\/\/ scenarios where the last testRunner we run has many tests, causing the entire test\n\t\t\/\/ run to drag on needlessly while other workers are idle.\n\t\trunner.By(func(r1, r2 *runner.TestRunner) bool { return (*r2).TestCount() < (*r1).TestCount() }).Sort(self.runners)\n\n\t\tfor _, testRunner := range self.runners {\n\t\t\ttestRunnerChan <- testRunner\n\t\t}\n\t\tclose(testRunnerChan)\n\t}()\n\n\tvar abort = false\n\tvar testChan = make(chan testEventUnion, numWorkers)\n\n\tvar awaitJobs sync.WaitGroup\n\tawaitJobs.Add(numWorkers)\n\n\tfor _, workerEnv := range workerEnvs {\n\t\tenv := workerEnv\n\t\tgo func() {\n\t\t\tdefer awaitJobs.Done()\n\t\t\tfor testRunner := range testRunnerChan {\n\t\t\t\tif abort {\n\t\t\t\t\tfor i := testRunner.TestCount(); i > 0; i-- {\n\t\t\t\t\t\ttestChan <- testEventUnion{testError: errors.New(\"already aborted\")}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tvar awaitRun sync.WaitGroup\n\t\t\t\t\tawaitRun.Add(1)\n\t\t\t\t\ttestRunner.Run(\n\t\t\t\t\t\tenv,\n\t\t\t\t\t\ttapjio.DecodingCallbacks{\n\t\t\t\t\t\t\tOnTestBegin: func(test tapjio.TestStartedEvent) error {\n\t\t\t\t\t\t\t\ttestChan <- testEventUnion{nil, &test, nil, nil}\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tOnTest: func(test tapjio.TestEvent) error {\n\t\t\t\t\t\t\t\ttestChan <- testEventUnion{nil, nil, &test, nil}\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tOnTrace: func(trace tapjio.TraceEvent) error {\n\t\t\t\t\t\t\t\ttestChan <- testEventUnion{&trace, nil, nil, nil}\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tOnEnd: func(reason error) error {\n\t\t\t\t\t\t\t\tawaitRun.Done()\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t})\n\t\t\t\t\tawaitRun.Wait()\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\tgo func() {\n\t\tawaitJobs.Wait()\n\t\tclose(testChan)\n\t}()\n\n\tfor testEventUnion := range testChan {\n\t\tif testEventUnion.trace != nil {\n\t\t\terr = visitor.TraceEvent(*testEventUnion.trace)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tbegin := testEventUnion.testBegan\n\t\tif begin != nil {\n\t\t\terr = visitor.TestStarted(*begin)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\terr = testEventUnion.testError\n\t\tif err == nil {\n\t\t\ttest := testEventUnion.testEvent\n\t\t\tfinal.Counts.Increment(test.Status)\n\n\t\t\terr = visitor.TestFinished(*test)\n\t\t\tif err != nil 
{\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif err != nil {\n\t\t\tabort = true\n\t\t\tfinal.Counts.Increment(tapjio.Error)\n\t\t\tfmt.Fprintln(os.Stderr, \"Error:\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package featuremgmt\n\nvar (\n\t\/\/ Register each toggle here\n\tstandardFeatureFlags = []FeatureFlag{\n\t\t{\n\t\t\tName: \"trimDefaults\",\n\t\t\tDescription: \"Use cue schema to remove values that will be applied automatically\",\n\t\t\tState: FeatureStateBeta,\n\t\t},\n\t\t{\n\t\t\tName: \"envelopeEncryption\",\n\t\t\tDescription: \"encrypt secrets\",\n\t\t\tState: FeatureStateBeta,\n\t\t},\n\n\t\t{\n\t\t\tName: \"httpclientprovider_azure_auth\",\n\t\t\tDescription: \"use http client for azure auth\",\n\t\t\tState: FeatureStateBeta,\n\t\t},\n\t\t{\n\t\t\tName: \"service-accounts\",\n\t\t\tDescription: \"support service accounts\",\n\t\t\tState: FeatureStateBeta,\n\t\t\tRequiresLicense: true,\n\t\t},\n\n\t\t{\n\t\t\tName: \"database_metrics\",\n\t\t\tDescription: \"Add prometheus metrics for database tables\",\n\t\t\tState: FeatureStateStable,\n\t\t},\n\t\t{\n\t\t\tName: \"dashboardPreviews\",\n\t\t\tDescription: \"Create and show thumbnails for dashboard search results\",\n\t\t\tState: FeatureStateAlpha,\n\t\t},\n\t\t{\n\t\t\tName: \"live-config\",\n\t\t\tDescription: \"Save grafana live configuration in SQL tables\",\n\t\t\tState: FeatureStateAlpha,\n\t\t},\n\t\t{\n\t\t\tName: \"live-pipeline\",\n\t\t\tDescription: \"enable a generic live processing pipeline\",\n\t\t\tState: FeatureStateAlpha,\n\t\t},\n\t\t{\n\t\t\tName: \"live-service-web-worker\",\n\t\t\tDescription: \"This will use a webworker thread to process events rather than the main thread\",\n\t\t\tState: FeatureStateAlpha,\n\t\t\tFrontendOnly: true,\n\t\t},\n\t\t{\n\t\t\tName: \"queryOverLive\",\n\t\t\tDescription: \"Use grafana live websocket to execute backend queries\",\n\t\t\tState: FeatureStateAlpha,\n\t\t\tFrontendOnly: true,\n\t\t},\n\t\t{\n\t\t\tName: \"tempoSearch\",\n\t\t\tDescription: \"Enable searching in tempo datasources\",\n\t\t\tState: FeatureStateBeta,\n\t\t\tFrontendOnly: true,\n\t\t},\n\t\t{\n\t\t\tName: \"tempoBackendSearch\",\n\t\t\tDescription: \"Use backend for tempo search\",\n\t\t\tState: FeatureStateBeta,\n\t\t},\n\t\t{\n\t\t\tName: \"tempoServiceGraph\",\n\t\t\tDescription: \"show service\",\n\t\t\tState: FeatureStateBeta,\n\t\t\tFrontendOnly: true,\n\t\t},\n\t\t{\n\t\t\tName: \"fullRangeLogsVolume\",\n\t\t\tDescription: \"Show full range logs volume in explore\",\n\t\t\tState: FeatureStateBeta,\n\t\t\tFrontendOnly: true,\n\t\t},\n\t\t{\n\t\t\tName: \"accesscontrol\",\n\t\t\tDescription: \"Support robust access control\",\n\t\t\tState: FeatureStateBeta,\n\t\t\tRequiresLicense: true,\n\t\t},\n\t\t{\n\t\t\tName: \"prometheus_azure_auth\",\n\t\t\tDescription: \"Use azure authentication for prometheus datasource\",\n\t\t\tState: FeatureStateBeta,\n\t\t},\n\t\t{\n\t\t\tName: \"newNavigation\",\n\t\t\tDescription: \"Try the next gen navigation model\",\n\t\t\tState: FeatureStateAlpha,\n\t\t},\n\t\t{\n\t\t\tName: \"showFeatureFlagsInUI\",\n\t\t\tDescription: \"Show feature flags in the settings UI\",\n\t\t\tState: FeatureStateAlpha,\n\t\t\tRequiresDevMode: true,\n\t\t},\n\t\t{\n\t\t\tName: \"disable_http_request_histogram\",\n\t\t\tDescription: \"Do not create histograms for http requests\",\n\t\t\tState: FeatureStateAlpha,\n\t\t},\n\t\t{\n\t\t\tName: \"validatedQueries\",\n\t\t\tDescription: \"only execute the query saved in a 
panel\",\n\t\t\tState: FeatureStateAlpha,\n\t\t\tRequiresDevMode: true,\n\t\t},\n\t}\n)\n<commit_msg>Accesscontrol: Feature does not need a license (#44517)<commit_after>package featuremgmt\n\nvar (\n\t\/\/ Register each toggle here\n\tstandardFeatureFlags = []FeatureFlag{\n\t\t{\n\t\t\tName: \"trimDefaults\",\n\t\t\tDescription: \"Use cue schema to remove values that will be applied automatically\",\n\t\t\tState: FeatureStateBeta,\n\t\t},\n\t\t{\n\t\t\tName: \"envelopeEncryption\",\n\t\t\tDescription: \"encrypt secrets\",\n\t\t\tState: FeatureStateBeta,\n\t\t},\n\n\t\t{\n\t\t\tName: \"httpclientprovider_azure_auth\",\n\t\t\tDescription: \"use http client for azure auth\",\n\t\t\tState: FeatureStateBeta,\n\t\t},\n\t\t{\n\t\t\tName: \"service-accounts\",\n\t\t\tDescription: \"support service accounts\",\n\t\t\tState: FeatureStateBeta,\n\t\t\tRequiresLicense: true,\n\t\t},\n\n\t\t{\n\t\t\tName: \"database_metrics\",\n\t\t\tDescription: \"Add prometheus metrics for database tables\",\n\t\t\tState: FeatureStateStable,\n\t\t},\n\t\t{\n\t\t\tName: \"dashboardPreviews\",\n\t\t\tDescription: \"Create and show thumbnails for dashboard search results\",\n\t\t\tState: FeatureStateAlpha,\n\t\t},\n\t\t{\n\t\t\tName: \"live-config\",\n\t\t\tDescription: \"Save grafana live configuration in SQL tables\",\n\t\t\tState: FeatureStateAlpha,\n\t\t},\n\t\t{\n\t\t\tName: \"live-pipeline\",\n\t\t\tDescription: \"enable a generic live processing pipeline\",\n\t\t\tState: FeatureStateAlpha,\n\t\t},\n\t\t{\n\t\t\tName: \"live-service-web-worker\",\n\t\t\tDescription: \"This will use a webworker thread to processes events rather than the main thread\",\n\t\t\tState: FeatureStateAlpha,\n\t\t\tFrontendOnly: true,\n\t\t},\n\t\t{\n\t\t\tName: \"queryOverLive\",\n\t\t\tDescription: \"Use grafana live websocket to execute backend queries\",\n\t\t\tState: FeatureStateAlpha,\n\t\t\tFrontendOnly: true,\n\t\t},\n\t\t{\n\t\t\tName: \"tempoSearch\",\n\t\t\tDescription: \"Enable searching in tempo datasources\",\n\t\t\tState: FeatureStateBeta,\n\t\t\tFrontendOnly: true,\n\t\t},\n\t\t{\n\t\t\tName: \"tempoBackendSearch\",\n\t\t\tDescription: \"Use backend for tempo search\",\n\t\t\tState: FeatureStateBeta,\n\t\t},\n\t\t{\n\t\t\tName: \"tempoServiceGraph\",\n\t\t\tDescription: \"show service\",\n\t\t\tState: FeatureStateBeta,\n\t\t\tFrontendOnly: true,\n\t\t},\n\t\t{\n\t\t\tName: \"fullRangeLogsVolume\",\n\t\t\tDescription: \"Show full range logs volume in explore\",\n\t\t\tState: FeatureStateBeta,\n\t\t\tFrontendOnly: true,\n\t\t},\n\t\t{\n\t\t\tName: \"accesscontrol\",\n\t\t\tDescription: \"Support robust access control\",\n\t\t\tState: FeatureStateBeta,\n\t\t},\n\t\t{\n\t\t\tName: \"prometheus_azure_auth\",\n\t\t\tDescription: \"Use azure authentication for prometheus datasource\",\n\t\t\tState: FeatureStateBeta,\n\t\t},\n\t\t{\n\t\t\tName: \"newNavigation\",\n\t\t\tDescription: \"Try the next gen navigation model\",\n\t\t\tState: FeatureStateAlpha,\n\t\t},\n\t\t{\n\t\t\tName: \"showFeatureFlagsInUI\",\n\t\t\tDescription: \"Show feature flags in the settings UI\",\n\t\t\tState: FeatureStateAlpha,\n\t\t\tRequiresDevMode: true,\n\t\t},\n\t\t{\n\t\t\tName: \"disable_http_request_histogram\",\n\t\t\tDescription: \"Do not create histograms for http requests\",\n\t\t\tState: FeatureStateAlpha,\n\t\t},\n\t\t{\n\t\t\tName: \"validatedQueries\",\n\t\t\tDescription: \"only execute the query saved in a panel\",\n\t\t\tState: FeatureStateAlpha,\n\t\t\tRequiresDevMode: true,\n\t\t},\n\t}\n)\n<|endoftext|>"} {"text":"<commit_before>package 
rockredis\n\nimport (\n\t\"bytes\"\n\n\t\"github.com\/absolute8511\/ZanRedisDB\/common\"\n\t\"github.com\/absolute8511\/gorocksdb\"\n)\n\ntype Iterator interface {\n\tNext()\n\tPrev()\n\tValid() bool\n\tSeek([]byte)\n\tSeekForPrev([]byte)\n\tSeekToFirst()\n\tSeekToLast()\n\tClose()\n\tRefKey() []byte\n\tKey() []byte\n\tRefValue() []byte\n\tValue() []byte\n\tNoTimestamp(vt byte)\n}\n\ntype Range struct {\n\tMin []byte\n\tMax []byte\n\tType uint8\n}\n\ntype Limit struct {\n\tOffset int\n\tCount int\n}\n\ntype DBIterator struct {\n\t*gorocksdb.Iterator\n\tsnap *gorocksdb.Snapshot\n\tro *gorocksdb.ReadOptions\n\tdb *gorocksdb.DB\n\tremoveTsType byte\n}\n\nfunc NewDBIterator(db *gorocksdb.DB, withSnap bool) (*DBIterator, error) {\n\tdb.RLock()\n\tdbit := &DBIterator{\n\t\tdb: db,\n\t}\n\treadOpts := gorocksdb.NewDefaultReadOptions()\n\treadOpts.SetFillCache(false)\n\treadOpts.SetVerifyChecksums(false)\n\tdbit.ro = readOpts\n\tvar err error\n\tif withSnap {\n\t\tdbit.snap, err = db.NewSnapshot()\n\t\tif err != nil {\n\t\t\tdbit.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\treadOpts.SetSnapshot(dbit.snap)\n\t}\n\tdbit.Iterator, err = db.NewIterator(readOpts)\n\tif err != nil {\n\t\tdbit.Close()\n\t\treturn nil, err\n\t}\n\treturn dbit, nil\n}\n\nfunc (it *DBIterator) RefKey() []byte {\n\treturn it.Iterator.Key().Data()\n}\n\nfunc (it *DBIterator) Key() []byte {\n\treturn it.Iterator.Key().Bytes()\n}\n\nfunc (it *DBIterator) RefValue() []byte {\n\tv := it.Iterator.Value().Data()\n\tif (it.removeTsType == KVType || it.removeTsType == HashType) && len(v) >= tsLen {\n\t\tv = v[:len(v)-tsLen]\n\t}\n\treturn v\n}\n\nfunc (it *DBIterator) Value() []byte {\n\tv := it.Iterator.Value().Bytes()\n\tif (it.removeTsType == KVType || it.removeTsType == HashType) && len(v) >= tsLen {\n\t\tv = v[:len(v)-tsLen]\n\t}\n\treturn v\n}\n\nfunc (it *DBIterator) NoTimestamp(vt byte) {\n\tit.removeTsType = vt\n}\n\nfunc (it *DBIterator) Close() {\n\tif it.Iterator != nil {\n\t\tit.Iterator.Close()\n\t}\n\tif it.ro != nil {\n\t\tit.ro.Destroy()\n\t}\n\tif it.snap != nil {\n\t\tit.snap.Release()\n\t}\n\tit.db.RUnlock()\n}\n\nfunc NewDBRangeLimitIterator(db *gorocksdb.DB, min []byte, max []byte, rtype uint8,\n\toffset int, count int, reverse bool) (*RangeLimitedIterator, error) {\n\tdbit, err := NewDBIterator(db, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !reverse {\n\t\treturn NewRangeLimitIterator(dbit, &Range{Min: min, Max: max, Type: rtype},\n\t\t\t&Limit{Offset: offset, Count: count}), nil\n\t} else {\n\t\treturn NewRevRangeLimitIterator(dbit, &Range{Min: min, Max: max, Type: rtype},\n\t\t\t&Limit{Offset: offset, Count: count}), nil\n\t}\n}\n\nfunc NewSnapshotDBRangeLimitIterator(db *gorocksdb.DB, min []byte, max []byte, rtype uint8,\n\toffset int, count int, reverse bool) (*RangeLimitedIterator, error) {\n\tdbit, err := NewDBIterator(db, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !reverse {\n\t\treturn NewRangeLimitIterator(dbit, &Range{Min: min, Max: max, Type: rtype},\n\t\t\t&Limit{Offset: offset, Count: count}), nil\n\t} else {\n\t\treturn NewRevRangeLimitIterator(dbit, &Range{Min: min, Max: max, Type: rtype},\n\t\t\t&Limit{Offset: offset, Count: count}), nil\n\t}\n}\n\nfunc NewDBRangeIterator(db *gorocksdb.DB, min []byte, max []byte, rtype uint8,\n\treverse bool) (*RangeLimitedIterator, error) {\n\tdbit, err := NewDBIterator(db, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !reverse {\n\t\treturn NewRangeIterator(dbit, &Range{Min: min, Max: max, Type: rtype}), nil\n\t} else 
{\n\t\treturn NewRevRangeIterator(dbit, &Range{Min: min, Max: max, Type: rtype}), nil\n\t}\n}\n\nfunc NewSnapshotDBRangeIterator(db *gorocksdb.DB, min []byte, max []byte, rtype uint8,\n\treverse bool) (*RangeLimitedIterator, error) {\n\tdbit, err := NewDBIterator(db, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !reverse {\n\t\treturn NewRangeIterator(dbit, &Range{Min: min, Max: max, Type: rtype}), nil\n\t} else {\n\t\treturn NewRevRangeIterator(dbit, &Range{Min: min, Max: max, Type: rtype}), nil\n\t}\n}\n\ntype RangeLimitedIterator struct {\n\tIterator\n\tl Limit\n\tr Range\n\t\/\/ maybe step should not auto increase, we need a count of the actual elements\n\tstep int\n\treverse bool\n}\n\nfunc (it *RangeLimitedIterator) Valid() bool {\n\tif it.l.Offset < 0 {\n\t\treturn false\n\t}\n\tif it.l.Count >= 0 && it.step >= it.l.Count {\n\t\treturn false\n\t}\n\tif !it.Iterator.Valid() {\n\t\treturn false\n\t}\n\n\tif !it.reverse {\n\t\tif it.r.Max != nil {\n\t\t\tr := bytes.Compare(it.Iterator.RefKey(), it.r.Max)\n\t\t\tif it.r.Type&common.RangeROpen > 0 {\n\t\t\t\treturn !(r >= 0)\n\t\t\t} else {\n\t\t\t\treturn !(r > 0)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif it.r.Min != nil {\n\t\t\tr := bytes.Compare(it.Iterator.RefKey(), it.r.Min)\n\t\t\tif it.r.Type&common.RangeLOpen > 0 {\n\t\t\t\treturn !(r <= 0)\n\t\t\t} else {\n\t\t\t\treturn !(r < 0)\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (it *RangeLimitedIterator) Next() {\n\tit.step++\n\tif !it.reverse {\n\t\tit.Iterator.Next()\n\t} else {\n\t\tit.Iterator.Prev()\n\t}\n}\n\nfunc NewRangeLimitIterator(i Iterator, r *Range, l *Limit) *RangeLimitedIterator {\n\treturn rangeLimitIterator(i, r, l, false)\n}\nfunc NewRevRangeLimitIterator(i Iterator, r *Range, l *Limit) *RangeLimitedIterator {\n\treturn rangeLimitIterator(i, r, l, true)\n}\nfunc NewRangeIterator(i Iterator, r *Range) *RangeLimitedIterator {\n\treturn rangeLimitIterator(i, r, &Limit{0, -1}, false)\n}\nfunc NewRevRangeIterator(i Iterator, r *Range) *RangeLimitedIterator {\n\treturn rangeLimitIterator(i, r, &Limit{0, -1}, true)\n}\nfunc rangeLimitIterator(i Iterator, r *Range, l *Limit, reverse bool) *RangeLimitedIterator {\n\tit := &RangeLimitedIterator{\n\t\tIterator: i,\n\t\tl: *l,\n\t\tr: *r,\n\t\treverse: reverse,\n\t\tstep: 0,\n\t}\n\tif l.Offset < 0 {\n\t\treturn it\n\t}\n\tif !reverse {\n\t\tif r.Min == nil {\n\t\t\tit.Iterator.SeekToFirst()\n\t\t} else {\n\t\t\tit.Iterator.Seek(r.Min)\n\t\t\tif r.Type&common.RangeLOpen > 0 {\n\t\t\t\tif it.Iterator.Valid() && bytes.Equal(it.Iterator.RefKey(), r.Min) {\n\t\t\t\t\tit.Iterator.Next()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif r.Max == nil {\n\t\t\tit.Iterator.SeekToLast()\n\t\t} else {\n\t\t\tit.Iterator.SeekForPrev(r.Max)\n\t\t\tif !it.Iterator.Valid() {\n\t\t\t\tit.Iterator.SeekToLast()\n\t\t\t\tif it.Iterator.Valid() && bytes.Compare(it.Iterator.RefKey(), r.Max) == 1 {\n\t\t\t\t\tdbLog.Infof(\"iterator seek to last key %v should not be greater than seek to max %v\", it.Iterator.RefKey(), r.Max)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif r.Type&common.RangeROpen > 0 {\n\t\t\t\tif it.Iterator.Valid() && bytes.Equal(it.Iterator.RefKey(), r.Max) {\n\t\t\t\t\tit.Iterator.Prev()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfor i := 0; i < l.Offset; i++ {\n\t\tif it.Iterator.Valid() {\n\t\t\tif !it.reverse {\n\t\t\t\tit.Iterator.Next()\n\t\t\t} else {\n\t\t\t\tit.Iterator.Prev()\n\t\t\t}\n\t\t}\n\t}\n\treturn it\n}\n<commit_msg>optimize the iterator use read options for bound<commit_after>package rockredis\n\nimport 
(\n\t\"bytes\"\n\n\t\"github.com\/absolute8511\/ZanRedisDB\/common\"\n\t\"github.com\/absolute8511\/gorocksdb\"\n)\n\ntype Iterator interface {\n\tNext()\n\tPrev()\n\tValid() bool\n\tSeek([]byte)\n\tSeekForPrev([]byte)\n\tSeekToFirst()\n\tSeekToLast()\n\tClose()\n\tRefKey() []byte\n\tKey() []byte\n\tRefValue() []byte\n\tValue() []byte\n\tNoTimestamp(vt byte)\n}\n\ntype Range struct {\n\tMin []byte\n\tMax []byte\n\tType uint8\n}\n\ntype Limit struct {\n\tOffset int\n\tCount int\n}\n\ntype DBIterator struct {\n\t*gorocksdb.Iterator\n\tsnap *gorocksdb.Snapshot\n\tro *gorocksdb.ReadOptions\n\tdb *gorocksdb.DB\n\tremoveTsType byte\n}\n\n\/\/ low_bound is inclusive\n\/\/ upper bound is exclusive\nfunc NewDBIterator(db *gorocksdb.DB, withSnap bool, prefixSame bool, lowbound []byte, upbound []byte, ignoreDel bool) (*DBIterator, error) {\n\tdb.RLock()\n\tdbit := &DBIterator{\n\t\tdb: db,\n\t}\n\treadOpts := gorocksdb.NewDefaultReadOptions()\n\treadOpts.SetFillCache(false)\n\treadOpts.SetVerifyChecksums(false)\n\tif prefixSame {\n\t\treadOpts.SetPrefixSameAsStart(true)\n\t}\n\tif lowbound != nil {\n\t\treadOpts.SetIterLowerBound(lowbound)\n\t}\n\tif upbound != nil {\n\t\treadOpts.SetIterUpperBound(upbound)\n\t}\n\tif ignoreDel {\n\t\t\/\/ may iterator some deleted keys still not compacted.\n\t\treadOpts.SetIgnoreRangeDeletions(true)\n\t}\n\tdbit.ro = readOpts\n\tvar err error\n\tif withSnap {\n\t\tdbit.snap, err = db.NewSnapshot()\n\t\tif err != nil {\n\t\t\tdbit.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\treadOpts.SetSnapshot(dbit.snap)\n\t}\n\tdbit.Iterator, err = db.NewIterator(readOpts)\n\tif err != nil {\n\t\tdbit.Close()\n\t\treturn nil, err\n\t}\n\treturn dbit, nil\n}\n\nfunc (it *DBIterator) RefKey() []byte {\n\treturn it.Iterator.Key().Data()\n}\n\nfunc (it *DBIterator) Key() []byte {\n\treturn it.Iterator.Key().Bytes()\n}\n\nfunc (it *DBIterator) RefValue() []byte {\n\tv := it.Iterator.Value().Data()\n\tif (it.removeTsType == KVType || it.removeTsType == HashType) && len(v) >= tsLen {\n\t\tv = v[:len(v)-tsLen]\n\t}\n\treturn v\n}\n\nfunc (it *DBIterator) Value() []byte {\n\tv := it.Iterator.Value().Bytes()\n\tif (it.removeTsType == KVType || it.removeTsType == HashType) && len(v) >= tsLen {\n\t\tv = v[:len(v)-tsLen]\n\t}\n\treturn v\n}\n\nfunc (it *DBIterator) NoTimestamp(vt byte) {\n\tit.removeTsType = vt\n}\n\nfunc (it *DBIterator) Close() {\n\tif it.Iterator != nil {\n\t\tit.Iterator.Close()\n\t}\n\tif it.ro != nil {\n\t\tit.ro.Destroy()\n\t}\n\tif it.snap != nil {\n\t\tit.snap.Release()\n\t}\n\tit.db.RUnlock()\n}\n\n\/\/ note: all the iterator use the prefix iterator flag. 
Which means they may skip the keys for a different table\n\/\/ prefix.\nfunc NewDBRangeLimitIterator(db *gorocksdb.DB, min []byte, max []byte, rtype uint8,\n\toffset int, count int, reverse bool) (*RangeLimitedIterator, error) {\n\tupperBound := max\n\tlowerBound := min\n\tif rtype&common.RangeROpen <= 0 && upperBound != nil {\n\t\t\/\/ the right end of the range is not open, so we need to include the max;\n\t\t\/\/ however, upperBound is exclusive\n\t\tupperBound = append(upperBound, 0)\n\t}\n\n\tdbit, err := NewDBIterator(db, false, true, lowerBound, upperBound, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !reverse {\n\t\treturn NewRangeLimitIterator(dbit, &Range{Min: min, Max: max, Type: rtype},\n\t\t\t&Limit{Offset: offset, Count: count}), nil\n\t} else {\n\t\treturn NewRevRangeLimitIterator(dbit, &Range{Min: min, Max: max, Type: rtype},\n\t\t\t&Limit{Offset: offset, Count: count}), nil\n\t}\n}\n\nfunc NewSnapshotDBRangeLimitIterator(db *gorocksdb.DB, min []byte, max []byte, rtype uint8,\n\toffset int, count int, reverse bool) (*RangeLimitedIterator, error) {\n\tupperBound := max\n\tlowerBound := min\n\tif rtype&common.RangeROpen <= 0 && upperBound != nil {\n\t\t\/\/ the right end of the range is not open, so we need to include the max;\n\t\t\/\/ however, upperBound is exclusive\n\t\tupperBound = append(upperBound, 0)\n\t}\n\tdbit, err := NewDBIterator(db, true, true, lowerBound, upperBound, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !reverse {\n\t\treturn NewRangeLimitIterator(dbit, &Range{Min: min, Max: max, Type: rtype},\n\t\t\t&Limit{Offset: offset, Count: count}), nil\n\t} else {\n\t\treturn NewRevRangeLimitIterator(dbit, &Range{Min: min, Max: max, Type: rtype},\n\t\t\t&Limit{Offset: offset, Count: count}), nil\n\t}\n}\n\nfunc NewDBRangeIterator(db *gorocksdb.DB, min []byte, max []byte, rtype uint8,\n\treverse bool) (*RangeLimitedIterator, error) {\n\tupperBound := max\n\tlowerBound := min\n\tif rtype&common.RangeROpen <= 0 && upperBound != nil {\n\t\t\/\/ the right end of the range is not open, so we need to include the max;\n\t\t\/\/ however, upperBound is exclusive\n\t\tupperBound = append(upperBound, 0)\n\t}\n\tdbit, err := NewDBIterator(db, false, true, lowerBound, upperBound, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !reverse {\n\t\treturn NewRangeIterator(dbit, &Range{Min: min, Max: max, Type: rtype}), nil\n\t} else {\n\t\treturn NewRevRangeIterator(dbit, &Range{Min: min, Max: max, Type: rtype}), nil\n\t}\n}\n\nfunc NewSnapshotDBRangeIterator(db *gorocksdb.DB, min []byte, max []byte, rtype uint8,\n\treverse bool) (*RangeLimitedIterator, error) {\n\tupperBound := max\n\tlowerBound := min\n\tif rtype&common.RangeROpen <= 0 && upperBound != nil {\n\t\t\/\/ the right end of the range is not open, so we need to include the max;\n\t\t\/\/ however, upperBound is exclusive\n\t\tupperBound = append(upperBound, 0)\n\t}\n\tdbit, err := NewDBIterator(db, true, true, lowerBound, upperBound, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !reverse {\n\t\treturn NewRangeIterator(dbit, &Range{Min: min, Max: max, Type: rtype}), nil\n\t} else {\n\t\treturn NewRevRangeIterator(dbit, &Range{Min: min, Max: max, Type: rtype}), nil\n\t}\n}\n\ntype RangeLimitedIterator struct {\n\tIterator\n\tl Limit\n\tr Range\n\t\/\/ maybe step should not auto increase, we need a count of the actual elements\n\tstep int\n\treverse bool\n}\n\nfunc (it *RangeLimitedIterator) Valid() bool {\n\tif it.l.Offset < 0 {\n\t\treturn false\n\t}\n\tif it.l.Count >= 0 && it.step >= it.l.Count {\n\t\treturn false\n\t}\n\tif !it.Iterator.Valid() {\n\t\treturn 
false\n\t}\n\n\tif !it.reverse {\n\t\tif it.r.Max != nil {\n\t\t\tr := bytes.Compare(it.Iterator.RefKey(), it.r.Max)\n\t\t\tif it.r.Type&common.RangeROpen > 0 {\n\t\t\t\treturn !(r >= 0)\n\t\t\t} else {\n\t\t\t\treturn !(r > 0)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif it.r.Min != nil {\n\t\t\tr := bytes.Compare(it.Iterator.RefKey(), it.r.Min)\n\t\t\tif it.r.Type&common.RangeLOpen > 0 {\n\t\t\t\treturn !(r <= 0)\n\t\t\t} else {\n\t\t\t\treturn !(r < 0)\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (it *RangeLimitedIterator) Next() {\n\tit.step++\n\tif !it.reverse {\n\t\tit.Iterator.Next()\n\t} else {\n\t\tit.Iterator.Prev()\n\t}\n}\n\nfunc NewRangeLimitIterator(i Iterator, r *Range, l *Limit) *RangeLimitedIterator {\n\treturn rangeLimitIterator(i, r, l, false)\n}\nfunc NewRevRangeLimitIterator(i Iterator, r *Range, l *Limit) *RangeLimitedIterator {\n\treturn rangeLimitIterator(i, r, l, true)\n}\nfunc NewRangeIterator(i Iterator, r *Range) *RangeLimitedIterator {\n\treturn rangeLimitIterator(i, r, &Limit{0, -1}, false)\n}\nfunc NewRevRangeIterator(i Iterator, r *Range) *RangeLimitedIterator {\n\treturn rangeLimitIterator(i, r, &Limit{0, -1}, true)\n}\nfunc rangeLimitIterator(i Iterator, r *Range, l *Limit, reverse bool) *RangeLimitedIterator {\n\tit := &RangeLimitedIterator{\n\t\tIterator: i,\n\t\tl: *l,\n\t\tr: *r,\n\t\treverse: reverse,\n\t\tstep: 0,\n\t}\n\tif l.Offset < 0 {\n\t\treturn it\n\t}\n\tif !reverse {\n\t\tif r.Min == nil {\n\t\t\tit.Iterator.SeekToFirst()\n\t\t} else {\n\t\t\tit.Iterator.Seek(r.Min)\n\t\t\tif r.Type&common.RangeLOpen > 0 {\n\t\t\t\tif it.Iterator.Valid() && bytes.Equal(it.Iterator.RefKey(), r.Min) {\n\t\t\t\t\tit.Iterator.Next()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif r.Max == nil {\n\t\t\tit.Iterator.SeekToLast()\n\t\t} else {\n\t\t\tit.Iterator.SeekForPrev(r.Max)\n\t\t\tif !it.Iterator.Valid() {\n\t\t\t\tit.Iterator.SeekToLast()\n\t\t\t\tif it.Iterator.Valid() && bytes.Compare(it.Iterator.RefKey(), r.Max) == 1 {\n\t\t\t\t\tdbLog.Infof(\"iterator seek to last key %v should not be greater than seek to max %v\", it.Iterator.RefKey(), r.Max)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif r.Type&common.RangeROpen > 0 {\n\t\t\t\tif it.Iterator.Valid() && bytes.Equal(it.Iterator.RefKey(), r.Max) {\n\t\t\t\t\tit.Iterator.Prev()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfor i := 0; i < l.Offset; i++ {\n\t\tif it.Iterator.Valid() {\n\t\t\tif !it.reverse {\n\t\t\t\tit.Iterator.Next()\n\t\t\t} else {\n\t\t\t\tit.Iterator.Prev()\n\t\t\t}\n\t\t}\n\t}\n\treturn it\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Kubernetes Authors.\n\/\/ SPDX-License-Identifier: Apache-2.0\n\n\/\/ Package merge contains libraries for merging Resources and Patches\npackage filters\n\nimport (\n\t\"sigs.k8s.io\/kustomize\/kyaml\/yaml\"\n\t\"sigs.k8s.io\/kustomize\/kyaml\/yaml\/merge2\"\n)\n\n\/\/ GrepFilter merges Resources with the Group\/Version\/Kind\/Namespace\/Name together using\n\/\/ a 2-way merge strategy.\n\/\/\n\/\/ - Fields set to null in the source will be cleared from the destination\n\/\/ - Fields with matching keys will be merged recursively\n\/\/ - Lists with an associative key (e.g. 
name) will have their elements merged using the key\n\/\/ - List without an associative key will have the dest list replaced by the source list\ntype MergeFilter struct {\n\tReverse bool\n}\n\ntype mergeKey struct {\n\tapiVersion string\n\tkind string\n\tnamespace string\n\tname string\n}\n\n\/\/ GrepFilter implements kio.GrepFilter by merge Resources with the same G\/V\/K\/NS\/N\nfunc (c MergeFilter) Filter(input []*yaml.RNode) ([]*yaml.RNode, error) {\n\t\/\/ invert the merge precedence\n\tif c.Reverse {\n\t\tfor i, j := 0, len(input)-1; i < j; i, j = i+1, j-1 {\n\t\t\tinput[i], input[j] = input[j], input[i]\n\t\t}\n\t}\n\n\t\/\/ index the Resources by G\/V\/K\/NS\/N\n\tindex := map[mergeKey][]*yaml.RNode{}\n\tfor i := range input {\n\t\tmeta, err := input[i].GetMeta()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tkey := mergeKey{\n\t\t\tapiVersion: meta.APIVersion,\n\t\t\tkind: meta.Kind,\n\t\t\tnamespace: meta.Namespace,\n\t\t\tname: meta.Name,\n\t\t}\n\t\tindex[key] = append(index[key], input[i])\n\t}\n\n\t\/\/ merge each of the G\/V\/K\/NS\/N lists\n\tvar output []*yaml.RNode\n\tvar err error\n\tfor k := range index {\n\t\tvar merged *yaml.RNode\n\t\tresources := index[k]\n\t\tfor i := range resources {\n\t\t\tpatch := resources[i]\n\t\t\tif merged == nil {\n\t\t\t\t\/\/ first resources, don't merge it\n\t\t\t\tmerged = resources[i]\n\t\t\t} else {\n\t\t\t\tmerged, err = merge2.Merge(patch, merged)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\toutput = append(output, merged)\n\t}\n\treturn output, nil\n}\n<commit_msg>Fix godocs on MergeFilter<commit_after>\/\/ Copyright 2019 The Kubernetes Authors.\n\/\/ SPDX-License-Identifier: Apache-2.0\n\n\/\/ Package merge contains libraries for merging Resources and Patches\npackage filters\n\nimport (\n\t\"sigs.k8s.io\/kustomize\/kyaml\/kio\"\n\t\"sigs.k8s.io\/kustomize\/kyaml\/yaml\"\n\t\"sigs.k8s.io\/kustomize\/kyaml\/yaml\/merge2\"\n)\n\n\/\/ MergeFilter merges Resources with the Group\/Version\/Kind\/Namespace\/Name together using\n\/\/ a 2-way merge strategy.\n\/\/\n\/\/ - Fields set to null in the source will be cleared from the destination\n\/\/ - Fields with matching keys will be merged recursively\n\/\/ - Lists with an associative key (e.g. 
name) will have their elements merged using the key\n\/\/ - List without an associative key will have the dest list replaced by the source list\ntype MergeFilter struct {\n\tReverse bool\n}\n\nvar _ kio.Filter = MergeFilter{}\n\ntype mergeKey struct {\n\tapiVersion string\n\tkind string\n\tnamespace string\n\tname string\n}\n\n\/\/ MergeFilter implements kio.Filter by merging Resources with the same G\/V\/K\/NS\/N\nfunc (c MergeFilter) Filter(input []*yaml.RNode) ([]*yaml.RNode, error) {\n\t\/\/ invert the merge precedence\n\tif c.Reverse {\n\t\tfor i, j := 0, len(input)-1; i < j; i, j = i+1, j-1 {\n\t\t\tinput[i], input[j] = input[j], input[i]\n\t\t}\n\t}\n\n\t\/\/ index the Resources by G\/V\/K\/NS\/N\n\tindex := map[mergeKey][]*yaml.RNode{}\n\tfor i := range input {\n\t\tmeta, err := input[i].GetMeta()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tkey := mergeKey{\n\t\t\tapiVersion: meta.APIVersion,\n\t\t\tkind: meta.Kind,\n\t\t\tnamespace: meta.Namespace,\n\t\t\tname: meta.Name,\n\t\t}\n\t\tindex[key] = append(index[key], input[i])\n\t}\n\n\t\/\/ merge each of the G\/V\/K\/NS\/N lists\n\tvar output []*yaml.RNode\n\tvar err error\n\tfor k := range index {\n\t\tvar merged *yaml.RNode\n\t\tresources := index[k]\n\t\tfor i := range resources {\n\t\t\tpatch := resources[i]\n\t\t\tif merged == nil {\n\t\t\t\t\/\/ first resources, don't merge it\n\t\t\t\tmerged = resources[i]\n\t\t\t} else {\n\t\t\t\tmerged, err = merge2.Merge(patch, merged)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\toutput = append(output, merged)\n\t}\n\treturn output, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package runtime\n\nimport (\n\t\"fmt\"\n\tE \"github.com\/ionous\/sashimi\/event\"\n\tG \"github.com\/ionous\/sashimi\/game\"\n\tM \"github.com\/ionous\/sashimi\/model\"\n\t\"github.com\/ionous\/sashimi\/util\/ident\"\n\t\"strings\"\n)\n\n\/\/\n\/\/ ObjectAdapter wraps GameObject(s) for user script callbacks.\n\/\/ WARNING: for users to test object equality, the ObjectAdapter must be comparable;\n\/\/ it can't implement the interface as a pointer, and it cant have any cached values.\n\/\/\ntype ObjectAdapter struct {\n\tgame *Game \/\/ for console, Go(), and relations\n\tgobj *GameObject\n}\n\n\/\/\n\/\/ String helps debugging.\n\/\/\nfunc (oa ObjectAdapter) String() string {\n\treturn oa.gobj.Id().String()\n}\n\n\/\/\n\/\/ Id uniquely identifies the object.\n\/\/\nfunc (oa ObjectAdapter) Id() ident.Id {\n\treturn oa.gobj.Id()\n}\n\n\/\/\nfunc (oa ObjectAdapter) Remove() {\n\tdelete(oa.game.Objects, oa.gobj.Id())\n}\n\n\/\/\n\/\/ Name of the object.\n\/\/\n\/\/ func (oa ObjectAdapter) Name() string {\n\/\/ \treturn oa.gobj.inst.Name()\n\/\/ }\n\n\/\/\n\/\/ Exists always returns true for ObjectAdapter; see also NullObject which always returns false.\n\/\/\nfunc (oa ObjectAdapter) Exists() bool {\n\treturn true\n}\n\n\/\/\n\/\/ Class returns true when this object is compatible with ( based on ) the named class. 
( parent or other ancestor )\n\/\/\nfunc (oa ObjectAdapter) Class(class string) (okay bool) {\n\tif cls, ok := oa.game.Model.Classes.FindClassBySingular(class); ok {\n\t\tokay = oa.gobj.Class().CompatibleWith(cls.Id())\n\t}\n\treturn okay\n}\n\n\/\/\n\/\/ Is this object in the passed state?\n\/\/\nfunc (oa ObjectAdapter) Is(state string) (ret bool) {\n\tif prop, index, ok := oa.gobj.Class().PropertyByChoice(state); !ok {\n\t\toa.logError(fmt.Errorf(\"is: no such choice '%s'.'%s'\", oa, state))\n\t} else {\n\t\ttestChoice, _ := prop.IndexToChoice(index)\n\t\tcurrChoice := oa.gobj.Value(prop.Id())\n\t\tret = currChoice == testChoice\n\t}\n\treturn ret\n}\n\n\/\/\n\/\/ IsNow changes the state of an object.\n\/\/\nfunc (oa ObjectAdapter) IsNow(state string) {\n\tif prop, index, ok := oa.gobj.Class().PropertyByChoice(state); !ok {\n\t\toa.logError(fmt.Errorf(\"IsNow: no such choice '%s'.'%s'\", oa, state))\n\t} else {\n\t\t\/\/ get the current choice from the implied property slot\n\t\tif currChoice, ok := oa.gobj.Value(prop.Id()).(ident.Id); !ok {\n\t\t\terr := TypeMismatch(oa.gobj.Id().String(), prop.Id().String())\n\t\t\toa.logError(err)\n\t\t} else {\n\t\t\tnewChoice, _ := prop.IndexToChoice(index)\n\t\t\tif currChoice != newChoice {\n\t\t\t\toa.gobj.removeDirect(currChoice) \/\/ delete the old choice's boolean,\n\t\t\t\toa.gobj.setDirect(newChoice, true) \/\/ and set the new\n\t\t\t\toa.gobj.setDirect(prop.Id(), newChoice) \/\/ \/\/ set the property slot to the new choice\n\t\t\t\toa.game.Properties.Notify(oa.gobj.Id(), prop.Id(), currChoice, newChoice)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/\n\/\/ Num value of the named property.\n\/\/\nfunc (oa ObjectAdapter) Num(prop string) (ret float32) {\n\tid := M.MakeStringId(prop)\n\tif val, ok := oa.gobj.Value(id).(float32); !ok {\n\t\toa.logError(TypeMismatch(prop, \"get num\"))\n\t} else {\n\t\tret = val\n\t}\n\treturn ret\n}\n\n\/\/\n\/\/ SetNum changes the value of an existing number property.\n\/\/\nfunc (oa ObjectAdapter) SetNum(prop string, value float32) {\n\tid := M.MakeStringId(prop)\n\tif old, ok := oa.gobj.SetValue(id, value); !ok {\n\t\toa.logError(TypeMismatch(prop, \"set num\"))\n\t} else {\n\t\toa.game.Properties.Notify(oa.gobj.Id(), id, old, value)\n\t}\n}\n\n\/\/\n\/\/ Text value of the named property ( expanding any templated text. )\n\/\/ ( interestingly, inform seems to error when trying to store or manipulate templated text. )\n\/\/\nfunc (oa ObjectAdapter) Text(prop string) (ret string) {\n\tid := M.MakeStringId(prop)\n\t\/\/ is oa text stored as a template?\n\tif temp, ok := oa.gobj.temps[id.String()]; ok {\n\t\tif s, e := runTemplate(temp, oa.gobj.vals); e != nil {\n\t\t\toa.logError(e)\n\t\t} else {\n\t\t\tret = s\n\t\t}\n\t} else if val, ok := oa.gobj.Value(id).(string); !ok {\n\t\toa.logError(TypeMismatch(prop, \"get text\"))\n\t} else {\n\t\tret = val\n\t}\n\treturn ret\n}\n\n\/\/\n\/\/ SetText changes the value of an existing text property.\n\/\/\nfunc (oa ObjectAdapter) SetText(prop string, text string) {\n\tid := M.MakeStringId(prop)\n\tif e := oa.gobj.temps.New(id.String(), text); e != nil {\n\t\toa.logError(e)\n\t} else if old, ok := oa.gobj.SetValue(id, text); !ok {\n\t\toa.logError(TypeMismatch(prop, \"set text\"))\n\t} else {\n\t\toa.game.Properties.Notify(oa.gobj.Id(), id, old, text)\n\t}\n}\n\n\/\/\n\/\/ Object returns a related object.\n\/\/\nfunc (oa ObjectAdapter) Object(prop string) (ret G.IObject) {\n\t\/\/ TBD: should these be logged? 
it's sure nice to be able to test objects generically for properties\n\tvar res ident.Id\n\tif p, ok := oa.gobj.Class().FindProperty(prop); ok {\n\t\tswitch p := p.(type) {\n\t\tcase *M.PointerProperty:\n\t\t\tif val, ok := oa.gobj.Value(p.Id()).(ident.Id); ok {\n\t\t\t\tres = val\n\t\t\t}\n\t\tcase *M.RelativeProperty:\n\t\t\t\/\/ TBD: can relative property changes automatically reflect into the value table\n\t\t\t\/\/ ex. on event?\n\t\t\tif rel, ok := oa.gobj.Value(p.Id()).(RelativeValue); ok {\n\t\t\t\tif p.ToMany() {\n\t\t\t\t\toa.logError(fmt.Errorf(\"object requested, but relation is list\"))\n\t\t\t\t} else {\n\t\t\t\t\tlist := rel.List()\n\t\t\t\t\tif len(list) != 0 {\n\t\t\t\t\t\tres = ident.Id(list[0])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn NewObjectAdapter(oa.game, oa.game.Objects[res])\n}\n\n\/\/\n\/\/ Set changes an object relationship.\n\/\/\nfunc (oa ObjectAdapter) Set(prop string, object G.IObject) {\n\tif p, ok := oa.gobj.Class().FindProperty(prop); ok {\n\t\tswitch p := p.(type) {\n\t\tdefault:\n\t\t\toa.logError(TypeMismatch(oa.String(), prop))\n\t\tcase *M.PointerProperty:\n\t\t\tset := false\n\t\t\tif other, ok := object.(ObjectAdapter); !ok {\n\t\t\t\toa.gobj.SetValue(p.Id(), ident.Id(\"\"))\n\t\t\t\tset = true\n\t\t\t} else {\n\t\t\t\toa.gobj.SetValue(p.Id(), other.gobj.Id())\n\t\t\t\tset = true\n\t\t\t}\n\t\t\tif !set {\n\t\t\t\toa.logError(fmt.Errorf(\"couldnt set value for prop %s\", prop))\n\t\t\t}\n\t\tcase *M.RelativeProperty:\n\t\t\tif rel, ok := oa.gobj.Value(p.Id()).(RelativeValue); ok {\n\n\t\t\t\t\/\/ if the referenced object doesnt exist, we take it to mean they are clearing the reference.\n\t\t\t\tif other, ok := object.(ObjectAdapter); !ok {\n\t\t\t\t\tif removed, e := rel.ClearReference(); e != nil {\n\t\t\t\t\t\toa.logError(e)\n\t\t\t\t\t} else {\n\t\t\t\t\t\toa.game.Properties.Notify(oa.gobj.Id(), p.Id(), removed, ident.Empty())\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ FIX? 
the impedance b\/t IObject and Reference is annoying.\n\t\t\t\t\tother := other.gobj.Id()\n\t\t\t\t\tif ref, ok := oa.game.Model.Instances[other]; !ok {\n\t\t\t\t\t\toa.logError(fmt.Errorf(\"Set: couldnt find object named %s\", other))\n\t\t\t\t\t} else if removed, e := rel.SetReference(ref); e != nil {\n\t\t\t\t\t\toa.logError(e)\n\t\t\t\t\t} else {\n\t\t\t\t\t\t\/\/ removed is probably a single object\n\t\t\t\t\t\toa.game.Properties.Notify(oa.gobj.Id(), p.Id(), removed, other)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/\n\/\/ ObjectList returns a list of related objects.\n\/\/\nfunc (oa ObjectAdapter) ObjectList(prop string) (ret []G.IObject) {\n\tif p, ok := oa.gobj.Class().FindProperty(prop); ok {\n\t\tswitch p := p.(type) {\n\t\tdefault:\n\t\t\toa.logError(TypeMismatch(oa.String(), prop))\n\t\tcase *M.RelativeProperty:\n\t\t\tif rel, ok := oa.gobj.Value(p.Id()).(RelativeValue); ok {\n\t\t\t\tlist := rel.List()\n\t\t\t\tret = make([]G.IObject, len(list))\n\t\t\t\tfor i, objId := range list {\n\t\t\t\t\tret[i] = NewObjectAdapter(oa.game, oa.game.Objects[objId])\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn ret\n}\n\n\/\/\n\/\/ Says provides this object with a voice.\n\/\/\nfunc (oa ObjectAdapter) Says(text string) {\n\t\/\/ FIX: share some template love with GameEventAdapter.Say()\n\tlines := strings.Split(text, \"\\n\")\n\toa.game.output.ActorSays(oa.gobj, lines)\n}\n\n\/\/\n\/\/ Go sends all the events associated with the named action,\n\/\/ and runs the default action if appropriate.\n\/\/ @see also: Game.ProcessEventQueue\n\/\/\nfunc (oa ObjectAdapter) Go(act string, objects ...G.IObject) {\n\tif action, ok := oa.game.Model.Actions.FindActionByName(act); !ok {\n\t\te := fmt.Errorf(\"unknown action for Go %s\", act)\n\t\toa.logError(e)\n\t} else {\n\t\t\/\/ FIX, ugly: we need the props, even tho we already have the objects...\n\t\tnouns := make([]ident.Id, len(objects)+1)\n\t\tnouns[0] = oa.Id()\n\t\tfor i, o := range objects {\n\t\t\tnouns[i+1] = o.Id()\n\t\t}\n\t\tif act, e := oa.game.newRuntimeAction(action, nouns...); e != nil {\n\t\t\toa.logError(e)\n\t\t} else {\n\t\t\ttgt := ObjectTarget{oa.game, oa.gobj}\n\t\t\tmsg := &E.Message{Name: action.Event(), Data: act}\n\t\t\tif e := oa.game.frame.SendMessage(tgt, msg); e != nil {\n\t\t\t\toa.logError(e)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/\n\/\/\n\/\/\nfunc (oa ObjectAdapter) logError(err error) (hadError bool) {\n\tif err != nil {\n\t\toa.game.log.Output(4, fmt.Sprint(\"!!!Error:\", err.Error()))\n\t\thadError = true\n\t}\n\treturn hadError\n}\n<commit_msg>consolidate per-object property response into a single notify<commit_after>package runtime\n\nimport (\n\t\"fmt\"\n\tE \"github.com\/ionous\/sashimi\/event\"\n\tG \"github.com\/ionous\/sashimi\/game\"\n\tM \"github.com\/ionous\/sashimi\/model\"\n\t\"github.com\/ionous\/sashimi\/util\/ident\"\n\t\"strings\"\n)\n\n\/\/\n\/\/ ObjectAdapter wraps GameObject(s) for user script callbacks.\n\/\/ WARNING: for users to test object equality, the ObjectAdapter must be comparable;\n\/\/ it can't implement the interface as a pointer, and it cant have any cached values.\n\/\/\ntype ObjectAdapter struct {\n\tgame *Game \/\/ for console, Go(), and relations\n\tgobj *GameObject\n}\n\n\/\/\n\/\/ String helps debugging.\n\/\/\nfunc (oa ObjectAdapter) String() string {\n\treturn oa.gobj.Id().String()\n}\n\n\/\/\n\/\/ Id uniquely identifies the object.\n\/\/\nfunc (oa ObjectAdapter) Id() ident.Id {\n\treturn oa.gobj.Id()\n}\n\n\/\/\nfunc (oa ObjectAdapter) Remove() {\n\tdelete(oa.game.Objects, 
oa.gobj.Id())\n}\n\n\/\/\n\/\/ Name of the object.\n\/\/\n\/\/ func (oa ObjectAdapter) Name() string {\n\/\/ \treturn oa.gobj.inst.Name()\n\/\/ }\n\n\/\/\n\/\/ Exists always returns true for ObjectAdapter; see also NullObject which always returns false.\n\/\/\nfunc (oa ObjectAdapter) Exists() bool {\n\treturn true\n}\n\n\/\/\n\/\/ Class returns true when this object is compatible with ( based on ) the named class. ( parent or other ancestor )\n\/\/\nfunc (oa ObjectAdapter) Class(class string) (okay bool) {\n\tif cls, ok := oa.game.Model.Classes.FindClassBySingular(class); ok {\n\t\tokay = oa.gobj.Class().CompatibleWith(cls.Id())\n\t}\n\treturn okay\n}\n\n\/\/\n\/\/ Is this object in the passed state?\n\/\/\nfunc (oa ObjectAdapter) Is(state string) (ret bool) {\n\tif prop, index, ok := oa.gobj.Class().PropertyByChoice(state); !ok {\n\t\toa.logError(fmt.Errorf(\"is: no such choice '%s'.'%s'\", oa, state))\n\t} else {\n\t\ttestChoice, _ := prop.IndexToChoice(index)\n\t\tcurrChoice := oa.gobj.Value(prop.Id())\n\t\tret = currChoice == testChoice\n\t}\n\treturn ret\n}\n\n\/\/\n\/\/ IsNow changes the state of an object.\n\/\/\nfunc (oa ObjectAdapter) IsNow(state string) {\n\tif prop, index, ok := oa.gobj.Class().PropertyByChoice(state); !ok {\n\t\toa.logError(fmt.Errorf(\"IsNow: no such choice '%s'.'%s'\", oa, state))\n\t} else {\n\t\t\/\/ get the current choice from the implied property slot\n\t\tif currChoice, ok := oa.gobj.Value(prop.Id()).(ident.Id); !ok {\n\t\t\terr := TypeMismatch(oa.gobj.Id().String(), prop.Id().String())\n\t\t\toa.logError(err)\n\t\t} else {\n\t\t\tnewChoice, _ := prop.IndexToChoice(index)\n\t\t\tif currChoice != newChoice {\n\t\t\t\toa.gobj.removeDirect(currChoice) \/\/ delete the old choice's boolean,\n\t\t\t\toa.gobj.setDirect(newChoice, true) \/\/ and set the new\n\t\t\t\toa.gobj.setDirect(prop.Id(), newChoice) \/\/ \/\/ set the property slot to the new choice\n\t\t\t\toa.game.Properties.Notify(oa.gobj.Id(), prop.Id(), currChoice, newChoice)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/\n\/\/ Num value of the named property.\n\/\/\nfunc (oa ObjectAdapter) Num(prop string) (ret float32) {\n\tid := M.MakeStringId(prop)\n\tif val, ok := oa.gobj.Value(id).(float32); !ok {\n\t\toa.logError(TypeMismatch(prop, \"get num\"))\n\t} else {\n\t\tret = val\n\t}\n\treturn ret\n}\n\n\/\/\n\/\/ SetNum changes the value of an existing number property.\n\/\/\nfunc (oa ObjectAdapter) SetNum(prop string, value float32) {\n\tid := M.MakeStringId(prop)\n\tif old, ok := oa.gobj.SetValue(id, value); !ok {\n\t\toa.logError(TypeMismatch(prop, \"set num\"))\n\t} else {\n\t\toa.game.Properties.Notify(oa.gobj.Id(), id, old, value)\n\t}\n}\n\n\/\/\n\/\/ Text value of the named property ( expanding any templated text. )\n\/\/ ( interestingly, inform seems to error when trying to store or manipulate templated text. 
)\n\/\/\nfunc (oa ObjectAdapter) Text(prop string) (ret string) {\n\tid := M.MakeStringId(prop)\n\t\/\/ is oa text stored as a template?\n\tif temp, ok := oa.gobj.temps[id.String()]; ok {\n\t\tif s, e := runTemplate(temp, oa.gobj.vals); e != nil {\n\t\t\toa.logError(e)\n\t\t} else {\n\t\t\tret = s\n\t\t}\n\t} else if val, ok := oa.gobj.Value(id).(string); !ok {\n\t\toa.logError(TypeMismatch(prop, \"get text\"))\n\t} else {\n\t\tret = val\n\t}\n\treturn ret\n}\n\n\/\/\n\/\/ SetText changes the value of an existing text property.\n\/\/\nfunc (oa ObjectAdapter) SetText(prop string, text string) {\n\tid := M.MakeStringId(prop)\n\tif e := oa.gobj.temps.New(id.String(), text); e != nil {\n\t\toa.logError(e)\n\t} else if old, ok := oa.gobj.SetValue(id, text); !ok {\n\t\toa.logError(TypeMismatch(prop, \"set text\"))\n\t} else {\n\t\toa.game.Properties.Notify(oa.gobj.Id(), id, old, text)\n\t}\n}\n\n\/\/\n\/\/ Object returns a related object.\n\/\/\nfunc (oa ObjectAdapter) Object(prop string) (ret G.IObject) {\n\t\/\/ TBD: should these be logged? it's sure nice to be able to test objects generically for properties\n\tvar res ident.Id\n\tif p, ok := oa.gobj.Class().FindProperty(prop); ok {\n\t\tswitch p := p.(type) {\n\t\tcase *M.PointerProperty:\n\t\t\tif val, ok := oa.gobj.Value(p.Id()).(ident.Id); ok {\n\t\t\t\tres = val\n\t\t\t}\n\t\tcase *M.RelativeProperty:\n\t\t\t\/\/ TBD: can relative property changes automatically reflect into the value table\n\t\t\t\/\/ ex. on event?\n\t\t\tif rel, ok := oa.gobj.Value(p.Id()).(RelativeValue); ok {\n\t\t\t\tif p.ToMany() {\n\t\t\t\t\toa.logError(fmt.Errorf(\"object requested, but relation is list\"))\n\t\t\t\t} else {\n\t\t\t\t\tlist := rel.List()\n\t\t\t\t\tif len(list) != 0 {\n\t\t\t\t\t\tres = ident.Id(list[0])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn NewObjectAdapter(oa.game, oa.game.Objects[res])\n}\n\n\/\/\n\/\/ Set changes an object relationship.\n\/\/\nfunc (oa ObjectAdapter) Set(prop string, object G.IObject) {\n\tif p, ok := oa.gobj.Class().FindProperty(prop); ok {\n\t\tswitch p := p.(type) {\n\t\tdefault:\n\t\t\toa.logError(TypeMismatch(oa.String(), prop))\n\t\tcase *M.PointerProperty:\n\t\t\tset := false\n\t\t\tif other, ok := object.(ObjectAdapter); !ok {\n\t\t\t\toa.gobj.SetValue(p.Id(), ident.Id(\"\"))\n\t\t\t\tset = true\n\t\t\t} else {\n\t\t\t\toa.gobj.SetValue(p.Id(), other.gobj.Id())\n\t\t\t\tset = true\n\t\t\t}\n\t\t\tif !set {\n\t\t\t\toa.logError(fmt.Errorf(\"couldnt set value for prop %s\", prop))\n\t\t\t}\n\t\tcase *M.RelativeProperty:\n\t\t\tif rel, ok := oa.gobj.Value(p.Id()).(RelativeValue); ok {\n\t\t\t\tvar prev, next ident.Id\n\t\t\t\tvar err error\n\n\t\t\t\t\/\/ if the new object doesnt exist, we take it to mean they are clearing the reference.\n\t\t\t\tif other, ok := object.(ObjectAdapter); !ok {\n\t\t\t\t\tif removed, e := rel.ClearReference(); e != nil {\n\t\t\t\t\t\terr = e\n\t\t\t\t\t} else {\n\t\t\t\t\t\tprev = removed\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ FIX? the impedance b\/t IObject and Reference is annoying.\n\t\t\t\t\tother := other.gobj.Id()\n\t\t\t\t\tif ref, ok := oa.game.Model.Instances[other]; !ok {\n\t\t\t\t\t\terr = fmt.Errorf(\"Set: couldnt find object named %s\", other)\n\t\t\t\t\t} else if removed, e := rel.SetReference(ref); e != nil {\n\t\t\t\t\t\terr = e\n\t\t\t\t\t} else {\n\t\t\t\t\t\t\/\/ removed is probably 
a single object\n\t\t\t\t\t\tprev = removed\n\t\t\t\t\t\tnext = other\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\toa.logError(err)\n\t\t\t\t} else {\n\t\t\t\t\toa.game.Properties.Notify(oa.gobj.Id(), p.Id(), prev, next)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/\n\/\/ ObjectList returns a list of related objects.\n\/\/\nfunc (oa ObjectAdapter) ObjectList(prop string) (ret []G.IObject) {\n\tif p, ok := oa.gobj.Class().FindProperty(prop); ok {\n\t\tswitch p := p.(type) {\n\t\tdefault:\n\t\t\toa.logError(TypeMismatch(oa.String(), prop))\n\t\tcase *M.RelativeProperty:\n\t\t\tif rel, ok := oa.gobj.Value(p.Id()).(RelativeValue); ok {\n\t\t\t\tlist := rel.List()\n\t\t\t\tret = make([]G.IObject, len(list))\n\t\t\t\tfor i, objId := range list {\n\t\t\t\t\tret[i] = NewObjectAdapter(oa.game, oa.game.Objects[objId])\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn ret\n}\n\n\/\/\n\/\/ Says provides this object with a voice.\n\/\/\nfunc (oa ObjectAdapter) Says(text string) {\n\t\/\/ FIX: share some template love with GameEventAdapter.Say()\n\tlines := strings.Split(text, \"\\n\")\n\toa.game.output.ActorSays(oa.gobj, lines)\n}\n\n\/\/\n\/\/ Go sends all the events associated with the named action,\n\/\/ and runs the default action if appropriate.\n\/\/ @see also: Game.ProcessEventQueue\n\/\/\nfunc (oa ObjectAdapter) Go(act string, objects ...G.IObject) {\n\tif action, ok := oa.game.Model.Actions.FindActionByName(act); !ok {\n\t\te := fmt.Errorf(\"unknown action for Go %s\", act)\n\t\toa.logError(e)\n\t} else {\n\t\t\/\/ FIX, ugly: we need the props, even tho we already have the objects...\n\t\tnouns := make([]ident.Id, len(objects)+1)\n\t\tnouns[0] = oa.Id()\n\t\tfor i, o := range objects {\n\t\t\tnouns[i+1] = o.Id()\n\t\t}\n\t\tif act, e := oa.game.newRuntimeAction(action, nouns...); e != nil {\n\t\t\toa.logError(e)\n\t\t} else {\n\t\t\ttgt := ObjectTarget{oa.game, oa.gobj}\n\t\t\tmsg := &E.Message{Name: action.Event(), Data: act}\n\t\t\tif e := oa.game.frame.SendMessage(tgt, msg); e != nil {\n\t\t\t\toa.logError(e)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/\n\/\/\n\/\/\nfunc (oa ObjectAdapter) logError(err error) (hadError bool) {\n\tif err != nil {\n\t\toa.game.log.Output(4, fmt.Sprint(\"!!!Error:\", err.Error()))\n\t\thadError = true\n\t}\n\treturn hadError\n}\n<|endoftext|>"} {"text":"<commit_before>package procspy\n\n\/\/ \/proc-based implementation.\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"syscall\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/armon\/go-metrics\"\n\n\t\"github.com\/weaveworks\/common\/fs\"\n\t\"github.com\/weaveworks\/scope\/common\/marshal\"\n\t\"github.com\/weaveworks\/scope\/probe\/process\"\n)\n\nvar (\n\tprocRoot = \"\/proc\"\n\tnamespaceKey = []string{\"procspy\", \"namespaces\"}\n\tnetNamespacePathSuffix = \"\"\n)\n\ntype pidWalker struct {\n\twalker process.Walker\n\ttickc <-chan time.Time \/\/ Rate-limit clock. 
Sets the pace when traversing namespaces and \/proc\/PID\/fd\/* files.\n\tstopc chan struct{} \/\/ Abort walk\n\tfdBlockSize uint64 \/\/ Maximum number of \/proc\/PID\/fd\/* files to stat() per tick\n}\n\nfunc newPidWalker(walker process.Walker, tickc <-chan time.Time, fdBlockSize uint64) pidWalker {\n\tw := pidWalker{\n\t\twalker: walker,\n\t\ttickc: tickc,\n\t\tfdBlockSize: fdBlockSize,\n\t\tstopc: make(chan struct{}),\n\t}\n\treturn w\n}\n\n\/\/ SetProcRoot sets the location of the proc filesystem.\nfunc SetProcRoot(root string) {\n\tprocRoot = root\n}\n\nfunc getKernelVersion() (major, minor int, err error) {\n\tvar u syscall.Utsname\n\tif err = syscall.Uname(&u); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Kernel versions are not always a semver, so we have to do minimal parsing.\n\trelease := marshal.FromUtsname(u.Release)\n\tif n, err := fmt.Sscanf(release, \"%d.%d\", &major, &minor); err != nil || n != 2 {\n\t\treturn 0, 0, fmt.Errorf(\"Malformed version: %s\", release)\n\t}\n\treturn\n}\n\nfunc getNetNamespacePathSuffix() string {\n\t\/\/ With Linux 3.8 or later the network namespace of a process can be\n\t\/\/ determined by the inode of \/proc\/PID\/net\/ns. Before that, Any file\n\t\/\/ under \/proc\/PID\/net\/ could be used but it's not documented and may\n\t\/\/ break in newer kernels.\n\tconst (\n\t\tpost38Path = \"ns\/net\"\n\t\tpre38Path = \"net\/dev\"\n\t)\n\n\tif netNamespacePathSuffix != \"\" {\n\t\treturn netNamespacePathSuffix\n\t}\n\n\tmajor, minor, err := getKernelVersion()\n\tif err != nil {\n\t\tlog.Errorf(\"getNamespacePathSuffix: cannot get kernel version: %s\", err)\n\t\tnetNamespacePathSuffix = post38Path\n\t\treturn netNamespacePathSuffix\n\t}\n\n\tif major < 3 || (major == 3 && minor < 8) {\n\t\tnetNamespacePathSuffix = pre38Path\n\t} else {\n\t\tnetNamespacePathSuffix = post38Path\n\t}\n\treturn netNamespacePathSuffix\n}\n\n\/\/ Read the connections for a group of processes living in the same namespace,\n\/\/ which are found (identically) in \/proc\/PID\/net\/tcp{,6} for any of the\n\/\/ processes.\nfunc readProcessConnections(buf *bytes.Buffer, namespaceProcs []*process.Process) (bool, error) {\n\tvar (\n\t\terrRead error\n\t\terrRead6 error\n\t\tread int64\n\t\tread6 int64\n\t)\n\n\tfor _, p := range namespaceProcs {\n\t\tdirName := strconv.Itoa(p.PID)\n\n\t\tread, errRead = readFile(filepath.Join(procRoot, dirName, \"\/net\/tcp\"), buf)\n\t\tread6, errRead6 = readFile(filepath.Join(procRoot, dirName, \"\/net\/tcp6\"), buf)\n\n\t\tif errRead != nil || errRead6 != nil {\n\t\t\t\/\/ try next process\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Return after succeeding on any process\n\t\t\/\/ (proc\/PID\/net\/tcp and proc\/PID\/net\/tcp6 are identical for all the processes in the same namespace)\n\t\treturn read+read6 > 0, nil\n\t}\n\n\t\/\/ It would be cool an \"or\" error combinator\n\tif errRead != nil {\n\t\treturn false, errRead\n\t}\n\tif errRead6 != nil {\n\t\treturn false, errRead6\n\t}\n\n\treturn false, nil\n\n}\n\n\/\/ walkNamespace does the work of walk for a single namespace\nfunc (w pidWalker) walkNamespace(namespaceID uint64, buf *bytes.Buffer, sockets map[uint64]*Proc, namespaceProcs []*process.Process) error {\n\n\tif found, err := readProcessConnections(buf, namespaceProcs); err != nil || !found {\n\t\treturn err\n\t}\n\n\tvar statT syscall.Stat_t\n\tvar fdBlockCount uint64\n\tfor i, p := range namespaceProcs {\n\n\t\t\/\/ Get the sockets for all the processes in the namespace\n\t\tdirName := strconv.Itoa(p.PID)\n\t\tfdBase := filepath.Join(procRoot, 
dirName, \"fd\")\n\n\t\tif fdBlockCount > w.fdBlockSize {\n\t\t\t\/\/ we surpassed the filedescriptor rate limit\n\t\t\tselect {\n\t\t\tcase <-w.tickc:\n\t\t\tcase <-w.stopc:\n\t\t\t\treturn nil \/\/ abort\n\t\t\t}\n\n\t\t\tfdBlockCount = 0\n\t\t\t\/\/ read the connections again to\n\t\t\t\/\/ avoid the race between between \/net\/tcp{,6} and \/proc\/PID\/fd\/*\n\t\t\tif found, err := readProcessConnections(buf, namespaceProcs[i:]); err != nil || !found {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tfds, err := fs.ReadDirNames(fdBase)\n\t\tif err != nil {\n\t\t\t\/\/ Process is gone by now, or we don't have access.\n\t\t\tcontinue\n\t\t}\n\n\t\tvar proc *Proc\n\t\tfor _, fd := range fds {\n\t\t\tfdBlockCount++\n\n\t\t\t\/\/ Direct use of syscall.Stat() to save garbage.\n\t\t\terr = fs.Stat(filepath.Join(fdBase, fd), &statT)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ We want sockets only.\n\t\t\tif statT.Mode&syscall.S_IFMT != syscall.S_IFSOCK {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Initialize proc lazily to avoid creating unnecessary\n\t\t\t\/\/ garbage\n\t\t\tif proc == nil {\n\t\t\t\tproc = &Proc{\n\t\t\t\t\tPID: uint(p.PID),\n\t\t\t\t\tName: p.Name,\n\t\t\t\t\tNetNamespaceID: namespaceID,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tsockets[statT.Ino] = proc\n\t\t}\n\n\t}\n\n\treturn nil\n}\n\n\/\/ walk walks over all numerical (PID) \/proc entries. It reads\n\/\/ \/proc\/PID\/net\/tcp{,6} for each namespace and sees if the .\/fd\/* files of each\n\/\/ process in that namespace are symlinks to sockets. Returns a map from socket\n\/\/ ID (inode) to PID.\nfunc (w pidWalker) walk(buf *bytes.Buffer) (map[uint64]*Proc, error) {\n\tvar (\n\t\tsockets = map[uint64]*Proc{} \/\/ map socket inode -> process\n\t\tnamespaces = map[uint64][]*process.Process{} \/\/ map network namespace id -> processes\n\t\tstatT syscall.Stat_t\n\t)\n\n\t\/\/ We do two process traversals: One to group processes by namespace and\n\t\/\/ another one to obtain their connections.\n\t\/\/\n\t\/\/ The first traversal is needed to allow obtaining the connections on a\n\t\/\/ per-namespace basis. 
This is done to minimize the race condition\n\t\/\/ between reading \/net\/tcp{,6} of each namespace and \/proc\/PID\/fd\/* for\n\t\/\/ the processes living in that namespace.\n\n\tw.walker.Walk(func(p, _ process.Process) {\n\t\tdirName := strconv.Itoa(p.PID)\n\n\t\tnetNamespacePath := filepath.Join(procRoot, dirName, getNetNamespacePathSuffix())\n\t\tif err := fs.Stat(netNamespacePath, &statT); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tnamespaceID := statT.Ino\n\t\tnamespaces[namespaceID] = append(namespaces[namespaceID], &p)\n\t})\n\n\tfor namespaceID, procs := range namespaces {\n\t\tselect {\n\t\tcase <-w.tickc:\n\t\t\tw.walkNamespace(namespaceID, buf, sockets, procs)\n\t\tcase <-w.stopc:\n\t\t\tbreak \/\/ abort\n\t\t}\n\t}\n\n\tmetrics.SetGauge(namespaceKey, float32(len(namespaces)))\n\treturn sockets, nil\n}\n\nfunc (w pidWalker) stop() {\n\tclose(w.stopc)\n}\n\n\/\/ readFile reads an arbitrary file into a buffer.\nfunc readFile(filename string, buf *bytes.Buffer) (int64, error) {\n\tf, err := fs.Open(filename)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tdefer f.Close()\n\treturn buf.ReadFrom(f)\n}\n<commit_msg>Fix minor typo<commit_after>package procspy\n\n\/\/ \/proc-based implementation.\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"syscall\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/armon\/go-metrics\"\n\n\t\"github.com\/weaveworks\/common\/fs\"\n\t\"github.com\/weaveworks\/scope\/common\/marshal\"\n\t\"github.com\/weaveworks\/scope\/probe\/process\"\n)\n\nvar (\n\tprocRoot = \"\/proc\"\n\tnamespaceKey = []string{\"procspy\", \"namespaces\"}\n\tnetNamespacePathSuffix = \"\"\n)\n\ntype pidWalker struct {\n\twalker process.Walker\n\ttickc <-chan time.Time \/\/ Rate-limit clock. Sets the pace when traversing namespaces and \/proc\/PID\/fd\/* files.\n\tstopc chan struct{} \/\/ Abort walk\n\tfdBlockSize uint64 \/\/ Maximum number of \/proc\/PID\/fd\/* files to stat() per tick\n}\n\nfunc newPidWalker(walker process.Walker, tickc <-chan time.Time, fdBlockSize uint64) pidWalker {\n\tw := pidWalker{\n\t\twalker: walker,\n\t\ttickc: tickc,\n\t\tfdBlockSize: fdBlockSize,\n\t\tstopc: make(chan struct{}),\n\t}\n\treturn w\n}\n\n\/\/ SetProcRoot sets the location of the proc filesystem.\nfunc SetProcRoot(root string) {\n\tprocRoot = root\n}\n\nfunc getKernelVersion() (major, minor int, err error) {\n\tvar u syscall.Utsname\n\tif err = syscall.Uname(&u); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Kernel versions are not always a semver, so we have to do minimal parsing.\n\trelease := marshal.FromUtsname(u.Release)\n\tif n, err := fmt.Sscanf(release, \"%d.%d\", &major, &minor); err != nil || n != 2 {\n\t\treturn 0, 0, fmt.Errorf(\"Malformed version: %s\", release)\n\t}\n\treturn\n}\n\nfunc getNetNamespacePathSuffix() string {\n\t\/\/ With Linux 3.8 or later the network namespace of a process can be\n\t\/\/ determined by the inode of \/proc\/PID\/ns\/net. 
Before that, any file\n\t\/\/ under \/proc\/PID\/net\/ could be used but it's not documented and may\n\t\/\/ break in newer kernels.\n\tconst (\n\t\tpost38Path = \"ns\/net\"\n\t\tpre38Path = \"net\/dev\"\n\t)\n\n\tif netNamespacePathSuffix != \"\" {\n\t\treturn netNamespacePathSuffix\n\t}\n\n\tmajor, minor, err := getKernelVersion()\n\tif err != nil {\n\t\tlog.Errorf(\"getNamespacePathSuffix: cannot get kernel version: %s\", err)\n\t\tnetNamespacePathSuffix = post38Path\n\t\treturn netNamespacePathSuffix\n\t}\n\n\tif major < 3 || (major == 3 && minor < 8) {\n\t\tnetNamespacePathSuffix = pre38Path\n\t} else {\n\t\tnetNamespacePathSuffix = post38Path\n\t}\n\treturn netNamespacePathSuffix\n}\n\n\/\/ Read the connections for a group of processes living in the same namespace,\n\/\/ which are found (identically) in \/proc\/PID\/net\/tcp{,6} for any of the\n\/\/ processes.\nfunc readProcessConnections(buf *bytes.Buffer, namespaceProcs []*process.Process) (bool, error) {\n\tvar (\n\t\terrRead error\n\t\terrRead6 error\n\t\tread int64\n\t\tread6 int64\n\t)\n\n\tfor _, p := range namespaceProcs {\n\t\tdirName := strconv.Itoa(p.PID)\n\n\t\tread, errRead = readFile(filepath.Join(procRoot, dirName, \"\/net\/tcp\"), buf)\n\t\tread6, errRead6 = readFile(filepath.Join(procRoot, dirName, \"\/net\/tcp6\"), buf)\n\n\t\tif errRead != nil || errRead6 != nil {\n\t\t\t\/\/ try next process\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Return after succeeding on any process\n\t\t\/\/ (proc\/PID\/net\/tcp and proc\/PID\/net\/tcp6 are identical for all the processes in the same namespace)\n\t\treturn read+read6 > 0, nil\n\t}\n\n\t\/\/ It would be cool to have an \"or\" error combinator\n\tif errRead != nil {\n\t\treturn false, errRead\n\t}\n\tif errRead6 != nil {\n\t\treturn false, errRead6\n\t}\n\n\treturn false, nil\n\n}\n\n\/\/ walkNamespace does the work of walk for a single namespace\nfunc (w pidWalker) walkNamespace(namespaceID uint64, buf *bytes.Buffer, sockets map[uint64]*Proc, namespaceProcs []*process.Process) error {\n\n\tif found, err := readProcessConnections(buf, namespaceProcs); err != nil || !found {\n\t\treturn err\n\t}\n\n\tvar statT syscall.Stat_t\n\tvar fdBlockCount uint64\n\tfor i, p := range namespaceProcs {\n\n\t\t\/\/ Get the sockets for all the processes in the namespace\n\t\tdirName := strconv.Itoa(p.PID)\n\t\tfdBase := filepath.Join(procRoot, dirName, \"fd\")\n\n\t\tif fdBlockCount > w.fdBlockSize {\n\t\t\t\/\/ we surpassed the file descriptor rate limit\n\t\t\tselect {\n\t\t\tcase <-w.tickc:\n\t\t\tcase <-w.stopc:\n\t\t\t\treturn nil \/\/ abort\n\t\t\t}\n\n\t\t\tfdBlockCount = 0\n\t\t\t\/\/ read the connections again to\n\t\t\t\/\/ avoid the race between \/net\/tcp{,6} and \/proc\/PID\/fd\/*\n\t\t\tif found, err := readProcessConnections(buf, namespaceProcs[i:]); err != nil || !found {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tfds, err := fs.ReadDirNames(fdBase)\n\t\tif err != nil {\n\t\t\t\/\/ Process is gone by now, or we don't have access.\n\t\t\tcontinue\n\t\t}\n\n\t\tvar proc *Proc\n\t\tfor _, fd := range fds {\n\t\t\tfdBlockCount++\n\n\t\t\t\/\/ Direct use of syscall.Stat() to save garbage.\n\t\t\terr = fs.Stat(filepath.Join(fdBase, fd), &statT)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ We want sockets only.\n\t\t\tif statT.Mode&syscall.S_IFMT != syscall.S_IFSOCK {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Initialize proc lazily to avoid creating unnecessary\n\t\t\t\/\/ garbage\n\t\t\tif proc == nil {\n\t\t\t\tproc = &Proc{\n\t\t\t\t\tPID: 
uint(p.PID),\n\t\t\t\t\tName: p.Name,\n\t\t\t\t\tNetNamespaceID: namespaceID,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tsockets[statT.Ino] = proc\n\t\t}\n\n\t}\n\n\treturn nil\n}\n\n\/\/ walk walks over all numerical (PID) \/proc entries. It reads\n\/\/ \/proc\/PID\/net\/tcp{,6} for each namespace and sees if the .\/fd\/* files of each\n\/\/ process in that namespace are symlinks to sockets. Returns a map from socket\n\/\/ ID (inode) to PID.\nfunc (w pidWalker) walk(buf *bytes.Buffer) (map[uint64]*Proc, error) {\n\tvar (\n\t\tsockets = map[uint64]*Proc{} \/\/ map socket inode -> process\n\t\tnamespaces = map[uint64][]*process.Process{} \/\/ map network namespace id -> processes\n\t\tstatT syscall.Stat_t\n\t)\n\n\t\/\/ We do two process traversals: One to group processes by namespace and\n\t\/\/ another one to obtain their connections.\n\t\/\/\n\t\/\/ The first traversal is needed to allow obtaining the connections on a\n\t\/\/ per-namespace basis. This is done to minimize the race condition\n\t\/\/ between reading \/net\/tcp{,6} of each namespace and \/proc\/PID\/fd\/* for\n\t\/\/ the processes living in that namespace.\n\n\tw.walker.Walk(func(p, _ process.Process) {\n\t\tdirName := strconv.Itoa(p.PID)\n\n\t\tnetNamespacePath := filepath.Join(procRoot, dirName, getNetNamespacePathSuffix())\n\t\tif err := fs.Stat(netNamespacePath, &statT); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tnamespaceID := statT.Ino\n\t\tnamespaces[namespaceID] = append(namespaces[namespaceID], &p)\n\t})\n\n\tfor namespaceID, procs := range namespaces {\n\t\tselect {\n\t\tcase <-w.tickc:\n\t\t\tw.walkNamespace(namespaceID, buf, sockets, procs)\n\t\tcase <-w.stopc:\n\t\t\tbreak \/\/ abort\n\t\t}\n\t}\n\n\tmetrics.SetGauge(namespaceKey, float32(len(namespaces)))\n\treturn sockets, nil\n}\n\nfunc (w pidWalker) stop() {\n\tclose(w.stopc)\n}\n\n\/\/ readFile reads an arbitrary file into a buffer.\nfunc readFile(filename string, buf *bytes.Buffer) (int64, error) {\n\tf, err := fs.Open(filename)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tdefer f.Close()\n\treturn buf.ReadFrom(f)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Generated struct for models.\npackage models\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/lann\/squirrel\"\n)\n\n\/\/ GenerateCreateSQL generates plain sql for the given Profile\nfunc (p *Profile) GenerateCreateSQL() (string, []interface{}, error) {\n\tpsql := squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar).Insert(p.TableName())\n\tcolumns := make([]string, 0)\n\tvalues := make([]interface{}, 0)\n\tif p.AvatarURL != \"\" {\n\t\tcolumns = append(columns, \"avatar_url\")\n\t\tvalues = append(values, p.AvatarURL)\n\t}\n\tif !p.CreatedAt.IsZero() {\n\t\tcolumns = append(columns, \"created_at\")\n\t\tvalues = append(values, p.CreatedAt)\n\t}\n\tif p.Description != \"\" {\n\t\tcolumns = append(columns, \"description\")\n\t\tvalues = append(values, p.Description)\n\t}\n\tif float64(p.ID) != float64(0) {\n\t\tcolumns = append(columns, \"id\")\n\t\tvalues = append(values, p.ID)\n\t}\n\tif p.LinkColor != \"\" {\n\t\tcolumns = append(columns, \"link_color\")\n\t\tvalues = append(values, p.LinkColor)\n\t}\n\tif p.Location != \"\" {\n\t\tcolumns = append(columns, \"location\")\n\t\tvalues = append(values, p.Location)\n\t}\n\tif p.ScreenName != \"\" {\n\t\tcolumns = append(columns, \"screen_name\")\n\t\tvalues = append(values, p.ScreenName)\n\t}\n\tif p.URL != \"\" {\n\t\tcolumns = append(columns, \"url\")\n\t\tvalues = append(values, p.URL)\n\t}\n\treturn 
psql.Columns(columns...).Values(values...).ToSql()\n}\n\n\/\/ GenerateUpdateSQL generates plain update sql statement for the given Profile\nfunc (p *Profile) GenerateUpdateSQL() (string, []interface{}, error) {\n\tpsql := squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar).Update(p.TableName())\n\tif p.AvatarURL != \"\" {\n\t\tpsql = psql.Set(\"avatar_url\", p.AvatarURL)\n\t}\n\tif !p.CreatedAt.IsZero() {\n\t\tpsql = psql.Set(\"created_at\", p.CreatedAt)\n\t}\n\tif p.Description != \"\" {\n\t\tpsql = psql.Set(\"description\", p.Description)\n\t}\n\tif float64(p.ID) != float64(0) {\n\t\tpsql = psql.Set(\"id\", p.ID)\n\t}\n\tif p.LinkColor != \"\" {\n\t\tpsql = psql.Set(\"link_color\", p.LinkColor)\n\t}\n\tif p.Location != \"\" {\n\t\tpsql = psql.Set(\"location\", p.Location)\n\t}\n\tif p.ScreenName != \"\" {\n\t\tpsql = psql.Set(\"screen_name\", p.ScreenName)\n\t}\n\tif p.URL != \"\" {\n\t\tpsql = psql.Set(\"url\", p.URL)\n\t}\n\treturn psql.Where(\"id = ?\", p.ID).ToSql()\n}\n\n\/\/ GenerateDeleteSQL generates plain delete sql statement for the given Profile\nfunc (p *Profile) GenerateDeleteSQL() (string, []interface{}, error) {\n\tpsql := squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar).Delete(p.TableName())\n\tcolumns := make([]string, 0)\n\tvalues := make([]interface{}, 0)\n\tif p.AvatarURL != \"\" {\n\t\tcolumns = append(columns, \"avatar_url = ?\")\n\t\tvalues = append(values, p.AvatarURL)\n\t}\n\tif !p.CreatedAt.IsZero() {\n\t\tcolumns = append(columns, \"created_at = ?\")\n\t\tvalues = append(values, p.CreatedAt)\n\t}\n\tif p.Description != \"\" {\n\t\tcolumns = append(columns, \"description = ?\")\n\t\tvalues = append(values, p.Description)\n\t}\n\tif float64(p.ID) != float64(0) {\n\t\tcolumns = append(columns, \"id = ?\")\n\t\tvalues = append(values, p.ID)\n\t}\n\tif p.LinkColor != \"\" {\n\t\tcolumns = append(columns, \"link_color = ?\")\n\t\tvalues = append(values, p.LinkColor)\n\t}\n\tif p.Location != \"\" {\n\t\tcolumns = append(columns, \"location = ?\")\n\t\tvalues = append(values, p.Location)\n\t}\n\tif p.ScreenName != \"\" {\n\t\tcolumns = append(columns, \"screen_name = ?\")\n\t\tvalues = append(values, p.ScreenName)\n\t}\n\tif p.URL != \"\" {\n\t\tcolumns = append(columns, \"url = ?\")\n\t\tvalues = append(values, p.URL)\n\t}\n\treturn psql.Where(strings.Join(columns, \" AND \"), values...).ToSql()\n}\n\n\/\/ GenerateSelectSQL generates plain select sql statement for the given Profile\nfunc (p *Profile) GenerateSelectSQL() (string, []interface{}, error) {\n\tpsql := squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar).Select(\"*\").From(p.TableName())\n\tcolumns := make([]string, 0)\n\tvalues := make([]interface{}, 0)\n\tif p.AvatarURL != \"\" {\n\t\tcolumns = append(columns, \"avatar_url = ?\")\n\t\tvalues = append(values, p.AvatarURL)\n\t}\n\tif !p.CreatedAt.IsZero() {\n\t\tcolumns = append(columns, \"created_at = ?\")\n\t\tvalues = append(values, p.CreatedAt)\n\t}\n\tif p.Description != \"\" {\n\t\tcolumns = append(columns, \"description = ?\")\n\t\tvalues = append(values, p.Description)\n\t}\n\tif float64(p.ID) != float64(0) {\n\t\tcolumns = append(columns, \"id = ?\")\n\t\tvalues = append(values, p.ID)\n\t}\n\tif p.LinkColor != \"\" {\n\t\tcolumns = append(columns, \"link_color = ?\")\n\t\tvalues = append(values, p.LinkColor)\n\t}\n\tif p.Location != \"\" {\n\t\tcolumns = append(columns, \"location = ?\")\n\t\tvalues = append(values, p.Location)\n\t}\n\tif p.ScreenName != \"\" {\n\t\tcolumns = append(columns, \"screen_name = 
?\")\n\t\tvalues = append(values, p.ScreenName)\n\t}\n\tif p.URL != \"\" {\n\t\tcolumns = append(columns, \"url = ?\")\n\t\tvalues = append(values, p.URL)\n\t}\n\treturn psql.Where(strings.Join(columns, \" AND \"), values...).ToSql()\n}\n\n\/\/ TableName returns the table name for Profile\nfunc (p *Profile) TableName() string {\n\treturn \"profile\"\n}\n<commit_msg>Gene: reorder fix<commit_after>\/\/ Generated struct for models.\npackage models\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/lann\/squirrel\"\n)\n\n\/\/ GenerateCreateSQL generates plain sql for the given Account\nfunc (a *Account) GenerateCreateSQL() (string, []interface{}, error) {\n\tpsql := squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar).Insert(a.TableName())\n\tcolumns := make([]string, 0)\n\tvalues := make([]interface{}, 0)\n\tif !a.CreatedAt.IsZero() {\n\t\tcolumns = append(columns, \"created_at\")\n\t\tvalues = append(values, a.CreatedAt)\n\t}\n\tif a.EmailAddress != \"\" {\n\t\tcolumns = append(columns, \"email_address\")\n\t\tvalues = append(values, a.EmailAddress)\n\t}\n\tif a.EmailStatusConstant != \"\" {\n\t\tcolumns = append(columns, \"email_status_constant\")\n\t\tvalues = append(values, a.EmailStatusConstant)\n\t}\n\tif float64(a.ID) != float64(0) {\n\t\tcolumns = append(columns, \"id\")\n\t\tvalues = append(values, a.ID)\n\t}\n\tif a.Password != \"\" {\n\t\tcolumns = append(columns, \"password\")\n\t\tvalues = append(values, a.Password)\n\t}\n\tif a.PasswordStatusConstant != \"\" {\n\t\tcolumns = append(columns, \"password_status_constant\")\n\t\tvalues = append(values, a.PasswordStatusConstant)\n\t}\n\tif float64(a.ProfileID) != float64(0) {\n\t\tcolumns = append(columns, \"profile_id\")\n\t\tvalues = append(values, a.ProfileID)\n\t}\n\tif a.Salt != \"\" {\n\t\tcolumns = append(columns, \"salt\")\n\t\tvalues = append(values, a.Salt)\n\t}\n\tif a.StatusConstant != \"\" {\n\t\tcolumns = append(columns, \"status_constant\")\n\t\tvalues = append(values, a.StatusConstant)\n\t}\n\tif a.URL != \"\" {\n\t\tcolumns = append(columns, \"url\")\n\t\tvalues = append(values, a.URL)\n\t}\n\tif a.URLName != \"\" {\n\t\tcolumns = append(columns, \"url_name\")\n\t\tvalues = append(values, a.URLName)\n\t}\n\treturn psql.Columns(columns...).Values(values...).ToSql()\n}\n\n\/\/ GenerateUpdateSQL generates plain update sql statement for the given Account\nfunc (a *Account) GenerateUpdateSQL() (string, []interface{}, error) {\n\tpsql := squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar).Update(a.TableName())\n\tif !a.CreatedAt.IsZero() {\n\t\tpsql = psql.Set(\"created_at\", a.CreatedAt)\n\t}\n\tif a.EmailAddress != \"\" {\n\t\tpsql = psql.Set(\"email_address\", a.EmailAddress)\n\t}\n\tif a.EmailStatusConstant != \"\" {\n\t\tpsql = psql.Set(\"email_status_constant\", a.EmailStatusConstant)\n\t}\n\tif float64(a.ID) != float64(0) {\n\t\tpsql = psql.Set(\"id\", a.ID)\n\t}\n\tif a.Password != \"\" {\n\t\tpsql = psql.Set(\"password\", a.Password)\n\t}\n\tif a.PasswordStatusConstant != \"\" {\n\t\tpsql = psql.Set(\"password_status_constant\", a.PasswordStatusConstant)\n\t}\n\tif float64(a.ProfileID) != float64(0) {\n\t\tpsql = psql.Set(\"profile_id\", a.ProfileID)\n\t}\n\tif a.Salt != \"\" {\n\t\tpsql = psql.Set(\"salt\", a.Salt)\n\t}\n\tif a.StatusConstant != \"\" {\n\t\tpsql = psql.Set(\"status_constant\", a.StatusConstant)\n\t}\n\tif a.URL != \"\" {\n\t\tpsql = psql.Set(\"url\", a.URL)\n\t}\n\tif a.URLName != \"\" {\n\t\tpsql = psql.Set(\"url_name\", a.URLName)\n\t}\n\treturn psql.Where(\"id = ?\", 
a.ID).ToSql()\n}\n\n\/\/ GenerateDeleteSQL generates plain delete sql statement for the given Account\nfunc (a *Account) GenerateDeleteSQL() (string, []interface{}, error) {\n\tpsql := squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar).Delete(a.TableName())\n\tcolumns := make([]string, 0)\n\tvalues := make([]interface{}, 0)\n\tif !a.CreatedAt.IsZero() {\n\t\tcolumns = append(columns, \"created_at = ?\")\n\t\tvalues = append(values, a.CreatedAt)\n\t}\n\tif a.EmailAddress != \"\" {\n\t\tcolumns = append(columns, \"email_address = ?\")\n\t\tvalues = append(values, a.EmailAddress)\n\t}\n\tif a.EmailStatusConstant != \"\" {\n\t\tcolumns = append(columns, \"email_status_constant = ?\")\n\t\tvalues = append(values, a.EmailStatusConstant)\n\t}\n\tif float64(a.ID) != float64(0) {\n\t\tcolumns = append(columns, \"id = ?\")\n\t\tvalues = append(values, a.ID)\n\t}\n\tif a.Password != \"\" {\n\t\tcolumns = append(columns, \"password = ?\")\n\t\tvalues = append(values, a.Password)\n\t}\n\tif a.PasswordStatusConstant != \"\" {\n\t\tcolumns = append(columns, \"password_status_constant = ?\")\n\t\tvalues = append(values, a.PasswordStatusConstant)\n\t}\n\tif float64(a.ProfileID) != float64(0) {\n\t\tcolumns = append(columns, \"profile_id = ?\")\n\t\tvalues = append(values, a.ProfileID)\n\t}\n\tif a.Salt != \"\" {\n\t\tcolumns = append(columns, \"salt = ?\")\n\t\tvalues = append(values, a.Salt)\n\t}\n\tif a.StatusConstant != \"\" {\n\t\tcolumns = append(columns, \"status_constant = ?\")\n\t\tvalues = append(values, a.StatusConstant)\n\t}\n\tif a.URL != \"\" {\n\t\tcolumns = append(columns, \"url = ?\")\n\t\tvalues = append(values, a.URL)\n\t}\n\tif a.URLName != \"\" {\n\t\tcolumns = append(columns, \"url_name = ?\")\n\t\tvalues = append(values, a.URLName)\n\t}\n\treturn psql.Where(strings.Join(columns, \" AND \"), values...).ToSql()\n}\n\n\/\/ GenerateSelectSQL generates plain select sql statement for the given Account\nfunc (a *Account) GenerateSelectSQL() (string, []interface{}, error) {\n\tpsql := squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar).Select(\"*\").From(a.TableName())\n\tcolumns := make([]string, 0)\n\tvalues := make([]interface{}, 0)\n\tif !a.CreatedAt.IsZero() {\n\t\tcolumns = append(columns, \"created_at = ?\")\n\t\tvalues = append(values, a.CreatedAt)\n\t}\n\tif a.EmailAddress != \"\" {\n\t\tcolumns = append(columns, \"email_address = ?\")\n\t\tvalues = append(values, a.EmailAddress)\n\t}\n\tif a.EmailStatusConstant != \"\" {\n\t\tcolumns = append(columns, \"email_status_constant = ?\")\n\t\tvalues = append(values, a.EmailStatusConstant)\n\t}\n\tif float64(a.ID) != float64(0) {\n\t\tcolumns = append(columns, \"id = ?\")\n\t\tvalues = append(values, a.ID)\n\t}\n\tif a.Password != \"\" {\n\t\tcolumns = append(columns, \"password = ?\")\n\t\tvalues = append(values, a.Password)\n\t}\n\tif a.PasswordStatusConstant != \"\" {\n\t\tcolumns = append(columns, \"password_status_constant = ?\")\n\t\tvalues = append(values, a.PasswordStatusConstant)\n\t}\n\tif float64(a.ProfileID) != float64(0) {\n\t\tcolumns = append(columns, \"profile_id = ?\")\n\t\tvalues = append(values, a.ProfileID)\n\t}\n\tif a.Salt != \"\" {\n\t\tcolumns = append(columns, \"salt = ?\")\n\t\tvalues = append(values, a.Salt)\n\t}\n\tif a.StatusConstant != \"\" {\n\t\tcolumns = append(columns, \"status_constant = ?\")\n\t\tvalues = append(values, a.StatusConstant)\n\t}\n\tif a.URL != \"\" {\n\t\tcolumns = append(columns, \"url = ?\")\n\t\tvalues = append(values, a.URL)\n\t}\n\tif a.URLName != \"\" {\n\t\tcolumns 
= append(columns, \"url_name = ?\")\n\t\tvalues = append(values, a.URLName)\n\t}\n\treturn psql.Where(strings.Join(columns, \" AND \"), values...).ToSql()\n}\n\n\/\/ TableName returns the table name for Account\nfunc (a *Account) TableName() string {\n\treturn \"account\"\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ link.go -- link level protocol\n\/\/\npackage samtun\n\nimport (\n \"bitbucket.org\/majestrate\/sam3\"\n \"bytes\"\n \"encoding\/binary\"\n \"net\"\n)\n\ntype ipPacket []byte\n\nfunc (pkt ipPacket) setDst(addr net.IP) {\n pkt[16] = addr[0]\n pkt[17] = addr[1]\n pkt[18] = addr[2]\n pkt[19] = addr[3]\n}\n\nfunc (pkt ipPacket) setSrc(addr net.IP) {\n pkt[12] = addr[0]\n pkt[13] = addr[1]\n pkt[14] = addr[2]\n pkt[15] = addr[3] \n}\n\nfunc (pkt ipPacket) Dst() net.IP {\n return net.IPv4(pkt[16], pkt[17], pkt[18], pkt[19])\n}\n\nfunc (pkt ipPacket) Src() net.IP {\n return net.IPv4(pkt[12], pkt[13], pkt[14], pkt[15])\n}\n\n\/\/ a link level message\n\/\/ infact a bunch of ip packets\ntype linkFrame []ipPacket\n\n\/\/ get a raw bytes representation of this frame\nfunc (f linkFrame) Bytes() []byte {\n var buff bytes.Buffer\n \/\/ link frame protocol version\n buff.Write([]byte{0})\n \/\/ link frame packet count\n pkts := len(f)\n buff.Write([]byte{byte(pkts)})\n \/\/ for each packet\n for _, pkt := range f {\n \/\/ pack the packet int our buffer\n pktbuff := make([]byte, len(pkt) + 2)\n binary.BigEndian.PutUint16(pktbuff[:], uint16(len(pkt)))\n copy(pktbuff[:2], pkt)\n buff.Write(pktbuff)\n }\n return buff.Bytes()\n}\n\n\/\/ create a linkFrame from a byteslice\n\/\/ returns nil if format invalid\nfunc frameFromBytes(buff []byte) (f linkFrame) {\n if buff[0] == 0x00 {\n pkts := buff[1]\n idx := 0\n for pkts > 0 {\n plen := binary.BigEndian.Uint16(buff[:2+idx])\n if 2 + int(plen) + idx < len(buff) {\n pkt := make([]byte, int(plen))\n copy(pkt, buff[idx+2:2+len(pkt)+idx])\n f = append(f, ipPacket(pkt))\n pkts--\n idx += len(pkt) + 2\n } else {\n return nil\n }\n }\n }\n return\n}\n\n\/\/ a link layer message that is sent over i2p\ntype linkMessage struct {\n frame linkFrame\n addr sam3.I2PAddr\n}\n\n\nfunc (m *linkMessage) appendPacket(pkt ipPacket) {\n m.frame = append(m.frame, pkt)\n}\n\n\/\/ get raw bytes representation to send over the wire\nfunc (m *linkMessage) WireBytes() []byte {\n return m.frame.Bytes()\n}\n\ntype linkProtocol struct {\n \/\/ key name for link message data\n msgdata string\n \/\/ key name for link protocol version number\n version string\n \/\/ key name for link message type\n msgtype string\n}\n\n\/\/ given a byte slice, read out a linkMessage\n\/\/func (p linkProtocol) Parse(data []byte) (pkts []packet, err error) {\n\/\/ bencode.\n\/\/}\n\nvar link = linkProtocol{\n msgdata: \"x\",\n msgtype: \"w\",\n version: \"v\",\n}\n\n<commit_msg>wrong slice<commit_after>\/\/\n\/\/ link.go -- link level protocol\n\/\/\npackage samtun\n\nimport (\n \"bitbucket.org\/majestrate\/sam3\"\n \"bytes\"\n \"encoding\/binary\"\n \"net\"\n)\n\ntype ipPacket []byte\n\nfunc (pkt ipPacket) setDst(addr net.IP) {\n pkt[16] = addr[0]\n pkt[17] = addr[1]\n pkt[18] = addr[2]\n pkt[19] = addr[3]\n}\n\nfunc (pkt ipPacket) setSrc(addr net.IP) {\n pkt[12] = addr[0]\n pkt[13] = addr[1]\n pkt[14] = addr[2]\n pkt[15] = addr[3] \n}\n\nfunc (pkt ipPacket) Dst() net.IP {\n return net.IPv4(pkt[16], pkt[17], pkt[18], pkt[19])\n}\n\nfunc (pkt ipPacket) Src() net.IP {\n return net.IPv4(pkt[12], pkt[13], pkt[14], pkt[15])\n}\n\n\/\/ a link level message\n\/\/ infact a bunch of ip packets\ntype 
linkFrame []ipPacket\n\n\/\/ get a raw bytes representation of this frame\nfunc (f linkFrame) Bytes() []byte {\n var buff bytes.Buffer\n \/\/ link frame protocol version\n buff.Write([]byte{0})\n \/\/ link frame packet count\n pkts := len(f)\n buff.Write([]byte{byte(pkts)})\n \/\/ for each packet\n for _, pkt := range f {\n \/\/ pack the packet int our buffer\n pktbuff := make([]byte, len(pkt) + 2)\n binary.BigEndian.PutUint16(pktbuff[:], uint16(len(pkt)))\n copy(pktbuff[2:], pkt)\n buff.Write(pktbuff)\n }\n return buff.Bytes()\n}\n\n\/\/ create a linkFrame from a byteslice\n\/\/ returns nil if format invalid\nfunc frameFromBytes(buff []byte) (f linkFrame) {\n if buff[0] == 0x00 {\n pkts := buff[1]\n idx := 0\n for pkts > 0 {\n plen := binary.BigEndian.Uint16(buff[:2+idx])\n if 2 + int(plen) + idx < len(buff) {\n pkt := make([]byte, int(plen))\n copy(pkt, buff[idx+2:2+len(pkt)+idx])\n f = append(f, ipPacket(pkt))\n pkts--\n idx += len(pkt) + 2\n } else {\n return nil\n }\n }\n }\n return\n}\n\n\/\/ a link layer message that is sent over i2p\ntype linkMessage struct {\n frame linkFrame\n addr sam3.I2PAddr\n}\n\n\nfunc (m *linkMessage) appendPacket(pkt ipPacket) {\n m.frame = append(m.frame, pkt)\n}\n\n\/\/ get raw bytes representation to send over the wire\nfunc (m *linkMessage) WireBytes() []byte {\n return m.frame.Bytes()\n}\n\ntype linkProtocol struct {\n \/\/ key name for link message data\n msgdata string\n \/\/ key name for link protocol version number\n version string\n \/\/ key name for link message type\n msgtype string\n}\n\n\/\/ given a byte slice, read out a linkMessage\n\/\/func (p linkProtocol) Parse(data []byte) (pkts []packet, err error) {\n\/\/ bencode.\n\/\/}\n\nvar link = linkProtocol{\n msgdata: \"x\",\n msgtype: \"w\",\n version: \"v\",\n}\n\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/bobbytables\/gangway\/data\"\n)\n\n\/\/ getDefinitionsResp contains the structure of the response\n\/\/ that gangway will return when asked about all definitions\ntype getDefinitionsResp struct {\n\tDefinitions []data.Definition `json:\"definitions\"`\n}\n\n\/\/ postDefinitionsResp contains the structure of the response\n\/\/ that gangway returns upon a successful add of a definition\ntype postDefinitionsResp struct {\n\tDefinition data.Definition `json:\"definition\"`\n}\n\nfunc (s *Server) getDefinitions(w http.ResponseWriter, r *http.Request) {\n\tdefs, err := s.store.RetrieveDefinitions()\n\tif err != nil {\n\t\tlogrus.WithError(err).Error()\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tresp := getDefinitionsResp{Definitions: defs}\n\n\tif err := json.NewEncoder(w).Encode(resp); err != nil {\n\t\tlogrus.WithError(err).Error()\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\n\/\/ postDefinitions handles adding definitions to the underlying store\nfunc (s *Server) postDefinitions(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\tvar d data.Definition\n\tif err := json.NewDecoder(r.Body).Decode(&d); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif vErr := data.IsValidDefinition(d); vErr != nil {\n\t\ts.writeError(w, vErr, 422)\n\t\treturn\n\t}\n\n\tif err := s.store.AddDefinition(d); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusCreated)\n\tresp := postDefinitionsResp{Definition: d}\n\tif err := 
json.NewEncoder(w).Encode(resp); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n}\n<commit_msg>Add better errors.<commit_after>package server\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/bobbytables\/gangway\/data\"\n)\n\n\/\/ getDefinitionsResp contains the structure of the response\n\/\/ that gangway will return when asked about all definitions\ntype getDefinitionsResp struct {\n\tDefinitions []data.Definition `json:\"definitions\"`\n}\n\n\/\/ postDefinitionsResp contains the structure of the response\n\/\/ that gangway returns upon a successful add of a definition\ntype postDefinitionsResp struct {\n\tDefinition data.Definition `json:\"definition\"`\n}\n\nfunc (s *Server) getDefinitions(w http.ResponseWriter, r *http.Request) {\n\tdefs, err := s.store.RetrieveDefinitions()\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"could not retrieve definitions\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tresp := getDefinitionsResp{Definitions: defs}\n\n\tif err := json.NewEncoder(w).Encode(resp); err != nil {\n\t\tlogrus.WithError(err).Error(\"could not encode definitions\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\n\/\/ postDefinitions handles adding definitions to the underlying store\nfunc (s *Server) postDefinitions(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\tvar d data.Definition\n\tif err := json.NewDecoder(r.Body).Decode(&d); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif vErr := data.IsValidDefinition(d); vErr != nil {\n\t\ts.writeError(w, vErr, 422)\n\t\treturn\n\t}\n\n\tif err := s.store.AddDefinition(d); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusCreated)\n\tresp := postDefinitionsResp{Definition: d}\n\tif err := json.NewEncoder(w).Encode(resp); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/gorilla\/websocket\"\n)\n\nvar address = flag.String(\"address\", \":8080\", \"The address to bind on.\")\n\nvar upgrader = websocket.Upgrader{}\n\nfunc wsHandler(w http.ResponseWriter, r *http.Request) {\n\tconn, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlog.Printf(\"Unable to upgrade connection: %v\\n\", err)\n\t\thttp.Error(w, \"Connection failed.\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer conn.Close()\n\n\tdata := make(map[string]interface{})\n\terr = conn.ReadJSON(&data)\n\tif err != nil {\n\t\tlog.Printf(\"Error decoding json: %v\\n\", err)\n\t\treturn\n\t}\n\n\tlog.Printf(\"Got data: %s\", data)\n\n\tdata = make(map[string]interface{})\n\tdata[\"hi_there\"] = \"This is the server\"\n\terr = conn.WriteJSON(data)\n\tif err != nil {\n\t\tlog.Printf(\"Error encoding json: %v\\n\", err)\n\t\treturn\n\t}\n}\n\nfunc main() {\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"\/ws\", wsHandler)\n\n\tlog.Printf(\"Starting on %s\\n\", *address)\n\tlog.Fatalln(http.ListenAndServe(*address, r))\n}\n<commit_msg>Added serving the frontend<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/gorilla\/websocket\"\n)\n\nvar address = flag.String(\"address\", \":8080\", \"The address to bind on.\")\nvar staticFilesPath = 
flag.String(\n\t\"static_files_path\", \"..\/frontend\/\", \"The static files to use.\")\n\nvar upgrader = websocket.Upgrader{}\n\nfunc wsHandler(w http.ResponseWriter, r *http.Request) {\n\tconn, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlog.Printf(\"Unable to upgrade connection: %v\\n\", err)\n\t\thttp.Error(w, \"Connection failed.\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer conn.Close()\n\n\tdata := make(map[string]interface{})\n\terr = conn.ReadJSON(&data)\n\tif err != nil {\n\t\tlog.Printf(\"Error decoding json: %v\\n\", err)\n\t\treturn\n\t}\n\n\tlog.Printf(\"Got data: %s\", data)\n\n\tdata = make(map[string]interface{})\n\tdata[\"hi_there\"] = \"This is the server\"\n\terr = conn.WriteJSON(data)\n\tif err != nil {\n\t\tlog.Printf(\"Error encoding json: %v\\n\", err)\n\t\treturn\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\trootRouter := http.NewServeMux()\n\trootRouter.Handle(\"\/\", http.FileServer(http.Dir(*staticFilesPath)))\n\trootRouter.HandleFunc(\"\/ws\", wsHandler)\n\n\tlog.Printf(\"Starting on %s\\n\", *address)\n\tlog.Fatalln(http.ListenAndServe(*address, rootRouter))\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"net\"\n\t\"sync\"\n)\n\ntype NodeManager struct {\n\tparams *NodeManagerParams\n\tlogger Logger\n\twg sync.WaitGroup\n\tmu sync.Mutex\n\ttransportManager *TransportManager\n\trooms map[string]struct{}\n}\n\ntype NodeManagerParams struct {\n\tLoggerFactory LoggerFactory\n\tRoomManager *ChannelRoomManager\n\tTracksManager TracksManager\n\tListenAddr *net.UDPAddr\n\tNodes []*net.UDPAddr\n}\n\nfunc NewNodeManager(params NodeManagerParams) (*NodeManager, error) {\n\tlogger := params.LoggerFactory.GetLogger(\"nodemanager\")\n\n\tconn, err := net.ListenUDP(\"udp\", params.ListenAddr)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogger.Printf(\"Listening on UDP port: %s\", conn.LocalAddr().String())\n\n\ttransportManager := NewTransportManager(TransportManagerParams{\n\t\tConn: conn,\n\t\tLoggerFactory: params.LoggerFactory,\n\t})\n\n\tnm := &NodeManager{\n\t\tparams: &params,\n\t\ttransportManager: transportManager,\n\t\tlogger: logger,\n\t\trooms: map[string]struct{}{},\n\t}\n\n\tfor _, addr := range params.Nodes {\n\t\tlogger.Printf(\"Configuring remote node: %s\", addr.String())\n\n\t\tfactory, err := transportManager.GetTransportFactory(addr)\n\t\tif err != nil {\n\t\t\tnm.logger.Printf(\"Error creating transport factory for remote addr: %s\", addr)\n\t\t}\n\n\t\tnm.handleServerTransportFactory(factory)\n\t}\n\n\tgo nm.startTransportEventLoop()\n\tgo nm.startRoomEventLoop()\n\n\treturn nm, nil\n}\n\nfunc (nm *NodeManager) startTransportEventLoop() {\n\tfor {\n\t\tfactory, err := nm.transportManager.AcceptTransportFactory()\n\t\tif err != nil {\n\t\t\tnm.logger.Printf(\"Error accepting transport factory: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tnm.handleServerTransportFactory(factory)\n\t}\n}\n\nfunc (nm *NodeManager) handleServerTransportFactory(factory *ServerTransportFactory) {\n\tnm.wg.Add(1)\n\tgo func() {\n\t\tdefer nm.wg.Done()\n\n\t\tdoneChan := make(chan struct{})\n\t\tcloseChannelOnce := sync.Once{}\n\n\t\tdone := func() {\n\t\t\tcloseChannelOnce.Do(func() {\n\t\t\t\tclose(doneChan)\n\t\t\t})\n\t\t}\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-doneChan:\n\t\t\t\tnm.logger.Printf(\"Aborting server transport factory goroutine\")\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\t\t\ttransportPromise := 
factory.AcceptTransport()\n\t\t\tnm.handleTransportPromise(transportPromise)\n\n\t\t\tnm.wg.Add(1)\n\t\t\tgo func(p *TransportPromise) {\n\t\t\t\tdefer nm.wg.Done()\n\n\t\t\t\t_, err := p.Wait()\n\t\t\t\tif err != nil {\n\t\t\t\t\tnm.logger.Printf(\"Error while waiting for TransportPromise: %s\", err)\n\t\t\t\t\tdone()\n\t\t\t\t}\n\t\t\t}(transportPromise)\n\t\t}\n\t}()\n}\n\nfunc (nm *NodeManager) handleTransportPromise(transportPromise *TransportPromise) {\n\tnm.wg.Add(1)\n\n\tgo func() {\n\t\tdefer nm.wg.Done()\n\n\t\tstreamTransport, err := transportPromise.Wait()\n\n\t\tif err != nil {\n\t\t\tnm.logger.Printf(\"Error waiting for transport promise: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tnm.mu.Lock()\n\t\tdefer nm.mu.Unlock()\n\n\t\tnm.params.TracksManager.Add(transportPromise.StreamID(), streamTransport)\n\t}()\n}\n\nfunc (nm *NodeManager) startRoomEventLoop() {\n\tfor {\n\t\troomEvent, err := nm.params.RoomManager.AcceptEvent()\n\t\tif err != nil {\n\t\t\tnm.logger.Printf(\"Error accepting room event: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tswitch roomEvent.Type {\n\t\tcase RoomEventTypeAdd:\n\t\t\tfor _, factory := range nm.transportManager.Factories() {\n\t\t\t\ttransportPromise := factory.NewTransport(roomEvent.RoomName)\n\t\t\t\tnm.handleTransportPromise(transportPromise)\n\t\t\t}\n\t\tcase RoomEventTypeRemove:\n\t\t\tfor _, factory := range nm.transportManager.Factories() {\n\t\t\t\tfactory.CloseTransport(roomEvent.RoomName)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (nm *NodeManager) Close() error {\n\tnm.params.RoomManager.Close()\n\tnm.transportManager.Close()\n\n\tnm.wg.Wait()\n\n\treturn nil\n}\n<commit_msg>Add more logging to NodeManager<commit_after>package server\n\nimport (\n\t\"net\"\n\t\"sync\"\n)\n\ntype NodeManager struct {\n\tparams *NodeManagerParams\n\tlogger Logger\n\twg sync.WaitGroup\n\tmu sync.Mutex\n\ttransportManager *TransportManager\n\trooms map[string]struct{}\n}\n\ntype NodeManagerParams struct {\n\tLoggerFactory LoggerFactory\n\tRoomManager *ChannelRoomManager\n\tTracksManager TracksManager\n\tListenAddr *net.UDPAddr\n\tNodes []*net.UDPAddr\n}\n\nfunc NewNodeManager(params NodeManagerParams) (*NodeManager, error) {\n\tlogger := params.LoggerFactory.GetLogger(\"nodemanager\")\n\n\tconn, err := net.ListenUDP(\"udp\", params.ListenAddr)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogger.Printf(\"Listening on UDP port: %s\", conn.LocalAddr().String())\n\n\ttransportManager := NewTransportManager(TransportManagerParams{\n\t\tConn: conn,\n\t\tLoggerFactory: params.LoggerFactory,\n\t})\n\n\tnm := &NodeManager{\n\t\tparams: &params,\n\t\ttransportManager: transportManager,\n\t\tlogger: logger,\n\t\trooms: map[string]struct{}{},\n\t}\n\n\tfor _, addr := range params.Nodes {\n\t\tlogger.Printf(\"Configuring remote node: %s\", addr.String())\n\n\t\tfactory, err := transportManager.GetTransportFactory(addr)\n\t\tif err != nil {\n\t\t\tnm.logger.Printf(\"Error creating transport factory for remote addr: %s\", addr)\n\t\t}\n\n\t\tnm.handleServerTransportFactory(factory)\n\t}\n\n\tgo nm.startTransportEventLoop()\n\tgo nm.startRoomEventLoop()\n\n\treturn nm, nil\n}\n\nfunc (nm *NodeManager) startTransportEventLoop() {\n\tfor {\n\t\tfactory, err := nm.transportManager.AcceptTransportFactory()\n\t\tif err != nil {\n\t\t\tnm.logger.Printf(\"Error accepting transport factory: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tnm.handleServerTransportFactory(factory)\n\t}\n}\n\nfunc (nm *NodeManager) handleServerTransportFactory(factory *ServerTransportFactory) {\n\tnm.wg.Add(1)\n\tgo func() 
{\n\t\tdefer nm.wg.Done()\n\n\t\tdoneChan := make(chan struct{})\n\t\tcloseChannelOnce := sync.Once{}\n\n\t\tdone := func() {\n\t\t\tcloseChannelOnce.Do(func() {\n\t\t\t\tclose(doneChan)\n\t\t\t})\n\t\t}\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-doneChan:\n\t\t\t\tnm.logger.Printf(\"Aborting server transport factory goroutine\")\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\t\t\ttransportPromise := factory.AcceptTransport()\n\t\t\tnm.handleTransportPromise(transportPromise)\n\n\t\t\tnm.wg.Add(1)\n\t\t\tgo func(p *TransportPromise) {\n\t\t\t\tdefer nm.wg.Done()\n\n\t\t\t\t_, err := p.Wait()\n\t\t\t\tif err != nil {\n\t\t\t\t\tnm.logger.Printf(\"Error while waiting for TransportPromise: %s\", err)\n\t\t\t\t\tdone()\n\t\t\t\t}\n\t\t\t}(transportPromise)\n\t\t}\n\t}()\n}\n\nfunc (nm *NodeManager) handleTransportPromise(transportPromise *TransportPromise) {\n\tnm.wg.Add(1)\n\n\tgo func() {\n\t\tdefer nm.wg.Done()\n\n\t\tstreamTransport, err := transportPromise.Wait()\n\n\t\tif err != nil {\n\t\t\tnm.logger.Printf(\"Error waiting for transport promise: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tnm.mu.Lock()\n\t\tdefer nm.mu.Unlock()\n\n\t\tnm.logger.Printf(\"Add transport: %s %s %s\", transportPromise.StreamID(), streamTransport.StreamID, streamTransport.ClientID())\n\t\tnm.params.TracksManager.Add(transportPromise.StreamID(), streamTransport)\n\t}()\n}\n\nfunc (nm *NodeManager) startRoomEventLoop() {\n\tfor {\n\t\troomEvent, err := nm.params.RoomManager.AcceptEvent()\n\t\tif err != nil {\n\t\t\tnm.logger.Printf(\"Error accepting room event: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tswitch roomEvent.Type {\n\t\tcase RoomEventTypeAdd:\n\t\t\tfor _, factory := range nm.transportManager.Factories() {\n\t\t\t\tnm.logger.Printf(\"Creating new transport for room: %s\", roomEvent.RoomName)\n\t\t\t\ttransportPromise := factory.NewTransport(roomEvent.RoomName)\n\t\t\t\tnm.handleTransportPromise(transportPromise)\n\t\t\t}\n\t\tcase RoomEventTypeRemove:\n\t\t\tfor _, factory := range nm.transportManager.Factories() {\n\t\t\t\tnm.logger.Printf(\"Closing transport for room: %s\", roomEvent.RoomName)\n\t\t\t\tfactory.CloseTransport(roomEvent.RoomName)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (nm *NodeManager) Close() error {\n\tnm.params.RoomManager.Close()\n\tnm.transportManager.Close()\n\n\tnm.wg.Wait()\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"golang.org\/x\/net\/proxy\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/olekukonko\/tablewriter\"\n\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\ntype config struct {\n\tBaseURL string `yaml:\"baseURL\"`\n\tFormats map[string]format `yaml:\"formats,flow\"`\n\tElements map[string]element `yaml:\"elements,flow\"`\n}\n\ntype element struct {\n\tID string `yaml:\"id\"`\n\tFile string `yaml:\"file\"`\n\tMeta bool `yaml:\"meta\"`\n\tName string `yaml:\"name\"`\n\tFiles []string `yaml:\"files\"`\n\tParent string `yaml:\"parent\"`\n}\n\ntype format struct {\n\tID string `yaml:\"ext\"`\n\tLoc string `yaml:\"loc\"`\n}\n\nvar (\n\tapp = kingpin.New(\"download-geofabrik\", \"A command-line tool for downloading OSM files.\")\n\tfConfig = app.Flag(\"config\", \"Set Config file.\").Default(\".\/geofabrik.yml\").Short('c').String()\n\tnodownload = app.Flag(\"nodownload\", \"Do not download file (test only)\").Short('n').Bool()\n\tverbose = app.Flag(\"verbose\", \"Be verbose\").Short('v').Bool()\n\tfProxyHTTP = 
app.Flag(\"proxy-http\", \"Use http proxy, format: proxy_address:port\").Default(\"\").String()\n\tfProxySock5 = app.Flag(\"proxy-sock5\", \"Use Sock5 proxy, format: proxy_address:port\").Default(\"\").String()\n\tfProxyUser = app.Flag(\"proxy-user\", \"Proxy user\").Default(\"\").String()\n\tfProxyPass = app.Flag(\"proxy-pass\", \"Proxy password\").Default(\"\").String()\n\n\tupdate = app.Command(\"update\", \"Update geofabrik.yml from github\")\n\tfURL = update.Flag(\"url\", \"Url for config source\").Default(\"https:\/\/raw.githubusercontent.com\/julien-noblet\/download-geofabrik\/stable\/geofabrik.yml\").String()\n\n\tlist = app.Command(\"list\", \"Show elements available\")\n\tlmd = list.Flag(\"markdown\", \"generate list in Markdown format\").Bool()\n\n\tdownload = app.Command(\"download\", \"Download element\") \/\/TODO : add d as command\n\tdelement = download.Arg(\"element\", \"OSM element\").Required().String()\n\tdosmBz2 = download.Flag(\"osm.bz2\", \"Download osm.bz2 if available\").Short('B').Bool()\n\tdshpZip = download.Flag(\"shp.zip\", \"Download shp.zip if available\").Short('S').Bool()\n\tdosmPbf = download.Flag(\"osm.pbf\", \"Download osm.pbf (default)\").Short('P').Bool()\n\tdoshPbf = download.Flag(\"osh.pbf\", \"Download osh.pbf (default)\").Short('H').Bool()\n\tdstate = download.Flag(\"state\", \"Download state.txt file\").Short('s').Bool()\n\tdpoly = download.Flag(\"poly\", \"Download poly file\").Short('p').Bool()\n)\n\nfunc (e *element) hasParent() bool {\n\treturn len(e.Parent) != 0\n}\n\nfunc miniFormats(s []string) string {\n\tres := make([]string, 6)\n\tfor _, item := range s {\n\t\tswitch item {\n\t\tcase \"state\":\n\t\t\tres[0] = \"s\"\n\t\tcase \"osm.pbf\":\n\t\t\tres[1] = \"P\"\n\t\tcase \"osm.bz2\":\n\t\t\tres[2] = \"B\"\n\t\tcase \"osh.pbf\":\n\t\t\tres[3] = \"H\"\n\t\tcase \"poly\":\n\t\t\tres[4] = \"p\"\n\t\tcase \"shp.zip\":\n\t\t\tres[5] = \"S\"\n\t\t}\n\t}\n\n\treturn strings.Join(res, \"\")\n}\n\nfunc downloadFromURL(myURL string, fileName string) {\n\tif *verbose == true {\n\t\tlog.Println(\" Downloading\", myURL, \"to\", fileName)\n\t}\n\n\tif *nodownload == false {\n\t\t\/\/ TODO: check file existence first with io.IsExist\n\t\toutput, err := os.Create(fileName)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\" Error while creating \", fileName, \"-\", err)\n\t\t\treturn\n\t\t}\n\t\tdefer output.Close()\n\t\ttransport := &http.Transport{}\n\t\tif *fProxyHTTP != \"\" {\n\t\t\tu, err := url.Parse(myURL)\n\t\t\t\/\/log.Println(u.Scheme +\":\/\/\"+ *fProxyHTTP)\n\t\t\tproxyURL, err := url.Parse(u.Scheme + \":\/\/\" + *fProxyHTTP)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalln(\" Wrong proxy url, please use format proxy_address:port\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttransport = &http.Transport{Proxy: http.ProxyURL(proxyURL)}\n\t\t}\n\t\tclient := &http.Client{Transport: transport}\n\t\tif *fProxySock5 != \"\" {\n\t\t\tauth := proxy.Auth{*fProxyUser, *fProxyPass}\n\t\t\tdialer, err := proxy.SOCKS5(\"tcp\", *fProxySock5, &auth, proxy.Direct)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalln(\" can't connect to the proxy:\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttransport.Dial = dialer.Dial\n\t\t}\n\t\tresponse, err := client.Get(myURL)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\" Error while downloading \", myURL, \"-\", err)\n\t\t\treturn\n\t\t}\n\t\tdefer response.Body.Close()\n\n\t\tn, err := io.Copy(output, response.Body)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\" Error while downloading \", myURL, \"-\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif *verbose == true 
{\n\t\t\tlog.Println(\" \", n, \"bytes downloaded.\")\n\t\t}\n\t}\n}\nfunc elem2preURL(c config, e element) string {\n\tvar res string\n\tif e.hasParent() {\n\t\tres = elem2preURL(c, findElem(c, e.Parent)) + \"\/\"\n\t\tif e.File != \"\" { \/\/TODO use file in config???\n\t\t\tres = res + e.File\n\t\t} else {\n\t\t\tres = res + e.ID\n\t\t}\n\t} else {\n\t\tres = c.BaseURL + \"\/\" + e.ID\n\t}\n\treturn res\n}\n\nfunc elem2URL(c config, e element, ext string) string {\n\tres := elem2preURL(c, e)\n\tres += c.Formats[ext].Loc\n\tif !stringInSlice(ext, e.Files) {\n\t\tlog.Fatalln(\" Error!!! \" + res + \" not exist\")\n\t}\n\n\treturn res\n}\n\nfunc findElem(c config, e string) element {\n\tres := c.Elements[e]\n\tif res.ID == \"\" {\n\t\tlog.Fatalln(\" \" + e + \" is not in config! Please use \\\"list\\\" command!\")\n\t}\n\treturn res\n}\nfunc getFormats() []string {\n\tvar formatFile []string\n\tif *dosmPbf {\n\t\tformatFile = append(formatFile, \"osm.pbf\")\n\t}\n\tif *doshPbf {\n\t\tformatFile = append(formatFile, \"osh.pbf\")\n\t}\n\tif *dosmBz2 {\n\t\tformatFile = append(formatFile, \"osm.bz2\")\n\t}\n\tif *dshpZip {\n\t\tformatFile = append(formatFile, \"shp.zip\")\n\t}\n\tif *dstate {\n\t\tformatFile = append(formatFile, \"state\")\n\t}\n\tif *dpoly {\n\t\tformatFile = append(formatFile, \"poly\")\n\t}\n\tif len(formatFile) == 0 {\n\t\tformatFile = append(formatFile, \"osm.pbf\")\n\t}\n\treturn formatFile\n}\n\nfunc listAllRegions(c config, format string) {\n\ttable := tablewriter.NewWriter(os.Stdout)\n\ttable.SetAlignment(tablewriter.ALIGN_LEFT)\n\ttable.SetHeader([]string{\"ShortName\", \"Is in\", \"Long Name\", \"formats\"})\n\tif format == \"Markdown\" {\n\t\ttable.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})\n\t\ttable.SetCenterSeparator(\"|\")\n\t}\n\tkeys := make(sort.StringSlice, len(c.Elements))\n\ti := 0\n\tfor k := range c.Elements {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\tkeys.Sort()\n\tfor _, item := range keys {\n\t\ttable.Append([]string{item, c.Elements[c.Elements[item].Parent].Name, c.Elements[item].Name, miniFormats(c.Elements[item].Files)})\n\t}\n\ttable.Render()\n\tfmt.Printf(\"Total elements: %#v\\n\", len(c.Elements))\n}\n\nfunc loadConfig(configFile string) config {\n\tfilename, _ := filepath.Abs(configFile)\n\tfile, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tlog.Fatalln(\" File error: %v \", err)\n\t\tos.Exit(1)\n\t}\n\tvar myConfig config\n\terr = yaml.Unmarshal(file, &myConfig)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn myConfig\n\n}\nfunc stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ UpdateConfig : simple script to download lastest config from repo\nfunc UpdateConfig(myURL string, myconfig string) {\n\tdownloadFromURL(myURL, myconfig)\n\tfmt.Println(\"Congratulation, you have the latest geofabrik.yml\")\n}\n\nfunc main() {\n\tswitch kingpin.MustParse(app.Parse(os.Args[1:])) {\n\n\tcase list.FullCommand():\n\t\tvar format = \"\"\n\t\tif *lmd {\n\t\t\tformat = \"Markdown\"\n\t\t}\n\t\tlistAllRegions(loadConfig(*fConfig), format)\n\tcase update.FullCommand():\n\t\tUpdateConfig(*fURL, *fConfig)\n\tcase download.FullCommand():\n\t\tformatFile := getFormats()\n\t\tfor _, format := range formatFile {\n\t\t\tdownloadFromURL(elem2URL(loadConfig(*fConfig), findElem(loadConfig(*fConfig), *delement), format), *delement+\".\"+format)\n\t\t}\n\t}\n}\n<commit_msg>style: linting<commit_after>package main\n\nimport 
(\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/proxy\"\n\n\t\"github.com\/olekukonko\/tablewriter\"\n\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\ntype config struct {\n\tBaseURL string `yaml:\"baseURL\"`\n\tFormats map[string]format `yaml:\"formats,flow\"`\n\tElements map[string]element `yaml:\"elements,flow\"`\n}\n\ntype element struct {\n\tID string `yaml:\"id\"`\n\tFile string `yaml:\"file\"`\n\tMeta bool `yaml:\"meta\"`\n\tName string `yaml:\"name\"`\n\tFiles []string `yaml:\"files\"`\n\tParent string `yaml:\"parent\"`\n}\n\ntype format struct {\n\tID string `yaml:\"ext\"`\n\tLoc string `yaml:\"loc\"`\n}\n\nvar (\n\tapp = kingpin.New(\"download-geofabrik\", \"A command-line tool for downloading OSM files.\")\n\tfConfig = app.Flag(\"config\", \"Set Config file.\").Default(\".\/geofabrik.yml\").Short('c').String()\n\tnodownload = app.Flag(\"nodownload\", \"Do not download file (test only)\").Short('n').Bool()\n\tverbose = app.Flag(\"verbose\", \"Be verbose\").Short('v').Bool()\n\tfProxyHTTP = app.Flag(\"proxy-http\", \"Use http proxy, format: proxy_address:port\").Default(\"\").String()\n\tfProxySock5 = app.Flag(\"proxy-sock5\", \"Use Sock5 proxy, format: proxy_address:port\").Default(\"\").String()\n\tfProxyUser = app.Flag(\"proxy-user\", \"Proxy user\").Default(\"\").String()\n\tfProxyPass = app.Flag(\"proxy-pass\", \"Proxy password\").Default(\"\").String()\n\n\tupdate = app.Command(\"update\", \"Update geofabrik.yml from github\")\n\tfURL = update.Flag(\"url\", \"Url for config source\").Default(\"https:\/\/raw.githubusercontent.com\/julien-noblet\/download-geofabrik\/master\/geofabrik.yml\").String()\n\n\tlist = app.Command(\"list\", \"Show elements available\")\n\tlmd = list.Flag(\"markdown\", \"generate list in Markdown format\").Bool()\n\n\tdownload = app.Command(\"download\", \"Download element\") \/\/TODO : add d as command\n\tdelement = download.Arg(\"element\", \"OSM element\").Required().String()\n\tdosmBz2 = download.Flag(\"osm.bz2\", \"Download osm.bz2 if available\").Short('B').Bool()\n\tdshpZip = download.Flag(\"shp.zip\", \"Download shp.zip if available\").Short('S').Bool()\n\tdosmPbf = download.Flag(\"osm.pbf\", \"Download osm.pbf (default)\").Short('P').Bool()\n\tdoshPbf = download.Flag(\"osh.pbf\", \"Download osh.pbf (default)\").Short('H').Bool()\n\tdstate = download.Flag(\"state\", \"Download state.txt file\").Short('s').Bool()\n\tdpoly = download.Flag(\"poly\", \"Download poly file\").Short('p').Bool()\n)\n\nfunc (e *element) hasParent() bool {\n\treturn len(e.Parent) != 0\n}\n\nfunc miniFormats(s []string) string {\n\tres := make([]string, 6)\n\tfor _, item := range s {\n\t\tswitch item {\n\t\tcase \"state\":\n\t\t\tres[0] = \"s\"\n\t\tcase \"osm.pbf\":\n\t\t\tres[1] = \"P\"\n\t\tcase \"osm.bz2\":\n\t\t\tres[2] = \"B\"\n\t\tcase \"osh.pbf\":\n\t\t\tres[3] = \"H\"\n\t\tcase \"poly\":\n\t\t\tres[4] = \"p\"\n\t\tcase \"shp.zip\":\n\t\t\tres[5] = \"S\"\n\t\t}\n\t}\n\n\treturn strings.Join(res, \"\")\n}\n\nfunc downloadFromURL(myURL string, fileName string) {\n\tif *verbose == true {\n\t\tlog.Println(\" Downloading\", myURL, \"to\", fileName)\n\t}\n\n\tif *nodownload == false {\n\t\t\/\/ TODO: check file existence first with io.IsExist\n\t\toutput, err := os.Create(fileName)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\" Error while creating \", fileName, \"-\", err)\n\t\t\treturn\n\t\t}\n\t\tdefer 
output.Close()\n\t\ttransport := &http.Transport{}\n\t\tif *fProxyHTTP != \"\" {\n\t\t\tu, err := url.Parse(myURL)\n\t\t\t\/\/log.Println(u.Scheme +\":\/\/\"+ *fProxyHTTP)\n\t\t\tproxyURL, err := url.Parse(u.Scheme + \":\/\/\" + *fProxyHTTP)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalln(\" Wrong proxy url, please use format proxy_address:port\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttransport = &http.Transport{Proxy: http.ProxyURL(proxyURL)}\n\t\t}\n\t\tclient := &http.Client{Transport: transport}\n\t\tif *fProxySock5 != \"\" {\n\t\t\tauth := proxy.Auth{*fProxyUser, *fProxyPass}\n\t\t\tdialer, err := proxy.SOCKS5(\"tcp\", *fProxySock5, &auth, proxy.Direct)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalln(\" can't connect to the proxy:\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttransport.Dial = dialer.Dial\n\t\t}\n\t\tresponse, err := client.Get(myURL)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\" Error while downloading \", myURL, \"-\", err)\n\t\t\treturn\n\t\t}\n\t\tdefer response.Body.Close()\n\n\t\tn, err := io.Copy(output, response.Body)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\" Error while downloading \", myURL, \"-\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif *verbose == true {\n\t\t\tlog.Println(\" \", n, \"bytes downloaded.\")\n\t\t}\n\t}\n}\nfunc elem2preURL(c config, e element) string {\n\tvar res string\n\tif e.hasParent() {\n\t\tres = elem2preURL(c, findElem(c, e.Parent)) + \"\/\"\n\t\tif e.File != \"\" { \/\/TODO use file in config???\n\t\t\tres = res + e.File\n\t\t} else {\n\t\t\tres = res + e.ID\n\t\t}\n\t} else {\n\t\tres = c.BaseURL + \"\/\" + e.ID\n\t}\n\treturn res\n}\n\nfunc elem2URL(c config, e element, ext string) string {\n\tres := elem2preURL(c, e)\n\tres += c.Formats[ext].Loc\n\tif !stringInSlice(ext, e.Files) {\n\t\tlog.Fatalln(\" Error!!! \" + res + \" not exist\")\n\t}\n\n\treturn res\n}\n\nfunc findElem(c config, e string) element {\n\tres := c.Elements[e]\n\tif res.ID == \"\" {\n\t\tlog.Fatalln(\" \" + e + \" is not in config! 
Please use \\\"list\\\" command!\")\n\t}\n\treturn res\n}\nfunc getFormats() []string {\n\tvar formatFile []string\n\tif *dosmPbf {\n\t\tformatFile = append(formatFile, \"osm.pbf\")\n\t}\n\tif *doshPbf {\n\t\tformatFile = append(formatFile, \"osh.pbf\")\n\t}\n\tif *dosmBz2 {\n\t\tformatFile = append(formatFile, \"osm.bz2\")\n\t}\n\tif *dshpZip {\n\t\tformatFile = append(formatFile, \"shp.zip\")\n\t}\n\tif *dstate {\n\t\tformatFile = append(formatFile, \"state\")\n\t}\n\tif *dpoly {\n\t\tformatFile = append(formatFile, \"poly\")\n\t}\n\tif len(formatFile) == 0 {\n\t\tformatFile = append(formatFile, \"osm.pbf\")\n\t}\n\treturn formatFile\n}\n\nfunc listAllRegions(c config, format string) {\n\ttable := tablewriter.NewWriter(os.Stdout)\n\ttable.SetAlignment(tablewriter.ALIGN_LEFT)\n\ttable.SetHeader([]string{\"ShortName\", \"Is in\", \"Long Name\", \"formats\"})\n\tif format == \"Markdown\" {\n\t\ttable.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})\n\t\ttable.SetCenterSeparator(\"|\")\n\t}\n\tkeys := make(sort.StringSlice, len(c.Elements))\n\ti := 0\n\tfor k := range c.Elements {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\tkeys.Sort()\n\tfor _, item := range keys {\n\t\ttable.Append([]string{item, c.Elements[c.Elements[item].Parent].Name, c.Elements[item].Name, miniFormats(c.Elements[item].Files)})\n\t}\n\ttable.Render()\n\tfmt.Printf(\"Total elements: %#v\\n\", len(c.Elements))\n}\n\nfunc loadConfig(configFile string) config {\n\tfilename, _ := filepath.Abs(configFile)\n\tfile, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tlog.Fatalln(\" File error: %v \", err)\n\t\tos.Exit(1)\n\t}\n\tvar myConfig config\n\terr = yaml.Unmarshal(file, &myConfig)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn myConfig\n\n}\nfunc stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ UpdateConfig : simple script to download lastest config from repo\nfunc UpdateConfig(myURL string, myconfig string) {\n\tdownloadFromURL(myURL, myconfig)\n\tfmt.Println(\"Congratulation, you have the latest geofabrik.yml\")\n}\n\nfunc main() {\n\tswitch kingpin.MustParse(app.Parse(os.Args[1:])) {\n\n\tcase list.FullCommand():\n\t\tvar format = \"\"\n\t\tif *lmd {\n\t\t\tformat = \"Markdown\"\n\t\t}\n\t\tlistAllRegions(loadConfig(*fConfig), format)\n\tcase update.FullCommand():\n\t\tUpdateConfig(*fURL, *fConfig)\n\tcase download.FullCommand():\n\t\tformatFile := getFormats()\n\t\tfor _, format := range formatFile {\n\t\t\tdownloadFromURL(elem2URL(loadConfig(*fConfig), findElem(loadConfig(*fConfig), *delement), format), *delement+\".\"+format)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"testing\"\n\n\t\"github.com\/getlantern\/go-update\"\n\t\"github.com\/getlantern\/go-update\/check\"\n)\n\nconst (\n\tlocalAddr = \"127.0.0.1:1123\"\n\tpublicAddr = localAddr\n)\n\nfunc init() {\n\tSetPrivateKey(\"..\/_resources\/example-keys\/private.key\")\n}\n\nfunc TestReachServer(t *testing.T) {\n\tupdateServer := NewUpdateServer(publicAddr, localAddr, \".\", 0)\n\n\tupdateServer.HandleRepo(\"\/update\", \"getlantern\", \"lantern\")\n\tupdateServer.HandleRepo(\"\/update\/getlantern\/lantern\", \"getlantern\", \"lantern\")\n\n\tgo updateServer.ListenAndServe()\n\tdefer updateServer.Close()\n\n\tpublicKey, err := ioutil.ReadFile(\"..\/_resources\/example-keys\/public.pub\")\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to open public key: %v\", 
err)\n\t}\n\n\tparam := check.Params{\n\t\tAppVersion: \"3.7.1\",\n\t}\n\n\tup := update.New().ApplyPatch(update.PATCHTYPE_BSDIFF)\n\n\tif _, err = up.VerifySignatureWithPEM(publicKey); err != nil {\n\t\tt.Fatal(\"VerifySignatureWithPEM\", err)\n\t}\n\n\tres, err := param.CheckForUpdate(fmt.Sprintf(\"http:\/\/%s\/update\", localAddr), up)\n\tif err != nil {\n\t\tt.Fatalf(\"CheckForUpdate: %v\", err)\n\t}\n\n\tif res.Url == \"\" {\n\t\tt.Fatal(\"Expecting some URL.\")\n\t}\n\n\tres, err = param.CheckForUpdate(fmt.Sprintf(\"http:\/\/%s\/update\/getlantern\/lantern\", localAddr), up)\n\tif err != nil {\n\t\tt.Fatalf(\"CheckForUpdate: %v\", err)\n\t}\n\n\tif res.Url == \"\" {\n\t\tt.Fatal(\"Expecting some URL.\")\n\t}\n}\n<commit_msg>fix tests<commit_after>package server\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"testing\"\n\n\t\"github.com\/getlantern\/go-update\"\n\t\"github.com\/getlantern\/go-update\/check\"\n)\n\nconst (\n\tlocalAddr = \"127.0.0.1:1123\"\n\tpublicAddr = localAddr\n)\n\nfunc init() {\n\tSetPrivateKey(\"..\/_resources\/example-keys\/private.key\")\n}\n\nfunc TestReachServer(t *testing.T) {\n\tupdateServer := NewUpdateServer(publicAddr, localAddr, \".\", 0)\n\n\tupdateServer.HandleRepo(\"\", \"getlantern\", \"lantern\")\n\tupdateServer.HandleRepo(\"lantern\", \"getlantern\", \"lantern\")\n\n\tgo updateServer.ListenAndServe()\n\tdefer updateServer.Close()\n\n\tpublicKey, err := ioutil.ReadFile(\"..\/_resources\/example-keys\/public.pub\")\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to open public key: %v\", err)\n\t}\n\n\tparam := check.Params{\n\t\tAppVersion: \"3.7.1\",\n\t}\n\n\tup := update.New().ApplyPatch(update.PATCHTYPE_BSDIFF)\n\n\tif _, err = up.VerifySignatureWithPEM(publicKey); err != nil {\n\t\tt.Fatal(\"VerifySignatureWithPEM\", err)\n\t}\n\n\tres, err := param.CheckForUpdate(fmt.Sprintf(\"http:\/\/%s\/update\", localAddr), up)\n\tif err != nil {\n\t\tt.Fatalf(\"CheckForUpdate: %v\", err)\n\t}\n\n\tif res.Url == \"\" {\n\t\tt.Fatal(\"Expecting some URL.\")\n\t}\n\n\tres, err = param.CheckForUpdate(fmt.Sprintf(\"http:\/\/%s\/update\/lantern\", localAddr), up)\n\tif err != nil {\n\t\tt.Fatalf(\"CheckForUpdate: %v\", err)\n\t}\n\n\tif res.Url == \"\" {\n\t\tt.Fatal(\"Expecting some URL.\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package service\n\nimport (\n\t\"github.com\/go-kit\/kit\/sd\"\n\t\"github.com\/stretchr\/testify\/mock\"\n)\n\ntype mockInstancer struct {\n\tmock.Mock\n}\n\nfunc (m *mockInstancer) Register(events chan<- sd.Event) {\n\tm.Called(events)\n}\n\nfunc (m *mockInstancer) Deregister(events chan<- sd.Event) {\n\tm.Called(events)\n}\n\ntype mockAccessor struct {\n\tmock.Mock\n}\n\nfunc (m *mockAccessor) Get(key []byte) (string, error) {\n\targuments := m.Called(key)\n\treturn arguments.String(0), arguments.Error(1)\n}\n\ntype mockSubscription struct {\n\tmock.Mock\n}\n\nfunc (m *mockSubscription) Stopped() <-chan struct{} {\n\treturn m.Called().Get(0).(<-chan struct{})\n}\n\nfunc (m *mockSubscription) Stop() {\n\tm.Called()\n}\n\nfunc (m *mockSubscription) Updates() <-chan Accessor {\n\treturn m.Called().Get(0).(<-chan Accessor)\n}\n<commit_msg>pruned unused mock code<commit_after>package service\n\nimport (\n\t\"github.com\/go-kit\/kit\/sd\"\n\t\"github.com\/stretchr\/testify\/mock\"\n)\n\ntype mockInstancer struct {\n\tmock.Mock\n}\n\nfunc (m *mockInstancer) Register(events chan<- sd.Event) {\n\tm.Called(events)\n}\n\nfunc (m *mockInstancer) Deregister(events chan<- sd.Event) {\n\tm.Called(events)\n}\n\ntype mockAccessor struct 
{\n\tmock.Mock\n}\n\nfunc (m *mockAccessor) Get(key []byte) (string, error) {\n\targuments := m.Called(key)\n\treturn arguments.String(0), arguments.Error(1)\n}\n<|endoftext|>"} {"text":"<commit_before>package cloudns\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com\/miekg\/dns\/dnsutil\"\n\n\t\"github.com\/StackExchange\/dnscontrol\/v3\/models\"\n\t\"github.com\/StackExchange\/dnscontrol\/v3\/pkg\/diff\"\n\t\"github.com\/StackExchange\/dnscontrol\/v3\/providers\"\n)\n\n\/*\nClouDNS API DNS provider:\nInfo required in `creds.json`:\n - auth-id or sub-auth-id\n - auth-password\n*\/\n\n\/\/ NewCloudns creates the provider.\nfunc NewCloudns(m map[string]string, metadata json.RawMessage) (providers.DNSServiceProvider, error) {\n\tc := &cloudnsProvider{}\n\n\tc.creds.id, c.creds.password, c.creds.subid = m[\"auth-id\"], m[\"auth-password\"], m[\"sub-auth-id\"]\n\n\tif (c.creds.id == \"\" && c.creds.subid == \"\") || c.creds.password == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing ClouDNS auth-id or sub-auth-id and auth-password\")\n\t}\n\n\t\/\/ Get a domain to validate authentication\n\tif err := c.fetchDomainList(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}\n\nvar features = providers.DocumentationNotes{\n\tproviders.DocDualHost: providers.Unimplemented(),\n\tproviders.DocOfficiallySupported: providers.Cannot(),\n\tproviders.DocCreateDomains: providers.Can(),\n\tproviders.CanUseAlias: providers.Can(),\n\tproviders.CanUseSRV: providers.Can(),\n\tproviders.CanUseSSHFP: providers.Can(),\n\tproviders.CanUseCAA: providers.Can(),\n\tproviders.CanUseTLSA: providers.Can(),\n\tproviders.CanUsePTR: providers.Can(),\n\tproviders.CanGetZones: providers.Can(),\n\tproviders.CanUseDSForChildren: providers.Can(),\n\t\/\/providers.CanUseDS: providers.Can(), \/\/ in ClouDNS we can add DS record just for a subdomain(child)\n}\n\nfunc init() {\n\tfns := providers.DspFuncs{\n\t\tInitializer: NewCloudns,\n\t\tRecordAuditor: AuditRecords,\n\t}\n\tproviders.RegisterDomainServiceProviderType(\"CLOUDNS\", fns, features)\n}\n\n\/\/ GetNameservers returns the nameservers for a domain.\nfunc (c *cloudnsProvider) GetNameservers(domain string) ([]*models.Nameserver, error) {\n\tif len(c.nameserversNames) == 0 {\n\t\tc.fetchAvailableNameservers()\n\t}\n\treturn models.ToNameservers(c.nameserversNames)\n}\n\n\/\/ GetDomainCorrections returns the corrections for a domain.\nfunc (c *cloudnsProvider) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {\n\tdc, err := dc.Copy()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdc.Punycode()\n\n\tif c.domainIndex == nil {\n\t\tif err := c.fetchDomainList(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdomainID, ok := c.domainIndex[dc.Name]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"'%s' not a zone in ClouDNS account\", dc.Name)\n\t}\n\n\texistingRecords, err := c.GetZoneRecords(dc.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Normalize\n\tmodels.PostProcessRecords(existingRecords)\n\n\t\/\/ Get a list of available TTL values.\n\t\/\/ The TTL list needs to be obtained for each domain, so get it first here.\n\tc.fetchAvailableTTLValues(dc.Name)\n\t\/\/ ClouDNS can only be specified from a specific TTL list, so change the TTL in advance.\n\tfor _, record := range dc.Records {\n\t\trecord.TTL = fixTTL(record.TTL)\n\t}\n\n\tdiffer := diff.New(dc)\n\t_, create, del, modify, err := differ.IncrementalDiff(existingRecords)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar corrections 
[]*models.Correction\n\n\t\/\/ Deletes first so changing type works etc.\n\tfor _, m := range del {\n\t\tid := m.Existing.Original.(*domainRecord).ID\n\t\tcorr := &models.Correction{\n\t\t\tMsg: fmt.Sprintf(\"%s, ClouDNS ID: %s\", m.String(), id),\n\t\t\tF: func() error {\n\t\t\t\treturn c.deleteRecord(domainID, id)\n\t\t\t},\n\t\t}\n\t\t\/\/ at ClouDNS, we MUST have a NS for a DS\n\t\t\/\/ So, when deleting, we must delete the DS first, otherwise deleting the NS throws an error\n\t\tif m.Existing.Type == \"DS\" {\n\t\t\t\/\/ type DS is prepended - so executed first\n\t\t\tcorrections = append([]*models.Correction{corr}, corrections...)\n\t\t} else {\n\t\t\tcorrections = append(corrections, corr)\n\t\t}\n\t}\n\n\tvar createCorrections []*models.Correction\n\tfor _, m := range create {\n\t\treq, err := toReq(m.Desired)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcorr := &models.Correction{\n\t\t\tMsg: m.String(),\n\t\t\tF: func() error {\n\t\t\t\treturn c.createRecord(domainID, req)\n\t\t\t},\n\t\t}\n\t\t\/\/ at ClouDNS, we MUST have a NS for a DS\n\t\t\/\/ So, when creating, we must create the NS first, otherwise creating the DS throws an error\n\t\tif m.Desired.Type == \"NS\" {\n\t\t\t\/\/ type NS is prepended - so executed first\n\t\t\tcreateCorrections = append([]*models.Correction{corr}, createCorrections...)\n\t\t} else {\n\t\t\tcreateCorrections = append(createCorrections, corr)\n\t\t}\n\t}\n\tcorrections = append(corrections, createCorrections...)\n\n\tfor _, m := range modify {\n\t\tid := m.Existing.Original.(*domainRecord).ID\n\t\treq, err := toReq(m.Desired)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcorr := &models.Correction{\n\t\t\tMsg: fmt.Sprintf(\"%s, ClouDNS ID: %s: \", m.String(), id),\n\t\t\tF: func() error {\n\t\t\t\treturn c.modifyRecord(domainID, id, req)\n\t\t\t},\n\t\t}\n\t\tcorrections = append(corrections, corr)\n\t}\n\n\treturn corrections, nil\n}\n\n\/\/ GetZoneRecords gets the records of a zone and returns them in RecordConfig format.\nfunc (c *cloudnsProvider) GetZoneRecords(domain string) (models.Records, error) {\n\trecords, err := c.getRecords(domain)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\texistingRecords := make([]*models.RecordConfig, len(records))\n\tfor i := range records {\n\t\texistingRecords[i] = toRc(domain, &records[i])\n\t}\n\treturn existingRecords, nil\n}\n\n\/\/ EnsureDomainExists returns an error if domain doesn't exist.\nfunc (c *cloudnsProvider) EnsureDomainExists(domain string) error {\n\tif err := c.fetchDomainList(); err != nil {\n\t\treturn err\n\t}\n\t\/\/ domain already exists\n\tif _, ok := c.domainIndex[domain]; ok {\n\t\treturn nil\n\t}\n\treturn c.createDomain(domain)\n}\n\n\/\/parses the ClouDNS format into our standard RecordConfig\nfunc toRc(domain string, r *domainRecord) *models.RecordConfig {\n\n\tttl, _ := strconv.ParseUint(r.TTL, 10, 32)\n\tpriority, _ := strconv.ParseUint(r.Priority, 10, 16)\n\tweight, _ := strconv.ParseUint(r.Weight, 10, 16)\n\tport, _ := strconv.ParseUint(r.Port, 10, 16)\n\n\trc := &models.RecordConfig{\n\t\tType: r.Type,\n\t\tTTL: uint32(ttl),\n\t\tMxPreference: uint16(priority),\n\t\tSrvPriority: uint16(priority),\n\t\tSrvWeight: uint16(weight),\n\t\tSrvPort: uint16(port),\n\t\tOriginal: r,\n\t}\n\trc.SetLabel(r.Host, domain)\n\n\tswitch rtype := r.Type; rtype { \/\/ #rtype_variations\n\tcase \"TXT\":\n\t\trc.SetTargetTXT(r.Target)\n\tcase \"CNAME\", \"MX\", \"NS\", \"SRV\", \"ALIAS\", \"PTR\":\n\t\trc.SetTarget(dnsutil.AddOrigin(r.Target+\".\", domain))\n\tcase 
\"CAA\":\n\t\tcaaFlag, _ := strconv.ParseUint(r.CaaFlag, 10, 8)\n\t\trc.CaaFlag = uint8(caaFlag)\n\t\trc.CaaTag = r.CaaTag\n\t\trc.SetTarget(r.CaaValue)\n\tcase \"TLSA\":\n\t\ttlsaUsage, _ := strconv.ParseUint(r.TlsaUsage, 10, 8)\n\t\trc.TlsaUsage = uint8(tlsaUsage)\n\t\ttlsaSelector, _ := strconv.ParseUint(r.TlsaSelector, 10, 8)\n\t\trc.TlsaSelector = uint8(tlsaSelector)\n\t\ttlsaMatchingType, _ := strconv.ParseUint(r.TlsaMatchingType, 10, 8)\n\t\trc.TlsaMatchingType = uint8(tlsaMatchingType)\n\t\trc.SetTarget(r.Target)\n\tcase \"SSHFP\":\n\t\tsshfpAlgorithm, _ := strconv.ParseUint(r.SshfpAlgorithm, 10, 8)\n\t\trc.SshfpAlgorithm = uint8(sshfpAlgorithm)\n\t\tsshfpFingerprint, _ := strconv.ParseUint(r.SshfpFingerprint, 10, 8)\n\t\trc.SshfpFingerprint = uint8(sshfpFingerprint)\n\t\trc.SetTarget(r.Target)\n\tcase \"DS\":\n\t\tdsKeyTag, _ := strconv.ParseUint(r.DsKeyTag, 10, 16)\n\t\trc.DsKeyTag = uint16(dsKeyTag)\n\t\tdsAlgorithm, _ := strconv.ParseUint(r.SshfpAlgorithm, 10, 8) \/\/ SshFpAlgorithm and DsAlgorithm both use json field \"algorithm\"\n\t\trc.DsAlgorithm = uint8(dsAlgorithm)\n\t\tdsDigestType, _ := strconv.ParseUint(r.DsDigestType, 10, 8)\n\t\trc.DsDigestType = uint8(dsDigestType)\n\t\trc.DsDigest = r.Target\n\t\trc.SetTarget(r.Target)\n\tdefault:\n\t\trc.SetTarget(r.Target)\n\t}\n\n\treturn rc\n}\n\n\/\/toReq takes a RecordConfig and turns it into the native format used by the API.\nfunc toReq(rc *models.RecordConfig) (requestParams, error) {\n\treq := requestParams{\n\t\t\"record-type\": rc.Type,\n\t\t\"host\": rc.GetLabel(),\n\t\t\"record\": rc.GetTargetField(),\n\t\t\"ttl\": strconv.Itoa(int(rc.TTL)),\n\t}\n\n\t\/\/ ClouDNS doesn't use \"@\", it uses an empty name\n\tif req[\"host\"] == \"@\" {\n\t\treq[\"host\"] = \"\"\n\t}\n\n\tswitch rc.Type { \/\/ #rtype_variations\n\tcase \"A\", \"AAAA\", \"NS\", \"PTR\", \"TXT\", \"SOA\", \"ALIAS\", \"CNAME\":\n\t\t\/\/ Nothing special.\n\tcase \"MX\":\n\t\treq[\"priority\"] = strconv.Itoa(int(rc.MxPreference))\n\tcase \"SRV\":\n\t\treq[\"priority\"] = strconv.Itoa(int(rc.SrvPriority))\n\t\treq[\"weight\"] = strconv.Itoa(int(rc.SrvWeight))\n\t\treq[\"port\"] = strconv.Itoa(int(rc.SrvPort))\n\tcase \"CAA\":\n\t\treq[\"caa_flag\"] = strconv.Itoa(int(rc.CaaFlag))\n\t\treq[\"caa_type\"] = rc.CaaTag\n\t\treq[\"caa_value\"] = rc.GetTargetField()\n\tcase \"TLSA\":\n\t\treq[\"tlsa_usage\"] = strconv.Itoa(int(rc.TlsaUsage))\n\t\treq[\"tlsa_selector\"] = strconv.Itoa(int(rc.TlsaSelector))\n\t\treq[\"tlsa_matching_type\"] = strconv.Itoa(int(rc.TlsaMatchingType))\n\tcase \"SSHFP\":\n\t\treq[\"algorithm\"] = strconv.Itoa(int(rc.SshfpAlgorithm))\n\t\treq[\"fptype\"] = strconv.Itoa(int(rc.SshfpFingerprint))\n\tcase \"DS\":\n\t\treq[\"key-tag\"] = strconv.Itoa(int(rc.DsKeyTag))\n\t\treq[\"algorithm\"] = strconv.Itoa(int(rc.DsAlgorithm))\n\t\treq[\"digest-type\"] = strconv.Itoa(int(rc.DsDigestType))\n\t\treq[\"record\"] = rc.DsDigest\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"ClouDNS.toReq rtype %q unimplemented\", rc.Type)\n\t}\n\n\treturn req, nil\n}\n<commit_msg>CLOUDNS: Fix name server updates #1263 (#1303)<commit_after>package cloudns\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/miekg\/dns\/dnsutil\"\n\n\t\"github.com\/StackExchange\/dnscontrol\/v3\/models\"\n\t\"github.com\/StackExchange\/dnscontrol\/v3\/pkg\/diff\"\n\t\"github.com\/StackExchange\/dnscontrol\/v3\/providers\"\n)\n\n\/*\nClouDNS API DNS provider:\nInfo required in `creds.json`:\n - auth-id or sub-auth-id\n - auth-password\n*\/\n\n\/\/ 
NewCloudns creates the provider.\nfunc NewCloudns(m map[string]string, metadata json.RawMessage) (providers.DNSServiceProvider, error) {\n\tc := &cloudnsProvider{}\n\n\tc.creds.id, c.creds.password, c.creds.subid = m[\"auth-id\"], m[\"auth-password\"], m[\"sub-auth-id\"]\n\n\tif (c.creds.id == \"\" && c.creds.subid == \"\") || c.creds.password == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing ClouDNS auth-id or sub-auth-id and auth-password\")\n\t}\n\n\t\/\/ Get a domain to validate authentication\n\tif err := c.fetchDomainList(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}\n\nvar features = providers.DocumentationNotes{\n\tproviders.DocDualHost: providers.Unimplemented(),\n\tproviders.DocOfficiallySupported: providers.Cannot(),\n\tproviders.DocCreateDomains: providers.Can(),\n\tproviders.CanUseAlias: providers.Can(),\n\tproviders.CanUseSRV: providers.Can(),\n\tproviders.CanUseSSHFP: providers.Can(),\n\tproviders.CanUseCAA: providers.Can(),\n\tproviders.CanUseTLSA: providers.Can(),\n\tproviders.CanUsePTR: providers.Can(),\n\tproviders.CanGetZones: providers.Can(),\n\tproviders.CanUseDSForChildren: providers.Can(),\n\t\/\/providers.CanUseDS: providers.Can(), \/\/ in ClouDNS we can add DS record just for a subdomain(child)\n}\n\nfunc init() {\n\tfns := providers.DspFuncs{\n\t\tInitializer: NewCloudns,\n\t\tRecordAuditor: AuditRecords,\n\t}\n\tproviders.RegisterDomainServiceProviderType(\"CLOUDNS\", fns, features)\n}\n\n\/\/ GetNameservers returns the nameservers for a domain.\nfunc (c *cloudnsProvider) GetNameservers(domain string) ([]*models.Nameserver, error) {\n\tif len(c.nameserversNames) == 0 {\n\t\tc.fetchAvailableNameservers()\n\t}\n\treturn models.ToNameservers(c.nameserversNames)\n}\n\n\/\/ GetDomainCorrections returns the corrections for a domain.\nfunc (c *cloudnsProvider) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {\n\tdc, err := dc.Copy()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdc.Punycode()\n\n\tif c.domainIndex == nil {\n\t\tif err := c.fetchDomainList(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdomainID, ok := c.domainIndex[dc.Name]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"'%s' not a zone in ClouDNS account\", dc.Name)\n\t}\n\n\texistingRecords, err := c.GetZoneRecords(dc.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Normalize\n\tmodels.PostProcessRecords(existingRecords)\n\n\t\/\/ Get a list of available TTL values.\n\t\/\/ The TTL list needs to be obtained for each domain, so get it first here.\n\tc.fetchAvailableTTLValues(dc.Name)\n\t\/\/ ClouDNS can only be specified from a specific TTL list, so change the TTL in advance.\n\tfor _, record := range dc.Records {\n\t\trecord.TTL = fixTTL(record.TTL)\n\t}\n\n\tdiffer := diff.New(dc)\n\t_, create, del, modify, err := differ.IncrementalDiff(existingRecords)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar corrections []*models.Correction\n\n\t\/\/ Deletes first so changing type works etc.\n\tfor _, m := range del {\n\t\tid := m.Existing.Original.(*domainRecord).ID\n\t\tcorr := &models.Correction{\n\t\t\tMsg: fmt.Sprintf(\"%s, ClouDNS ID: %s\", m.String(), id),\n\t\t\tF: func() error {\n\t\t\t\treturn c.deleteRecord(domainID, id)\n\t\t\t},\n\t\t}\n\t\t\/\/ at ClouDNS, we MUST have a NS for a DS\n\t\t\/\/ So, when deleting, we must delete the DS first, otherwise deleting the NS throws an error\n\t\tif m.Existing.Type == \"DS\" {\n\t\t\t\/\/ type DS is prepended - so executed first\n\t\t\tcorrections = append([]*models.Correction{corr}, 
corrections...)\n\t\t} else {\n\t\t\tcorrections = append(corrections, corr)\n\t\t}\n\t}\n\n\tvar createCorrections []*models.Correction\n\tfor _, m := range create {\n\t\treq, err := toReq(m.Desired)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcorr := &models.Correction{\n\t\t\tMsg: m.String(),\n\t\t\tF: func() error {\n\t\t\t\treturn c.createRecord(domainID, req)\n\t\t\t},\n\t\t}\n\t\t\/\/ at ClouDNS, we MUST have a NS for a DS\n\t\t\/\/ So, when creating, we must create the NS first, otherwise creating the DS throws an error\n\t\tif m.Desired.Type == \"NS\" {\n\t\t\t\/\/ type NS is prepended - so executed first\n\t\t\tcreateCorrections = append([]*models.Correction{corr}, createCorrections...)\n\t\t} else {\n\t\t\tcreateCorrections = append(createCorrections, corr)\n\t\t}\n\t}\n\tcorrections = append(corrections, createCorrections...)\n\n\tfor _, m := range modify {\n\t\tid := m.Existing.Original.(*domainRecord).ID\n\t\treq, err := toReq(m.Desired)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ ClouDNS does not require the trailing period to be specified when updating an NS record where the A or AAAA record exists in the zone.\n\t\t\/\/ So, modify it to remove the trailing period.\n\t\tif req[\"record-type\"] == \"NS\" && strings.HasSuffix(req[\"record\"], domainID+\".\") {\n\t\t\treq[\"record\"] = strings.TrimSuffix(req[\"record\"], \".\")\n\t\t}\n\n\t\tcorr := &models.Correction{\n\t\t\tMsg: fmt.Sprintf(\"%s, ClouDNS ID: %s: \", m.String(), id),\n\t\t\tF: func() error {\n\t\t\t\treturn c.modifyRecord(domainID, id, req)\n\t\t\t},\n\t\t}\n\t\tcorrections = append(corrections, corr)\n\t}\n\n\treturn corrections, nil\n}\n\n\/\/ GetZoneRecords gets the records of a zone and returns them in RecordConfig format.\nfunc (c *cloudnsProvider) GetZoneRecords(domain string) (models.Records, error) {\n\trecords, err := c.getRecords(domain)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\texistingRecords := make([]*models.RecordConfig, len(records))\n\tfor i := range records {\n\t\texistingRecords[i] = toRc(domain, &records[i])\n\t}\n\treturn existingRecords, nil\n}\n\n\/\/ EnsureDomainExists returns an error if domain doesn't exist.\nfunc (c *cloudnsProvider) EnsureDomainExists(domain string) error {\n\tif err := c.fetchDomainList(); err != nil {\n\t\treturn err\n\t}\n\t\/\/ domain already exists\n\tif _, ok := c.domainIndex[domain]; ok {\n\t\treturn nil\n\t}\n\treturn c.createDomain(domain)\n}\n\n\/\/parses the ClouDNS format into our standard RecordConfig\nfunc toRc(domain string, r *domainRecord) *models.RecordConfig {\n\n\tttl, _ := strconv.ParseUint(r.TTL, 10, 32)\n\tpriority, _ := strconv.ParseUint(r.Priority, 10, 16)\n\tweight, _ := strconv.ParseUint(r.Weight, 10, 16)\n\tport, _ := strconv.ParseUint(r.Port, 10, 16)\n\n\trc := &models.RecordConfig{\n\t\tType: r.Type,\n\t\tTTL: uint32(ttl),\n\t\tMxPreference: uint16(priority),\n\t\tSrvPriority: uint16(priority),\n\t\tSrvWeight: uint16(weight),\n\t\tSrvPort: uint16(port),\n\t\tOriginal: r,\n\t}\n\trc.SetLabel(r.Host, domain)\n\n\tswitch rtype := r.Type; rtype { \/\/ #rtype_variations\n\tcase \"TXT\":\n\t\trc.SetTargetTXT(r.Target)\n\tcase \"CNAME\", \"MX\", \"NS\", \"SRV\", \"ALIAS\", \"PTR\":\n\t\trc.SetTarget(dnsutil.AddOrigin(r.Target+\".\", domain))\n\tcase \"CAA\":\n\t\tcaaFlag, _ := strconv.ParseUint(r.CaaFlag, 10, 8)\n\t\trc.CaaFlag = uint8(caaFlag)\n\t\trc.CaaTag = r.CaaTag\n\t\trc.SetTarget(r.CaaValue)\n\tcase \"TLSA\":\n\t\ttlsaUsage, _ := strconv.ParseUint(r.TlsaUsage, 10, 8)\n\t\trc.TlsaUsage = 
uint8(tlsaUsage)\n\t\ttlsaSelector, _ := strconv.ParseUint(r.TlsaSelector, 10, 8)\n\t\trc.TlsaSelector = uint8(tlsaSelector)\n\t\ttlsaMatchingType, _ := strconv.ParseUint(r.TlsaMatchingType, 10, 8)\n\t\trc.TlsaMatchingType = uint8(tlsaMatchingType)\n\t\trc.SetTarget(r.Target)\n\tcase \"SSHFP\":\n\t\tsshfpAlgorithm, _ := strconv.ParseUint(r.SshfpAlgorithm, 10, 8)\n\t\trc.SshfpAlgorithm = uint8(sshfpAlgorithm)\n\t\tsshfpFingerprint, _ := strconv.ParseUint(r.SshfpFingerprint, 10, 8)\n\t\trc.SshfpFingerprint = uint8(sshfpFingerprint)\n\t\trc.SetTarget(r.Target)\n\tcase \"DS\":\n\t\tdsKeyTag, _ := strconv.ParseUint(r.DsKeyTag, 10, 16)\n\t\trc.DsKeyTag = uint16(dsKeyTag)\n\t\tdsAlgorithm, _ := strconv.ParseUint(r.SshfpAlgorithm, 10, 8) \/\/ SshFpAlgorithm and DsAlgorithm both use json field \"algorithm\"\n\t\trc.DsAlgorithm = uint8(dsAlgorithm)\n\t\tdsDigestType, _ := strconv.ParseUint(r.DsDigestType, 10, 8)\n\t\trc.DsDigestType = uint8(dsDigestType)\n\t\trc.DsDigest = r.Target\n\t\trc.SetTarget(r.Target)\n\tdefault:\n\t\trc.SetTarget(r.Target)\n\t}\n\n\treturn rc\n}\n\n\/\/toReq takes a RecordConfig and turns it into the native format used by the API.\nfunc toReq(rc *models.RecordConfig) (requestParams, error) {\n\treq := requestParams{\n\t\t\"record-type\": rc.Type,\n\t\t\"host\": rc.GetLabel(),\n\t\t\"record\": rc.GetTargetField(),\n\t\t\"ttl\": strconv.Itoa(int(rc.TTL)),\n\t}\n\n\t\/\/ ClouDNS doesn't use \"@\", it uses an empty name\n\tif req[\"host\"] == \"@\" {\n\t\treq[\"host\"] = \"\"\n\t}\n\n\tswitch rc.Type { \/\/ #rtype_variations\n\tcase \"A\", \"AAAA\", \"NS\", \"PTR\", \"TXT\", \"SOA\", \"ALIAS\", \"CNAME\":\n\t\t\/\/ Nothing special.\n\tcase \"MX\":\n\t\treq[\"priority\"] = strconv.Itoa(int(rc.MxPreference))\n\tcase \"SRV\":\n\t\treq[\"priority\"] = strconv.Itoa(int(rc.SrvPriority))\n\t\treq[\"weight\"] = strconv.Itoa(int(rc.SrvWeight))\n\t\treq[\"port\"] = strconv.Itoa(int(rc.SrvPort))\n\tcase \"CAA\":\n\t\treq[\"caa_flag\"] = strconv.Itoa(int(rc.CaaFlag))\n\t\treq[\"caa_type\"] = rc.CaaTag\n\t\treq[\"caa_value\"] = rc.GetTargetField()\n\tcase \"TLSA\":\n\t\treq[\"tlsa_usage\"] = strconv.Itoa(int(rc.TlsaUsage))\n\t\treq[\"tlsa_selector\"] = strconv.Itoa(int(rc.TlsaSelector))\n\t\treq[\"tlsa_matching_type\"] = strconv.Itoa(int(rc.TlsaMatchingType))\n\tcase \"SSHFP\":\n\t\treq[\"algorithm\"] = strconv.Itoa(int(rc.SshfpAlgorithm))\n\t\treq[\"fptype\"] = strconv.Itoa(int(rc.SshfpFingerprint))\n\tcase \"DS\":\n\t\treq[\"key-tag\"] = strconv.Itoa(int(rc.DsKeyTag))\n\t\treq[\"algorithm\"] = strconv.Itoa(int(rc.DsAlgorithm))\n\t\treq[\"digest-type\"] = strconv.Itoa(int(rc.DsDigestType))\n\t\treq[\"record\"] = rc.DsDigest\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"ClouDNS.toReq rtype %q unimplemented\", rc.Type)\n\t}\n\n\treturn req, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage trigger\n\nimport 
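The CLOUDNS commit above hinges on one small normalization: when updating an NS record whose target ends with the zone name plus a trailing period, the period is trimmed before the modify request, because ClouDNS does not expect it there when the name resolves inside the zone. Below is a minimal, self-contained sketch of that check; the zone and nameserver values are hypothetical examples, and only strings.HasSuffix/strings.TrimSuffix from the Go standard library are assumed.

package main

import (
	"fmt"
	"strings"
)

// normalizeNSTarget mirrors the guard added in GetDomainCorrections: for NS
// records pointing inside the zone itself, drop the trailing period before
// sending the update to the ClouDNS modify endpoint.
func normalizeNSTarget(recordType, target, zone string) string {
	if recordType == "NS" && strings.HasSuffix(target, zone+".") {
		return strings.TrimSuffix(target, ".")
	}
	return target
}

func main() {
	// In-zone NS target: the trailing period is trimmed.
	fmt.Println(normalizeNSTarget("NS", "ns1.example.com.", "example.com")) // ns1.example.com
	// Out-of-zone NS target: left untouched.
	fmt.Println(normalizeNSTarget("NS", "ns.other.net.", "example.com")) // ns.other.net.
}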
(\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/url\"\n\n\t\"k8s.io\/test-infra\/prow\/errorutil\"\n\t\"k8s.io\/test-infra\/prow\/github\"\n\t\"k8s.io\/test-infra\/prow\/labels\"\n\t\"k8s.io\/test-infra\/prow\/plugins\"\n)\n\nfunc handlePR(c Client, trigger *plugins.Trigger, pr github.PullRequestEvent) error {\n\torg, repo, a := orgRepoAuthor(pr.PullRequest)\n\tauthor := string(a)\n\tnum := pr.PullRequest.Number\n\tswitch pr.Action {\n\tcase github.PullRequestActionOpened:\n\t\t\/\/ When a PR is opened, if the author is in the org then build it.\n\t\t\/\/ Otherwise, ask for \"\/ok-to-test\". There's no need to look for previous\n\t\t\/\/ \"\/ok-to-test\" comments since the PR was just opened!\n\t\tmember, err := TrustedUser(c.GitHubClient, trigger, author, org, repo)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not check membership: %s\", err)\n\t\t}\n\t\tif member {\n\t\t\tc.Logger.Info(\"Starting all jobs for new PR.\")\n\t\t\treturn buildAll(c, &pr.PullRequest, pr.GUID)\n\t\t}\n\t\tc.Logger.Infof(\"Welcome message to PR author %q.\", author)\n\t\tif err := welcomeMsg(c.GitHubClient, trigger, pr.PullRequest); err != nil {\n\t\t\treturn fmt.Errorf(\"could not welcome non-org member %q: %v\", author, err)\n\t\t}\n\tcase github.PullRequestActionReopened:\n\t\t\/\/ When a PR is reopened, check that the user is in the org or that an org\n\t\t\/\/ member had said \"\/ok-to-test\" before building, resulting in label ok-to-test.\n\t\tl, trusted, err := TrustedPullRequest(c.GitHubClient, trigger, author, org, repo, num, nil)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not validate PR: %s\", err)\n\t\t} else if trusted {\n\t\t\t\/\/ Eventually remove need-ok-to-test\n\t\t\t\/\/ Does not work for TrustedUser() == true since labels are not fetched in this case\n\t\t\tif github.HasLabel(labels.NeedsOkToTest, l) {\n\t\t\t\tif err := c.GitHubClient.RemoveLabel(org, repo, num, labels.NeedsOkToTest); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tc.Logger.Info(\"Starting all jobs for updated PR.\")\n\t\t\treturn buildAll(c, &pr.PullRequest, pr.GUID)\n\t\t}\n\tcase github.PullRequestActionEdited:\n\t\t\/\/ if someone changes the base of their PR, we will get this\n\t\t\/\/ event and the changes field will list that the base SHA and\n\t\t\/\/ ref changes so we can detect such a case and retrigger tests\n\t\tvar changes struct {\n\t\t\tBase struct {\n\t\t\t\tRef struct {\n\t\t\t\t\tFrom string `json:\"from\"`\n\t\t\t\t} `json:\"ref\"`\n\t\t\t\tSha struct {\n\t\t\t\t\tFrom string `json:\"from\"`\n\t\t\t\t} `json:\"sha\"`\n\t\t\t} `json:\"base\"`\n\t\t}\n\t\tif err := json.Unmarshal(pr.Changes, &changes); err != nil {\n\t\t\t\/\/ we're detecting this best-effort so we can forget about\n\t\t\t\/\/ the event\n\t\t\treturn nil\n\t\t} else if changes.Base.Ref.From != \"\" || changes.Base.Sha.From != \"\" {\n\t\t\t\/\/ the base of the PR changed and we need to re-test it\n\t\t\treturn buildAllIfTrusted(c, trigger, pr)\n\t\t}\n\tcase github.PullRequestActionSynchronize:\n\t\treturn buildAllIfTrusted(c, trigger, pr)\n\tcase github.PullRequestActionLabeled:\n\t\t\/\/ When a PR is LGTMd, if it is untrusted then build it once.\n\t\tif pr.Label.Name == labels.LGTM {\n\t\t\t_, trusted, err := TrustedPullRequest(c.GitHubClient, trigger, author, org, repo, num, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"could not validate PR: %s\", err)\n\t\t\t} else if !trusted {\n\t\t\t\tc.Logger.Info(\"Starting all jobs for untrusted PR with LGTM.\")\n\t\t\t\treturn buildAll(c, 
&pr.PullRequest, pr.GUID)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\ntype login string\n\nfunc orgRepoAuthor(pr github.PullRequest) (string, string, login) {\n\torg := pr.Base.Repo.Owner.Login\n\trepo := pr.Base.Repo.Name\n\tauthor := pr.User.Login\n\treturn org, repo, login(author)\n}\n\nfunc buildAllIfTrusted(c Client, trigger *plugins.Trigger, pr github.PullRequestEvent) error {\n\t\/\/ When a PR is updated, check that the user is in the org or that an org\n\t\/\/ member has said \"\/ok-to-test\" before building. There's no need to ask\n\t\/\/ for \"\/ok-to-test\" because we do that once when the PR is created.\n\torg, repo, a := orgRepoAuthor(pr.PullRequest)\n\tauthor := string(a)\n\tnum := pr.PullRequest.Number\n\tl, trusted, err := TrustedPullRequest(c.GitHubClient, trigger, author, org, repo, num, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not validate PR: %s\", err)\n\t} else if trusted {\n\t\t\/\/ Eventually remove needs-ok-to-test\n\t\t\/\/ Will not work for org members since labels are not fetched in this case\n\t\tif github.HasLabel(labels.NeedsOkToTest, l) {\n\t\t\tif err := c.GitHubClient.RemoveLabel(org, repo, num, labels.NeedsOkToTest); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tc.Logger.Info(\"Starting all jobs for updated PR.\")\n\t\treturn buildAll(c, &pr.PullRequest, pr.GUID)\n\t}\n\treturn nil\n}\n\nfunc welcomeMsg(ghc githubClient, trigger *plugins.Trigger, pr github.PullRequest) error {\n\tvar errors []error\n\torg, repo, a := orgRepoAuthor(pr)\n\tauthor := string(a)\n\tencodedRepoFullName := url.QueryEscape(pr.Base.Repo.FullName)\n\tvar more string\n\tif trigger != nil && trigger.TrustedOrg != \"\" && trigger.TrustedOrg != org {\n\t\tmore = fmt.Sprintf(\"or [%s](https:\/\/github.com\/orgs\/%s\/people) \", trigger.TrustedOrg, trigger.TrustedOrg)\n\t}\n\n\tvar joinOrgURL string\n\tif trigger != nil && trigger.JoinOrgURL != \"\" {\n\t\tjoinOrgURL = trigger.JoinOrgURL\n\t} else {\n\t\tjoinOrgURL = fmt.Sprintf(\"https:\/\/github.com\/orgs\/%s\/people\", org)\n\t}\n\n\tvar comment string\n\tif trigger.IgnoreOkToTest {\n\t\tcomment = fmt.Sprintf(`Hi @%s. Thanks for your PR.\n\nPRs from untrusted users cannot be marked as trusted with `+\"`\/ok-to-test`\"+` in this repo meaning untrusted PR authors can never trigger tests themselves. Collaborators can still trigger tests on the PR using `+\"`\/test all`\"+`.\n\nI understand the commands that are listed [here](https:\/\/go.k8s.io\/bot-commands?repo=%s).\n\n<details>\n\n%s\n<\/details>\n`, author, encodedRepoFullName, plugins.AboutThisBotWithoutCommands)\n\t} else {\n\t\tcomment = fmt.Sprintf(`Hi @%s. Thanks for your PR.\n\nI'm waiting for a [%s](https:\/\/github.com\/orgs\/%s\/people) %smember to verify that this patch is reasonable to test. If it is, they should reply with `+\"`\/ok-to-test`\"+` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. 
Regular contributors should [join the org](%s) to skip this step.\n\nOnce the patch is verified, the new status will be reflected by the `+\"`%s`\"+` label.\n\nI understand the commands that are listed [here](https:\/\/go.k8s.io\/bot-commands?repo=%s).\n\n<details>\n\n%s\n<\/details>\n`, author, org, org, more, joinOrgURL, labels.OkToTest, encodedRepoFullName, plugins.AboutThisBotWithoutCommands)\n\t\tif err := ghc.AddLabel(org, repo, pr.Number, labels.NeedsOkToTest); err != nil {\n\t\t\terrors = append(errors, err)\n\t\t}\n\t}\n\n\tif err := ghc.CreateComment(org, repo, pr.Number, comment); err != nil {\n\t\terrors = append(errors, err)\n\t}\n\n\tif len(errors) > 0 {\n\t\treturn errorutil.NewAggregate(errors...)\n\t}\n\treturn nil\n}\n\n\/\/ TrustedPullRequest returns whether or not the given PR should be tested.\n\/\/ It first checks if the author is in the org, then looks for \"ok-to-test\" label.\nfunc TrustedPullRequest(ghc githubClient, trigger *plugins.Trigger, author, org, repo string, num int, l []github.Label) ([]github.Label, bool, error) {\n\t\/\/ First check if the author is a member of the org.\n\tif orgMember, err := TrustedUser(ghc, trigger, author, org, repo); err != nil {\n\t\treturn l, false, fmt.Errorf(\"error checking %s for trust: %v\", author, err)\n\t} else if orgMember {\n\t\treturn l, true, nil\n\t}\n\t\/\/ Then check if PR has ok-to-test label\n\tif l == nil {\n\t\tvar err error\n\t\tl, err = ghc.GetIssueLabels(org, repo, num)\n\t\tif err != nil {\n\t\t\treturn l, false, err\n\t\t}\n\t}\n\treturn l, github.HasLabel(labels.OkToTest, l), nil\n}\n\n\/\/ buildAll acts as if a `\/test all` comment has been placed on the PR\nfunc buildAll(c Client, pr *github.PullRequest, eventGUID string) error {\n\t\/\/ we pass a literal `\/test all` here as it's the most direct way to achieve\n\t\/\/ that functionality from the logic that parses out comment triggers\n\ttoTest, err := FilterPresubmits(false, c.GitHubClient, `\/test all`, pr, c.Config.Presubmits[pr.Base.Repo.FullName])\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn RunRequested(c, pr, toTest, eventGUID)\n}\n<commit_msg>Fix panic in giving welcome message<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage trigger\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/url\"\n\n\t\"k8s.io\/test-infra\/prow\/errorutil\"\n\t\"k8s.io\/test-infra\/prow\/github\"\n\t\"k8s.io\/test-infra\/prow\/labels\"\n\t\"k8s.io\/test-infra\/prow\/plugins\"\n)\n\nfunc handlePR(c Client, trigger *plugins.Trigger, pr github.PullRequestEvent) error {\n\torg, repo, a := orgRepoAuthor(pr.PullRequest)\n\tauthor := string(a)\n\tnum := pr.PullRequest.Number\n\tswitch pr.Action {\n\tcase github.PullRequestActionOpened:\n\t\t\/\/ When a PR is opened, if the author is in the org then build it.\n\t\t\/\/ Otherwise, ask for \"\/ok-to-test\". 
There's no need to look for previous\n\t\t\/\/ \"\/ok-to-test\" comments since the PR was just opened!\n\t\tmember, err := TrustedUser(c.GitHubClient, trigger, author, org, repo)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not check membership: %s\", err)\n\t\t}\n\t\tif member {\n\t\t\tc.Logger.Info(\"Starting all jobs for new PR.\")\n\t\t\treturn buildAll(c, &pr.PullRequest, pr.GUID)\n\t\t}\n\t\tc.Logger.Infof(\"Welcome message to PR author %q.\", author)\n\t\tif err := welcomeMsg(c.GitHubClient, trigger, pr.PullRequest); err != nil {\n\t\t\treturn fmt.Errorf(\"could not welcome non-org member %q: %v\", author, err)\n\t\t}\n\tcase github.PullRequestActionReopened:\n\t\t\/\/ When a PR is reopened, check that the user is in the org or that an org\n\t\t\/\/ member had said \"\/ok-to-test\" before building, resulting in label ok-to-test.\n\t\tl, trusted, err := TrustedPullRequest(c.GitHubClient, trigger, author, org, repo, num, nil)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not validate PR: %s\", err)\n\t\t} else if trusted {\n\t\t\t\/\/ Eventually remove need-ok-to-test\n\t\t\t\/\/ Does not work for TrustedUser() == true since labels are not fetched in this case\n\t\t\tif github.HasLabel(labels.NeedsOkToTest, l) {\n\t\t\t\tif err := c.GitHubClient.RemoveLabel(org, repo, num, labels.NeedsOkToTest); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tc.Logger.Info(\"Starting all jobs for updated PR.\")\n\t\t\treturn buildAll(c, &pr.PullRequest, pr.GUID)\n\t\t}\n\tcase github.PullRequestActionEdited:\n\t\t\/\/ if someone changes the base of their PR, we will get this\n\t\t\/\/ event and the changes field will list that the base SHA and\n\t\t\/\/ ref changes so we can detect such a case and retrigger tests\n\t\tvar changes struct {\n\t\t\tBase struct {\n\t\t\t\tRef struct {\n\t\t\t\t\tFrom string `json:\"from\"`\n\t\t\t\t} `json:\"ref\"`\n\t\t\t\tSha struct {\n\t\t\t\t\tFrom string `json:\"from\"`\n\t\t\t\t} `json:\"sha\"`\n\t\t\t} `json:\"base\"`\n\t\t}\n\t\tif err := json.Unmarshal(pr.Changes, &changes); err != nil {\n\t\t\t\/\/ we're detecting this best-effort so we can forget about\n\t\t\t\/\/ the event\n\t\t\treturn nil\n\t\t} else if changes.Base.Ref.From != \"\" || changes.Base.Sha.From != \"\" {\n\t\t\t\/\/ the base of the PR changed and we need to re-test it\n\t\t\treturn buildAllIfTrusted(c, trigger, pr)\n\t\t}\n\tcase github.PullRequestActionSynchronize:\n\t\treturn buildAllIfTrusted(c, trigger, pr)\n\tcase github.PullRequestActionLabeled:\n\t\t\/\/ When a PR is LGTMd, if it is untrusted then build it once.\n\t\tif pr.Label.Name == labels.LGTM {\n\t\t\t_, trusted, err := TrustedPullRequest(c.GitHubClient, trigger, author, org, repo, num, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"could not validate PR: %s\", err)\n\t\t\t} else if !trusted {\n\t\t\t\tc.Logger.Info(\"Starting all jobs for untrusted PR with LGTM.\")\n\t\t\t\treturn buildAll(c, &pr.PullRequest, pr.GUID)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\ntype login string\n\nfunc orgRepoAuthor(pr github.PullRequest) (string, string, login) {\n\torg := pr.Base.Repo.Owner.Login\n\trepo := pr.Base.Repo.Name\n\tauthor := pr.User.Login\n\treturn org, repo, login(author)\n}\n\nfunc buildAllIfTrusted(c Client, trigger *plugins.Trigger, pr github.PullRequestEvent) error {\n\t\/\/ When a PR is updated, check that the user is in the org or that an org\n\t\/\/ member has said \"\/ok-to-test\" before building. 
There's no need to ask\n\t\/\/ for \"\/ok-to-test\" because we do that once when the PR is created.\n\torg, repo, a := orgRepoAuthor(pr.PullRequest)\n\tauthor := string(a)\n\tnum := pr.PullRequest.Number\n\tl, trusted, err := TrustedPullRequest(c.GitHubClient, trigger, author, org, repo, num, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not validate PR: %s\", err)\n\t} else if trusted {\n\t\t\/\/ Eventually remove needs-ok-to-test\n\t\t\/\/ Will not work for org members since labels are not fetched in this case\n\t\tif github.HasLabel(labels.NeedsOkToTest, l) {\n\t\t\tif err := c.GitHubClient.RemoveLabel(org, repo, num, labels.NeedsOkToTest); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tc.Logger.Info(\"Starting all jobs for updated PR.\")\n\t\treturn buildAll(c, &pr.PullRequest, pr.GUID)\n\t}\n\treturn nil\n}\n\nfunc welcomeMsg(ghc githubClient, trigger *plugins.Trigger, pr github.PullRequest) error {\n\tvar errors []error\n\torg, repo, a := orgRepoAuthor(pr)\n\tauthor := string(a)\n\tencodedRepoFullName := url.QueryEscape(pr.Base.Repo.FullName)\n\tvar more string\n\tif trigger != nil && trigger.TrustedOrg != \"\" && trigger.TrustedOrg != org {\n\t\tmore = fmt.Sprintf(\"or [%s](https:\/\/github.com\/orgs\/%s\/people) \", trigger.TrustedOrg, trigger.TrustedOrg)\n\t}\n\n\tvar joinOrgURL string\n\tif trigger != nil && trigger.JoinOrgURL != \"\" {\n\t\tjoinOrgURL = trigger.JoinOrgURL\n\t} else {\n\t\tjoinOrgURL = fmt.Sprintf(\"https:\/\/github.com\/orgs\/%s\/people\", org)\n\t}\n\n\tvar comment string\n\tif trigger != nil && trigger.IgnoreOkToTest {\n\t\tcomment = fmt.Sprintf(`Hi @%s. Thanks for your PR.\n\nPRs from untrusted users cannot be marked as trusted with `+\"`\/ok-to-test`\"+` in this repo meaning untrusted PR authors can never trigger tests themselves. Collaborators can still trigger tests on the PR using `+\"`\/test all`\"+`.\n\nI understand the commands that are listed [here](https:\/\/go.k8s.io\/bot-commands?repo=%s).\n\n<details>\n\n%s\n<\/details>\n`, author, encodedRepoFullName, plugins.AboutThisBotWithoutCommands)\n\t} else {\n\t\tcomment = fmt.Sprintf(`Hi @%s. Thanks for your PR.\n\nI'm waiting for a [%s](https:\/\/github.com\/orgs\/%s\/people) %smember to verify that this patch is reasonable to test. If it is, they should reply with `+\"`\/ok-to-test`\"+` on its own line. Until that is done, I will not automatically test new commits in this PR, but the usual testing commands by org members will still work. 
Regular contributors should [join the org](%s) to skip this step.\n\nOnce the patch is verified, the new status will be reflected by the `+\"`%s`\"+` label.\n\nI understand the commands that are listed [here](https:\/\/go.k8s.io\/bot-commands?repo=%s).\n\n<details>\n\n%s\n<\/details>\n`, author, org, org, more, joinOrgURL, labels.OkToTest, encodedRepoFullName, plugins.AboutThisBotWithoutCommands)\n\t\tif err := ghc.AddLabel(org, repo, pr.Number, labels.NeedsOkToTest); err != nil {\n\t\t\terrors = append(errors, err)\n\t\t}\n\t}\n\n\tif err := ghc.CreateComment(org, repo, pr.Number, comment); err != nil {\n\t\terrors = append(errors, err)\n\t}\n\n\tif len(errors) > 0 {\n\t\treturn errorutil.NewAggregate(errors...)\n\t}\n\treturn nil\n}\n\n\/\/ TrustedPullRequest returns whether or not the given PR should be tested.\n\/\/ It first checks if the author is in the org, then looks for \"ok-to-test\" label.\nfunc TrustedPullRequest(ghc githubClient, trigger *plugins.Trigger, author, org, repo string, num int, l []github.Label) ([]github.Label, bool, error) {\n\t\/\/ First check if the author is a member of the org.\n\tif orgMember, err := TrustedUser(ghc, trigger, author, org, repo); err != nil {\n\t\treturn l, false, fmt.Errorf(\"error checking %s for trust: %v\", author, err)\n\t} else if orgMember {\n\t\treturn l, true, nil\n\t}\n\t\/\/ Then check if PR has ok-to-test label\n\tif l == nil {\n\t\tvar err error\n\t\tl, err = ghc.GetIssueLabels(org, repo, num)\n\t\tif err != nil {\n\t\t\treturn l, false, err\n\t\t}\n\t}\n\treturn l, github.HasLabel(labels.OkToTest, l), nil\n}\n\n\/\/ buildAll acts as if a `\/test all` comment has been placed on the PR\nfunc buildAll(c Client, pr *github.PullRequest, eventGUID string) error {\n\t\/\/ we pass a literal `\/test all` here as it's the most direct way to achieve\n\t\/\/ that functionality from the logic that parses out comment triggers\n\ttoTest, err := FilterPresubmits(false, c.GitHubClient, `\/test all`, pr, c.Config.Presubmits[pr.Base.Repo.FullName])\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn RunRequested(c, pr, toTest, eventGUID)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage sessionctx\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/pingcap\/errors\"\n\t\"github.com\/pingcap\/parser\/model\"\n\t\"github.com\/pingcap\/tidb\/kv\"\n\t\"github.com\/pingcap\/tidb\/metrics\"\n\t\"github.com\/pingcap\/tidb\/owner\"\n\t\"github.com\/pingcap\/tidb\/sessionctx\/variable\"\n\t\"github.com\/pingcap\/tidb\/util\"\n\t\"github.com\/pingcap\/tidb\/util\/kvcache\"\n\t\"github.com\/pingcap\/tidb\/util\/sli\"\n\t\"github.com\/pingcap\/tipb\/go-binlog\"\n\t\"github.com\/tikv\/client-go\/v2\/oracle\"\n)\n\n\/\/ InfoschemaMetaVersion is a workaround. Due to circular dependency,\n\/\/ can not return the complete interface. 
But SchemaMetaVersion is widely used for logging.\n\/\/ So we give a convenience for that.\n\/\/ FIXME: remove this interface\ntype InfoschemaMetaVersion interface {\n\tSchemaMetaVersion() int64\n}\n\n\/\/ Context is an interface for transaction and executive args environment.\ntype Context interface {\n\t\/\/ NewTxn creates a new transaction for further execution.\n\t\/\/ If old transaction is valid, it is committed first.\n\t\/\/ It's used in BEGIN statement and DDL statements to commit old transaction.\n\tNewTxn(context.Context) error\n\t\/\/ NewStaleTxnWithStartTS initializes a staleness transaction with the given StartTS.\n\tNewStaleTxnWithStartTS(ctx context.Context, startTS uint64) error\n\n\t\/\/ Txn returns the current transaction which is created before executing a statement.\n\t\/\/ The returned kv.Transaction is not nil, but it maybe pending or invalid.\n\t\/\/ If the active parameter is true, call this function will wait for the pending txn\n\t\/\/ to become valid.\n\tTxn(active bool) (kv.Transaction, error)\n\n\t\/\/ GetClient gets a kv.Client.\n\tGetClient() kv.Client\n\n\t\/\/ GetClient gets a kv.Client.\n\tGetMPPClient() kv.MPPClient\n\n\t\/\/ SetValue saves a value associated with this context for key.\n\tSetValue(key fmt.Stringer, value interface{})\n\n\t\/\/ Value returns the value associated with this context for key.\n\tValue(key fmt.Stringer) interface{}\n\n\t\/\/ ClearValue clears the value associated with this context for key.\n\tClearValue(key fmt.Stringer)\n\n\tGetInfoSchema() InfoschemaMetaVersion\n\n\tGetSessionVars() *variable.SessionVars\n\n\tGetSessionManager() util.SessionManager\n\n\t\/\/ RefreshTxnCtx commits old transaction without retry,\n\t\/\/ and creates a new transaction.\n\t\/\/ now just for load data and batch insert.\n\tRefreshTxnCtx(context.Context) error\n\n\t\/\/ RefreshVars refreshes modified global variable to current session.\n\t\/\/ only used to daemon session like `statsHandle` to detect global variable change.\n\tRefreshVars(context.Context) error\n\n\t\/\/ InitTxnWithStartTS initializes a transaction with startTS.\n\t\/\/ It should be called right before we builds an executor.\n\tInitTxnWithStartTS(startTS uint64) error\n\n\t\/\/ GetStore returns the store of session.\n\tGetStore() kv.Storage\n\n\t\/\/ PreparedPlanCache returns the cache of the physical plan\n\tPreparedPlanCache() *kvcache.SimpleLRUCache\n\n\t\/\/ StoreQueryFeedback stores the query feedback.\n\tStoreQueryFeedback(feedback interface{})\n\n\t\/\/ HasDirtyContent checks whether there's dirty update on the given table.\n\tHasDirtyContent(tid int64) bool\n\n\t\/\/ StmtCommit flush all changes by the statement to the underlying transaction.\n\tStmtCommit()\n\t\/\/ StmtRollback provides statement level rollback.\n\tStmtRollback()\n\t\/\/ StmtGetMutation gets the binlog mutation for current statement.\n\tStmtGetMutation(int64) *binlog.TableMutation\n\t\/\/ DDLOwnerChecker returns owner.DDLOwnerChecker.\n\tDDLOwnerChecker() owner.DDLOwnerChecker\n\t\/\/ AddTableLock adds table lock to the session lock map.\n\tAddTableLock([]model.TableLockTpInfo)\n\t\/\/ ReleaseTableLocks releases table locks in the session lock map.\n\tReleaseTableLocks(locks []model.TableLockTpInfo)\n\t\/\/ ReleaseTableLockByTableID releases table locks in the session lock map by table ID.\n\tReleaseTableLockByTableIDs(tableIDs []int64)\n\t\/\/ CheckTableLocked checks the table lock.\n\tCheckTableLocked(tblID int64) (bool, model.TableLockType)\n\t\/\/ GetAllTableLocks gets all table locks table id and db id 
hold by the session.\n\tGetAllTableLocks() []model.TableLockTpInfo\n\t\/\/ ReleaseAllTableLocks releases all table locks hold by the session.\n\tReleaseAllTableLocks()\n\t\/\/ HasLockedTables uses to check whether this session locked any tables.\n\tHasLockedTables() bool\n\t\/\/ PrepareTSFuture uses to prepare timestamp by future.\n\tPrepareTSFuture(ctx context.Context)\n\t\/\/ StoreIndexUsage stores the index usage information.\n\tStoreIndexUsage(tblID int64, idxID int64, rowsSelected int64)\n\t\/\/ GetTxnWriteThroughputSLI returns the TxnWriteThroughputSLI.\n\tGetTxnWriteThroughputSLI() *sli.TxnWriteThroughputSLI\n\t\/\/ GetBuiltinFunctionUsage returns the BuiltinFunctionUsage of current Context, which is not thread safe.\n\t\/\/ Use primitive map type to prevent circular import. Should convert it to telemetry.BuiltinFunctionUsage before using.\n\tGetBuiltinFunctionUsage() map[string]uint32\n}\n\ntype basicCtxType int\n\nfunc (t basicCtxType) String() string {\n\tswitch t {\n\tcase QueryString:\n\t\treturn \"query_string\"\n\tcase Initing:\n\t\treturn \"initing\"\n\tcase LastExecuteDDL:\n\t\treturn \"last_execute_ddl\"\n\t}\n\treturn \"unknown\"\n}\n\n\/\/ Context keys.\nconst (\n\t\/\/ QueryString is the key for original query string.\n\tQueryString basicCtxType = 1\n\t\/\/ Initing is the key for indicating if the server is running bootstrap or upgrade job.\n\tIniting basicCtxType = 2\n\t\/\/ LastExecuteDDL is the key for whether the session execute a ddl command last time.\n\tLastExecuteDDL basicCtxType = 3\n)\n\n\/\/ ValidateSnapshotReadTS strictly validates that readTS does not exceed the PD timestamp\nfunc ValidateSnapshotReadTS(ctx context.Context, sctx Context, readTS uint64) error {\n\tlatestTS, err := sctx.GetStore().GetOracle().GetLowResolutionTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})\n\t\/\/ If we fail to get latestTS or the readTS exceeds it, get a timestamp from PD to double check\n\tif err != nil || readTS > latestTS {\n\t\tmetrics.ValidateReadTSFromPDCount.Inc()\n\t\tcurrentVer, err := sctx.GetStore().CurrentVersion(oracle.GlobalTxnScope)\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"fail to validate read timestamp: %v\", err)\n\t\t}\n\t\tif readTS > currentVer.Ver {\n\t\t\treturn errors.Errorf(\"cannot set read timestamp to a future time\")\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ How far future from now ValidateStaleReadTS allows at most\nconst allowedTimeFromNow = 100 * time.Millisecond\n\n\/\/ ValidateStaleReadTS validates that readTS does not exceed the current time not strictly.\nfunc ValidateStaleReadTS(ctx context.Context, sctx Context, readTS uint64) error {\n\tcurrentTS, err := sctx.GetStore().GetOracle().GetStaleTimestamp(ctx, oracle.GlobalTxnScope, 0)\n\t\/\/ If we fail to calculate currentTS from local time, fallback to get a timestamp from PD\n\tif err != nil {\n\t\tmetrics.ValidateReadTSFromPDCount.Inc()\n\t\tcurrentVer, err := sctx.GetStore().CurrentVersion(oracle.GlobalTxnScope)\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"fail to validate read timestamp: %v\", err)\n\t\t}\n\t\tcurrentTS = currentVer.Ver\n\t}\n\tif oracle.GetTimeFromTS(readTS).After(oracle.GetTimeFromTS(currentTS).Add(allowedTimeFromNow)) {\n\t\treturn errors.Errorf(\"cannot set read timestamp to a future time\")\n\t}\n\treturn nil\n}\n<commit_msg>sessionctx\/context: fix comment typo to help generate godoc (#27901)<commit_after>\/\/ Copyright 2018 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use 
this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage sessionctx\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/pingcap\/errors\"\n\t\"github.com\/pingcap\/parser\/model\"\n\t\"github.com\/pingcap\/tidb\/kv\"\n\t\"github.com\/pingcap\/tidb\/metrics\"\n\t\"github.com\/pingcap\/tidb\/owner\"\n\t\"github.com\/pingcap\/tidb\/sessionctx\/variable\"\n\t\"github.com\/pingcap\/tidb\/util\"\n\t\"github.com\/pingcap\/tidb\/util\/kvcache\"\n\t\"github.com\/pingcap\/tidb\/util\/sli\"\n\t\"github.com\/pingcap\/tipb\/go-binlog\"\n\t\"github.com\/tikv\/client-go\/v2\/oracle\"\n)\n\n\/\/ InfoschemaMetaVersion is a workaround. Due to circular dependency,\n\/\/ can not return the complete interface. But SchemaMetaVersion is widely used for logging.\n\/\/ So we give a convenience for that.\n\/\/ FIXME: remove this interface\ntype InfoschemaMetaVersion interface {\n\tSchemaMetaVersion() int64\n}\n\n\/\/ Context is an interface for transaction and executive args environment.\ntype Context interface {\n\t\/\/ NewTxn creates a new transaction for further execution.\n\t\/\/ If old transaction is valid, it is committed first.\n\t\/\/ It's used in BEGIN statement and DDL statements to commit old transaction.\n\tNewTxn(context.Context) error\n\t\/\/ NewStaleTxnWithStartTS initializes a staleness transaction with the given StartTS.\n\tNewStaleTxnWithStartTS(ctx context.Context, startTS uint64) error\n\n\t\/\/ Txn returns the current transaction which is created before executing a statement.\n\t\/\/ The returned kv.Transaction is not nil, but it maybe pending or invalid.\n\t\/\/ If the active parameter is true, call this function will wait for the pending txn\n\t\/\/ to become valid.\n\tTxn(active bool) (kv.Transaction, error)\n\n\t\/\/ GetClient gets a kv.Client.\n\tGetClient() kv.Client\n\n\t\/\/ GetMPPClient gets a kv.MPPClient.\n\tGetMPPClient() kv.MPPClient\n\n\t\/\/ SetValue saves a value associated with this context for key.\n\tSetValue(key fmt.Stringer, value interface{})\n\n\t\/\/ Value returns the value associated with this context for key.\n\tValue(key fmt.Stringer) interface{}\n\n\t\/\/ ClearValue clears the value associated with this context for key.\n\tClearValue(key fmt.Stringer)\n\n\tGetInfoSchema() InfoschemaMetaVersion\n\n\tGetSessionVars() *variable.SessionVars\n\n\tGetSessionManager() util.SessionManager\n\n\t\/\/ RefreshTxnCtx commits old transaction without retry,\n\t\/\/ and creates a new transaction.\n\t\/\/ now just for load data and batch insert.\n\tRefreshTxnCtx(context.Context) error\n\n\t\/\/ RefreshVars refreshes modified global variable to current session.\n\t\/\/ only used to daemon session like `statsHandle` to detect global variable change.\n\tRefreshVars(context.Context) error\n\n\t\/\/ InitTxnWithStartTS initializes a transaction with startTS.\n\t\/\/ It should be called right before we builds an executor.\n\tInitTxnWithStartTS(startTS uint64) error\n\n\t\/\/ GetStore returns the store of session.\n\tGetStore() kv.Storage\n\n\t\/\/ PreparedPlanCache returns the cache of the physical plan\n\tPreparedPlanCache() 
*kvcache.SimpleLRUCache\n\n\t\/\/ StoreQueryFeedback stores the query feedback.\n\tStoreQueryFeedback(feedback interface{})\n\n\t\/\/ HasDirtyContent checks whether there's dirty update on the given table.\n\tHasDirtyContent(tid int64) bool\n\n\t\/\/ StmtCommit flush all changes by the statement to the underlying transaction.\n\tStmtCommit()\n\t\/\/ StmtRollback provides statement level rollback.\n\tStmtRollback()\n\t\/\/ StmtGetMutation gets the binlog mutation for current statement.\n\tStmtGetMutation(int64) *binlog.TableMutation\n\t\/\/ DDLOwnerChecker returns owner.DDLOwnerChecker.\n\tDDLOwnerChecker() owner.DDLOwnerChecker\n\t\/\/ AddTableLock adds table lock to the session lock map.\n\tAddTableLock([]model.TableLockTpInfo)\n\t\/\/ ReleaseTableLocks releases table locks in the session lock map.\n\tReleaseTableLocks(locks []model.TableLockTpInfo)\n\t\/\/ ReleaseTableLockByTableIDs releases table locks in the session lock map by table IDs.\n\tReleaseTableLockByTableIDs(tableIDs []int64)\n\t\/\/ CheckTableLocked checks the table lock.\n\tCheckTableLocked(tblID int64) (bool, model.TableLockType)\n\t\/\/ GetAllTableLocks gets all table locks table id and db id hold by the session.\n\tGetAllTableLocks() []model.TableLockTpInfo\n\t\/\/ ReleaseAllTableLocks releases all table locks hold by the session.\n\tReleaseAllTableLocks()\n\t\/\/ HasLockedTables uses to check whether this session locked any tables.\n\tHasLockedTables() bool\n\t\/\/ PrepareTSFuture uses to prepare timestamp by future.\n\tPrepareTSFuture(ctx context.Context)\n\t\/\/ StoreIndexUsage stores the index usage information.\n\tStoreIndexUsage(tblID int64, idxID int64, rowsSelected int64)\n\t\/\/ GetTxnWriteThroughputSLI returns the TxnWriteThroughputSLI.\n\tGetTxnWriteThroughputSLI() *sli.TxnWriteThroughputSLI\n\t\/\/ GetBuiltinFunctionUsage returns the BuiltinFunctionUsage of current Context, which is not thread safe.\n\t\/\/ Use primitive map type to prevent circular import. 
Should convert it to telemetry.BuiltinFunctionUsage before using.\n\tGetBuiltinFunctionUsage() map[string]uint32\n}\n\ntype basicCtxType int\n\nfunc (t basicCtxType) String() string {\n\tswitch t {\n\tcase QueryString:\n\t\treturn \"query_string\"\n\tcase Initing:\n\t\treturn \"initing\"\n\tcase LastExecuteDDL:\n\t\treturn \"last_execute_ddl\"\n\t}\n\treturn \"unknown\"\n}\n\n\/\/ Context keys.\nconst (\n\t\/\/ QueryString is the key for original query string.\n\tQueryString basicCtxType = 1\n\t\/\/ Initing is the key for indicating if the server is running bootstrap or upgrade job.\n\tIniting basicCtxType = 2\n\t\/\/ LastExecuteDDL is the key for whether the session execute a ddl command last time.\n\tLastExecuteDDL basicCtxType = 3\n)\n\n\/\/ ValidateSnapshotReadTS strictly validates that readTS does not exceed the PD timestamp\nfunc ValidateSnapshotReadTS(ctx context.Context, sctx Context, readTS uint64) error {\n\tlatestTS, err := sctx.GetStore().GetOracle().GetLowResolutionTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})\n\t\/\/ If we fail to get latestTS or the readTS exceeds it, get a timestamp from PD to double check\n\tif err != nil || readTS > latestTS {\n\t\tmetrics.ValidateReadTSFromPDCount.Inc()\n\t\tcurrentVer, err := sctx.GetStore().CurrentVersion(oracle.GlobalTxnScope)\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"fail to validate read timestamp: %v\", err)\n\t\t}\n\t\tif readTS > currentVer.Ver {\n\t\t\treturn errors.Errorf(\"cannot set read timestamp to a future time\")\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ How far future from now ValidateStaleReadTS allows at most\nconst allowedTimeFromNow = 100 * time.Millisecond\n\n\/\/ ValidateStaleReadTS validates that readTS does not exceed the current time not strictly.\nfunc ValidateStaleReadTS(ctx context.Context, sctx Context, readTS uint64) error {\n\tcurrentTS, err := sctx.GetStore().GetOracle().GetStaleTimestamp(ctx, oracle.GlobalTxnScope, 0)\n\t\/\/ If we fail to calculate currentTS from local time, fallback to get a timestamp from PD\n\tif err != nil {\n\t\tmetrics.ValidateReadTSFromPDCount.Inc()\n\t\tcurrentVer, err := sctx.GetStore().CurrentVersion(oracle.GlobalTxnScope)\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"fail to validate read timestamp: %v\", err)\n\t\t}\n\t\tcurrentTS = currentVer.Ver\n\t}\n\tif oracle.GetTimeFromTS(readTS).After(oracle.GetTimeFromTS(currentTS).Add(allowedTimeFromNow)) {\n\t\treturn errors.Errorf(\"cannot set read timestamp to a future time\")\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package acceptance_tests\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n)\n\n\/*\n\tTests support for external certificate lists. Test structure:\n\n\t1. Deploy HAProxy with internal certificate A\n\t2. Update external certificate list to add certificate B\n\t3. Verify that HTTPS requests using certificates A, B are working and C is not working\n\t4. Update external certificate list to remove B and add C\n\t5. 
Verify that HTTPS requests using certificates A, C are working and B is not working\n\n*\/\n\nvar _ = Describe(\"External Certificate Lists\", func() {\n\tIt(\"Uses the correct certs\", func() {\n\t\topsfileSSLCertificate := `---\n# Ensure HAProxy is in daemon mode (syslog server cannot be stdout)\n- type: replace\n path: \/instance_groups\/name=haproxy\/jobs\/name=haproxy\/properties\/ha_proxy\/syslog_server?\n value: \"\/var\/vcap\/sys\/log\/haproxy\/syslog\"\n# Add CertA as a regular certificate\n- type: replace\n path: \/instance_groups\/name=haproxy\/jobs\/name=haproxy\/properties\/ha_proxy\/crt_list?\/-\n value:\n snifilter:\n - cert_a.haproxy.internal\n ssl_pem:\n cert_chain: ((cert_a.certificate))((cert_a.ca))\n private_key: ((cert_a.private_key))\n\n# Configure external certificate list\n- type: replace\n path: \/instance_groups\/name=haproxy\/jobs\/name=haproxy\/properties\/ha_proxy\/ext_crt_list?\n value: true\n- type: replace\n path: \/instance_groups\/name=haproxy\/jobs\/name=haproxy\/properties\/ha_proxy\/ext_crt_list_file?\n value: ((ext_crt_list_path))\n- type: replace\n path: \/instance_groups\/name=haproxy\/jobs\/name=haproxy\/properties\/ha_proxy\/ext_crt_list_policy?\n value: continue\n\n# Generate CA and certificates\n- type: replace\n path: \/variables?\/-\n value:\n name: common_ca\n type: certificate\n options:\n is_ca: true\n common_name: bosh\n- type: replace\n path: \/variables?\/-\n value:\n name: cert_a\n type: certificate\n options:\n ca: common_ca\n common_name: cert_a.haproxy.internal\n alternative_names: [cert_a.haproxy.internal]\n- type: replace\n path: \/variables?\/-\n value:\n name: cert_b\n type: certificate\n options:\n ca: common_ca\n common_name: cert_b.haproxy.internal\n alternative_names: [cert_b.haproxy.internal]\n- type: replace\n path: \/variables?\/-\n value:\n name: cert_c\n type: certificate\n options:\n ca: common_ca\n common_name: cert_c.haproxy.internal\n alternative_names: [cert_c.haproxy.internal]\n`\n\n\t\thaproxyBackendPort := 12000\n\t\textCrtListPath := \"\/var\/vcap\/jobs\/haproxy\/config\/ssl\/ext-crt-list\"\n\t\thaproxyInfo, varsStoreReader := deployHAProxy(baseManifestVars{\n\t\t\thaproxyBackendPort: haproxyBackendPort,\n\t\t\thaproxyBackendServers: []string{\"127.0.0.1\"},\n\t\t\tdeploymentName: defaultDeploymentName,\n\t\t}, []string{opsfileSSLCertificate}, map[string]interface{}{\n\t\t\t\"ext_crt_list_path\": extCrtListPath,\n\t\t}, true)\n\n\t\tvar creds struct {\n\t\t\tCertA struct {\n\t\t\t\tCertificate string `yaml:\"certificate\"`\n\t\t\t\tCA string `yaml:\"ca\"`\n\t\t\t\tPrivateKey string `yaml:\"private_key\"`\n\t\t\t} `yaml:\"cert_a\"`\n\t\t\tCertB struct {\n\t\t\t\tCertificate string `yaml:\"certificate\"`\n\t\t\t\tCA string `yaml:\"ca\"`\n\t\t\t\tPrivateKey string `yaml:\"private_key\"`\n\t\t\t} `yaml:\"cert_b\"`\n\t\t\tCertC struct {\n\t\t\t\tCertificate string `yaml:\"certificate\"`\n\t\t\t\tCA string `yaml:\"ca\"`\n\t\t\t\tPrivateKey string `yaml:\"private_key\"`\n\t\t\t} `yaml:\"cert_c\"`\n\t\t}\n\t\terr := varsStoreReader(&creds)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\/\/ Wait for HAProxy to accept TCP connections\n\t\twaitForHAProxyListening(haproxyInfo)\n\n\t\tcloseLocalServer, localPort := startDefaultTestServer()\n\t\tdefer closeLocalServer()\n\n\t\tcloseTunnel := setupTunnelFromHaproxyToTestServer(haproxyInfo, haproxyBackendPort, localPort)\n\t\tdefer closeTunnel()\n\n\t\tclient := buildHTTPClient(\n\t\t\t[]string{creds.CertA.CA, creds.CertB.CA, 
creds.CertC.CA},\n\t\t\tmap[string]string{\n\t\t\t\t\"cert_a.haproxy.internal:443\": fmt.Sprintf(\"%s:443\", haproxyInfo.PublicIP),\n\t\t\t\t\"cert_b.haproxy.internal:443\": fmt.Sprintf(\"%s:443\", haproxyInfo.PublicIP),\n\t\t\t\t\"cert_c.haproxy.internal:443\": fmt.Sprintf(\"%s:443\", haproxyInfo.PublicIP),\n\t\t\t},\n\t\t\t[]tls.Certificate{},\n\t\t)\n\n\t\tBy(\"Sending a request to HAProxy using internal cert A works (default cert)\")\n\t\tresp, err := client.Get(\"https:\/\/cert_a.haproxy.internal:443\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(resp.StatusCode).To(Equal(http.StatusOK))\n\t\tEventually(gbytes.BufferReader(resp.Body)).Should(gbytes.Say(\"Hello cloud foundry\"))\n\n\t\tBy(\"Sending a request to HAProxy using external cert B fails (external cert not yet added)\")\n\t\t_, err = client.Get(\"https:\/\/cert_b.haproxy.internal:443\")\n\t\tExpect(err).To(HaveOccurred())\n\t\tExpect(err.Error()).To(ContainSubstring(\"certificate is valid for cert_a.haproxy.internal, not cert_b.haproxy.internal\"))\n\n\t\tBy(\"Sending a request to HAProxy using external cert C fails (external cert not yet added)\")\n\t\t_, err = client.Get(\"https:\/\/cert_c.haproxy.internal:443\")\n\t\tExpect(err).To(HaveOccurred())\n\t\tExpect(err.Error()).To(ContainSubstring(\"certificate is valid for cert_a.haproxy.internal, not cert_c.haproxy.internal\"))\n\n\t\t\/\/ external certs format is a concatenated file containing certificate PEM, CA PEM, private key PEM\n\t\tpemChainCertB := bytes.NewBufferString(strings.Join([]string{creds.CertB.Certificate, creds.CertB.CA, creds.CertB.PrivateKey}, \"\\n\"))\n\t\tpemChainCertBPath := \"\/var\/vcap\/jobs\/haproxy\/config\/ssl\/cert_b.haproxy.internal.pem\"\n\t\tpemChainCertC := bytes.NewBufferString(strings.Join([]string{creds.CertC.Certificate, creds.CertC.CA, creds.CertC.PrivateKey}, \"\\n\"))\n\t\tpemChainCertCPath := \"\/var\/vcap\/jobs\/haproxy\/config\/ssl\/cert_c.haproxy.internal.pem\"\n\n\t\textCrtList := bytes.NewBufferString(fmt.Sprintf(\"%s cert_b.haproxy.internal\\n\", pemChainCertBPath))\n\n\t\tBy(\"Uploading external certificates and external cert list to HAProxy\")\n\t\tuploadFile(haproxyInfo, pemChainCertB, pemChainCertBPath)\n\t\tdefer deleteRemoteFile(haproxyInfo, pemChainCertBPath)\n\t\tuploadFile(haproxyInfo, extCrtList, extCrtListPath)\n\t\tdefer deleteRemoteFile(haproxyInfo, extCrtListPath)\n\n\t\tBy(\"Reloading HAProxy\")\n\t\treloadHAProxy(haproxyInfo)\n\n\t\tBy(\"Waiting for HAProxy to start listening (up to two minutes)\")\n\t\twaitForHAProxyListening(haproxyInfo)\n\n\t\tBy(\"Sending a request to HAProxy using internal cert A works (default cert)\")\n\t\tresp, err = client.Get(\"https:\/\/cert_a.haproxy.internal:443\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(resp.StatusCode).To(Equal(http.StatusOK))\n\t\tEventually(gbytes.BufferReader(resp.Body)).Should(gbytes.Say(\"Hello cloud foundry\"))\n\n\t\tBy(\"Sending a request to HAProxy using external cert B works (external cert now added)\")\n\t\tresp, err = client.Get(\"https:\/\/cert_b.haproxy.internal:443\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(resp.StatusCode).To(Equal(http.StatusOK))\n\t\tEventually(gbytes.BufferReader(resp.Body)).Should(gbytes.Say(\"Hello cloud foundry\"))\n\n\t\tBy(\"Sending a request to HAProxy using external cert C fails (external cert not yet added)\")\n\t\t_, err = client.Get(\"https:\/\/cert_c.haproxy.internal:443\")\n\t\tExpect(err).To(HaveOccurred())\n\t\tExpect(err.Error()).To(ContainSubstring(\"certificate is valid for 
cert_a.haproxy.internal, not cert_c.haproxy.internal\"))\n\n\t\tBy(\"Removing external cert B and adding external cert C to external cert list\")\n\t\textCrtList = bytes.NewBufferString(fmt.Sprintf(\"%s cert_c.haproxy.internal\\n\", pemChainCertCPath))\n\n\t\tdeleteRemoteFile(haproxyInfo, pemChainCertBPath)\n\t\tuploadFile(haproxyInfo, pemChainCertC, pemChainCertCPath)\n\t\tdefer deleteRemoteFile(haproxyInfo, pemChainCertCPath)\n\t\tuploadFile(haproxyInfo, extCrtList, extCrtListPath)\n\n\t\tBy(\"Reloading HAProxy\")\n\t\treloadHAProxy(haproxyInfo)\n\n\t\tBy(\"Waiting for HAProxy to start listening (up to two minutes)\")\n\t\twaitForHAProxyListening(haproxyInfo)\n\n\t\tBy(\"Sending a request to HAProxy using internal cert A works (default cert)\")\n\t\tresp, err = client.Get(\"https:\/\/cert_a.haproxy.internal:443\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(resp.StatusCode).To(Equal(http.StatusOK))\n\t\tEventually(gbytes.BufferReader(resp.Body)).Should(gbytes.Say(\"Hello cloud foundry\"))\n\n\t\tBy(\"Sending a request to HAProxy using external cert B fails (external cert that was removed)\")\n\t\t_, err = client.Get(\"https:\/\/cert_b.haproxy.internal:443\")\n\t\tExpect(err).To(HaveOccurred())\n\t\tExpect(err.Error()).To(ContainSubstring(\"certificate is valid for cert_a.haproxy.internal, not cert_b.haproxy.internal\"))\n\n\t\tBy(\"Sending a request to HAProxy using external cert C works (external cert that was added)\")\n\t\tresp, err = client.Get(\"https:\/\/cert_c.haproxy.internal:443\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(resp.StatusCode).To(Equal(http.StatusOK))\n\t\tEventually(gbytes.BufferReader(resp.Body)).Should(gbytes.Say(\"Hello cloud foundry\"))\n\t})\n})\n\nfunc deleteRemoteFile(haproxyInfo haproxyInfo, remotePath string) {\n\t_, _, err := runOnRemote(haproxyInfo.SSHUser, haproxyInfo.PublicIP, haproxyInfo.SSHPrivateKey, fmt.Sprintf(\"sudo rm -f %s\", remotePath))\n\tExpect(err).NotTo(HaveOccurred())\n}\n\nfunc uploadFile(haproxyInfo haproxyInfo, contents io.Reader, remotePath string) {\n\t\/\/ Due to permissions issues with the SCP library\n\t\/\/ we need to upload to the tmp dir first, then copy to the intended directory\n\t\/\/ Finally chown to the VCAP user so BOSH processes have permissions to read\/write the file\n\tbasename := filepath.Base(remotePath)\n\ttmpRemotePath := fmt.Sprintf(\"\/tmp\/%s\", basename)\n\n\terr := copyFileToRemote(haproxyInfo.SSHUser, haproxyInfo.PublicIP, haproxyInfo.SSHPrivateKey, tmpRemotePath, contents, \"0777\")\n\tExpect(err).NotTo(HaveOccurred())\n\n\t_, _, err = runOnRemote(haproxyInfo.SSHUser, haproxyInfo.PublicIP, haproxyInfo.SSHPrivateKey, fmt.Sprintf(\"sudo mv %s %s\", tmpRemotePath, remotePath))\n\tExpect(err).NotTo(HaveOccurred())\n\n\t_, _, err = runOnRemote(haproxyInfo.SSHUser, haproxyInfo.PublicIP, haproxyInfo.SSHPrivateKey, fmt.Sprintf(\"sudo chown vcap:vcap %s\", remotePath))\n\tExpect(err).NotTo(HaveOccurred())\n}\n<commit_msg>Add acceptance test for ext_crt_list_policy: fail<commit_after>package acceptance_tests\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n)\n\n\/*\n\tTests support for external certificate lists. Test structure:\n\n\t1. Deploy HAProxy with internal certificate A\n\t2. Update external certificate list to add certificate B\n\t3. 
Verify that HTTPS requests using certificates A, B are working and C is not working\n\t4. Update external certificate list to remove B and add C\n\t5. Verify that HTTPS requests using certificates A, C are working and B is not working\n\n*\/\n\nvar _ = Describe(\"External Certificate Lists\", func() {\n\thaproxyBackendPort := 12000\n\n\tIt(\"Uses the correct certs\", func() {\n\t\topsfileSSLCertificate := `---\n# Ensure HAProxy is in daemon mode (syslog server cannot be stdout)\n- type: replace\n path: \/instance_groups\/name=haproxy\/jobs\/name=haproxy\/properties\/ha_proxy\/syslog_server?\n value: \"\/var\/vcap\/sys\/log\/haproxy\/syslog\"\n# Add CertA as a regular certificate\n- type: replace\n path: \/instance_groups\/name=haproxy\/jobs\/name=haproxy\/properties\/ha_proxy\/crt_list?\/-\n value:\n snifilter:\n - cert_a.haproxy.internal\n ssl_pem:\n cert_chain: ((cert_a.certificate))((cert_a.ca))\n private_key: ((cert_a.private_key))\n\n# Configure external certificate list\n- type: replace\n path: \/instance_groups\/name=haproxy\/jobs\/name=haproxy\/properties\/ha_proxy\/ext_crt_list?\n value: true\n- type: replace\n path: \/instance_groups\/name=haproxy\/jobs\/name=haproxy\/properties\/ha_proxy\/ext_crt_list_file?\n value: ((ext_crt_list_path))\n- type: replace\n path: \/instance_groups\/name=haproxy\/jobs\/name=haproxy\/properties\/ha_proxy\/ext_crt_list_policy?\n value: continue\n\n# Generate CA and certificates\n- type: replace\n path: \/variables?\/-\n value:\n name: common_ca\n type: certificate\n options:\n is_ca: true\n common_name: bosh\n- type: replace\n path: \/variables?\/-\n value:\n name: cert_a\n type: certificate\n options:\n ca: common_ca\n common_name: cert_a.haproxy.internal\n alternative_names: [cert_a.haproxy.internal]\n- type: replace\n path: \/variables?\/-\n value:\n name: cert_b\n type: certificate\n options:\n ca: common_ca\n common_name: cert_b.haproxy.internal\n alternative_names: [cert_b.haproxy.internal]\n- type: replace\n path: \/variables?\/-\n value:\n name: cert_c\n type: certificate\n options:\n ca: common_ca\n common_name: cert_c.haproxy.internal\n alternative_names: [cert_c.haproxy.internal]\n`\n\n\t\textCrtListPath := \"\/var\/vcap\/jobs\/haproxy\/config\/ssl\/ext-crt-list\"\n\t\thaproxyInfo, varsStoreReader := deployHAProxy(baseManifestVars{\n\t\t\thaproxyBackendPort: haproxyBackendPort,\n\t\t\thaproxyBackendServers: []string{\"127.0.0.1\"},\n\t\t\tdeploymentName: defaultDeploymentName,\n\t\t}, []string{opsfileSSLCertificate}, map[string]interface{}{\n\t\t\t\"ext_crt_list_path\": extCrtListPath,\n\t\t}, true)\n\n\t\tvar creds struct {\n\t\t\tCertA struct {\n\t\t\t\tCertificate string `yaml:\"certificate\"`\n\t\t\t\tCA string `yaml:\"ca\"`\n\t\t\t\tPrivateKey string `yaml:\"private_key\"`\n\t\t\t} `yaml:\"cert_a\"`\n\t\t\tCertB struct {\n\t\t\t\tCertificate string `yaml:\"certificate\"`\n\t\t\t\tCA string `yaml:\"ca\"`\n\t\t\t\tPrivateKey string `yaml:\"private_key\"`\n\t\t\t} `yaml:\"cert_b\"`\n\t\t\tCertC struct {\n\t\t\t\tCertificate string `yaml:\"certificate\"`\n\t\t\t\tCA string `yaml:\"ca\"`\n\t\t\t\tPrivateKey string `yaml:\"private_key\"`\n\t\t\t} `yaml:\"cert_c\"`\n\t\t}\n\t\terr := varsStoreReader(&creds)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\/\/ Wait for HAProxy to accept TCP connections\n\t\twaitForHAProxyListening(haproxyInfo)\n\n\t\tcloseLocalServer, localPort := startDefaultTestServer()\n\t\tdefer closeLocalServer()\n\n\t\tcloseTunnel := setupTunnelFromHaproxyToTestServer(haproxyInfo, haproxyBackendPort, 
localPort)\n\t\tdefer closeTunnel()\n\n\t\tclient := buildHTTPClient(\n\t\t\t[]string{creds.CertA.CA, creds.CertB.CA, creds.CertC.CA},\n\t\t\tmap[string]string{\n\t\t\t\t\"cert_a.haproxy.internal:443\": fmt.Sprintf(\"%s:443\", haproxyInfo.PublicIP),\n\t\t\t\t\"cert_b.haproxy.internal:443\": fmt.Sprintf(\"%s:443\", haproxyInfo.PublicIP),\n\t\t\t\t\"cert_c.haproxy.internal:443\": fmt.Sprintf(\"%s:443\", haproxyInfo.PublicIP),\n\t\t\t},\n\t\t\t[]tls.Certificate{},\n\t\t)\n\n\t\tBy(\"Sending a request to HAProxy using internal cert A works (default cert)\")\n\t\tresp, err := client.Get(\"https:\/\/cert_a.haproxy.internal:443\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(resp.StatusCode).To(Equal(http.StatusOK))\n\t\tEventually(gbytes.BufferReader(resp.Body)).Should(gbytes.Say(\"Hello cloud foundry\"))\n\n\t\tBy(\"Sending a request to HAProxy using external cert B fails (external cert not yet added)\")\n\t\t_, err = client.Get(\"https:\/\/cert_b.haproxy.internal:443\")\n\t\tExpect(err).To(HaveOccurred())\n\t\tExpect(err.Error()).To(ContainSubstring(\"certificate is valid for cert_a.haproxy.internal, not cert_b.haproxy.internal\"))\n\n\t\tBy(\"Sending a request to HAProxy using external cert C fails (external cert not yet added)\")\n\t\t_, err = client.Get(\"https:\/\/cert_c.haproxy.internal:443\")\n\t\tExpect(err).To(HaveOccurred())\n\t\tExpect(err.Error()).To(ContainSubstring(\"certificate is valid for cert_a.haproxy.internal, not cert_c.haproxy.internal\"))\n\n\t\t\/\/ external certs format is a concatenated file containing certificate PEM, CA PEM, private key PEM\n\t\tpemChainCertB := bytes.NewBufferString(strings.Join([]string{creds.CertB.Certificate, creds.CertB.CA, creds.CertB.PrivateKey}, \"\\n\"))\n\t\tpemChainCertBPath := \"\/var\/vcap\/jobs\/haproxy\/config\/ssl\/cert_b.haproxy.internal.pem\"\n\t\tpemChainCertC := bytes.NewBufferString(strings.Join([]string{creds.CertC.Certificate, creds.CertC.CA, creds.CertC.PrivateKey}, \"\\n\"))\n\t\tpemChainCertCPath := \"\/var\/vcap\/jobs\/haproxy\/config\/ssl\/cert_c.haproxy.internal.pem\"\n\n\t\textCrtList := bytes.NewBufferString(fmt.Sprintf(\"%s cert_b.haproxy.internal\\n\", pemChainCertBPath))\n\n\t\tBy(\"Uploading external certificates and external cert list to HAProxy\")\n\t\tuploadFile(haproxyInfo, pemChainCertB, pemChainCertBPath)\n\t\tdefer deleteRemoteFile(haproxyInfo, pemChainCertBPath)\n\t\tuploadFile(haproxyInfo, extCrtList, extCrtListPath)\n\t\tdefer deleteRemoteFile(haproxyInfo, extCrtListPath)\n\n\t\tBy(\"Reloading HAProxy\")\n\t\treloadHAProxy(haproxyInfo)\n\n\t\tBy(\"Waiting for HAProxy to start listening (up to two minutes)\")\n\t\twaitForHAProxyListening(haproxyInfo)\n\n\t\tBy(\"Sending a request to HAProxy using internal cert A works (default cert)\")\n\t\tresp, err = client.Get(\"https:\/\/cert_a.haproxy.internal:443\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(resp.StatusCode).To(Equal(http.StatusOK))\n\t\tEventually(gbytes.BufferReader(resp.Body)).Should(gbytes.Say(\"Hello cloud foundry\"))\n\n\t\tBy(\"Sending a request to HAProxy using external cert B works (external cert now added)\")\n\t\tresp, err = client.Get(\"https:\/\/cert_b.haproxy.internal:443\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(resp.StatusCode).To(Equal(http.StatusOK))\n\t\tEventually(gbytes.BufferReader(resp.Body)).Should(gbytes.Say(\"Hello cloud foundry\"))\n\n\t\tBy(\"Sending a request to HAProxy using external cert C fails (external cert not yet added)\")\n\t\t_, err = 
client.Get(\"https:\/\/cert_c.haproxy.internal:443\")\n\t\tExpect(err).To(HaveOccurred())\n\t\tExpect(err.Error()).To(ContainSubstring(\"certificate is valid for cert_a.haproxy.internal, not cert_c.haproxy.internal\"))\n\n\t\tBy(\"Removing external cert B and adding externat cert C to external cert list\")\n\t\textCrtList = bytes.NewBufferString(fmt.Sprintf(\"%s cert_c.haproxy.internal\\n\", pemChainCertCPath))\n\n\t\tdeleteRemoteFile(haproxyInfo, pemChainCertBPath)\n\t\tuploadFile(haproxyInfo, pemChainCertC, pemChainCertCPath)\n\t\tdefer deleteRemoteFile(haproxyInfo, pemChainCertCPath)\n\t\tuploadFile(haproxyInfo, extCrtList, extCrtListPath)\n\n\t\tBy(\"Reloading HAProxy\")\n\t\treloadHAProxy(haproxyInfo)\n\n\t\tBy(\"Waiting for HAProxy to start listening (up to two minutes)\")\n\t\twaitForHAProxyListening(haproxyInfo)\n\n\t\tBy(\"Sending a request to HAProxy using internal cert A works (default cert)\")\n\t\tresp, err = client.Get(\"https:\/\/cert_a.haproxy.internal:443\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(resp.StatusCode).To(Equal(http.StatusOK))\n\t\tEventually(gbytes.BufferReader(resp.Body)).Should(gbytes.Say(\"Hello cloud foundry\"))\n\n\t\tBy(\"Sending a request to HAProxy using external cert B fails (external cert that was removed)\")\n\t\t_, err = client.Get(\"https:\/\/cert_b.haproxy.internal:443\")\n\t\tExpect(err).To(HaveOccurred())\n\t\tExpect(err.Error()).To(ContainSubstring(\"certificate is valid for cert_a.haproxy.internal, not cert_b.haproxy.internal\"))\n\n\t\tBy(\"Sending a request to HAProxy using external cert C works (external cert that was added)\")\n\t\tresp, err = client.Get(\"https:\/\/cert_c.haproxy.internal:443\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(resp.StatusCode).To(Equal(http.StatusOK))\n\t\tEventually(gbytes.BufferReader(resp.Body)).Should(gbytes.Say(\"Hello cloud foundry\"))\n\t})\n\n\tContext(\"When ext_crt_list_policy is set to fail\", func() {\n\t\topfileExternalCertificatePolicyFail := `---\n# Ensure HAProxy is in daemon mode (syslog server cannot be stdout)\n- type: replace\n path: \/instance_groups\/name=haproxy\/jobs\/name=haproxy\/properties\/ha_proxy\/syslog_server?\n value: \"\/var\/vcap\/sys\/log\/haproxy\/syslog\"\n\n# Configure external certificate list properties\n- type: replace\n path: \/instance_groups\/name=haproxy\/jobs\/name=haproxy\/properties\/ha_proxy\/ext_crt_list?\n value: true\n- type: replace\n path: \/instance_groups\/name=haproxy\/jobs\/name=haproxy\/properties\/ha_proxy\/ext_crt_list_policy?\n value: fail\n# crt_list or ssl_pem need to be non-nil for SSL to be enabled\n- type: replace\n path: \/instance_groups\/name=haproxy\/jobs\/name=haproxy\/properties\/ha_proxy\/crt_list?\n value: []\n`\n\n\t\tContext(\"When the external certificate does not exist\", func() {\n\t\t\tIt(\"Fails the deployment\", func() {\n\t\t\t\tdeployHAProxy(baseManifestVars{\n\t\t\t\t\thaproxyBackendPort: haproxyBackendPort,\n\t\t\t\t\thaproxyBackendServers: []string{\"127.0.0.1\"},\n\t\t\t\t\tdeploymentName: defaultDeploymentName,\n\t\t\t\t}, []string{opfileExternalCertificatePolicyFail}, map[string]interface{}{\n\t\t\t\t\t\"ext_crt_list_path\": \"\/var\/vcap\/jobs\/haproxy\/config\/ssl\/does-not-exist\",\n\t\t\t\t}, true)\n\t\t\t})\n\t\t})\n\n\t\tContext(\"When the external certificate does exist\", func() {\n\t\t\topsfileOSConfProvidedCertificate := `---\n# Configure os-conf to install \"external\" cert in pre-start script\n- type: replace\n path: \/instance_groups\/name=haproxy\/jobs\/-\n value:\n name: 
pre-start-script\n release: os-conf\n properties:\n script: |-\n #!\/bin\/bash\n mkdir -p \/var\/vcap\/jobs\/haproxy\/config\/ssl\/ext\n\n # Write cert list\n echo '\/var\/vcap\/jobs\/haproxy\/config\/ssl\/ext\/os-conf-cert haproxy.internal' > \/var\/vcap\/jobs\/haproxy\/config\/ssl\/ext\/crt-list\n\n # Write cert chain\n echo '((cert.certificate))((cert.ca))((cert.private_key))' > \/var\/vcap\/jobs\/haproxy\/config\/ssl\/ext\/os-conf-cert\n\n # Ensure HAProxy can read certs\n chown -R vcap:vcap \/var\/vcap\/jobs\/haproxy\/config\/ssl\/ext\n# Generate CA and certificates\n- type: replace\n path: \/variables?\/-\n value:\n name: common_ca\n type: certificate\n options:\n is_ca: true\n common_name: bosh\n- type: replace\n path: \/variables?\/-\n value:\n name: cert\n type: certificate\n options:\n ca: common_ca\n common_name: haproxy.internal\n alternative_names: [haproxy.internal]\n`\n\n\t\t\tIt(\"Successfully loads and uses the certificate\", func() {\n\t\t\t\thaproxyInfo, varsStoreReader := deployHAProxy(baseManifestVars{\n\t\t\t\t\thaproxyBackendPort: haproxyBackendPort,\n\t\t\t\t\thaproxyBackendServers: []string{\"127.0.0.1\"},\n\t\t\t\t\tdeploymentName: defaultDeploymentName,\n\t\t\t\t}, []string{opfileExternalCertificatePolicyFail, opsfileOSConfProvidedCertificate}, map[string]interface{}{\n\t\t\t\t\t\"ext_crt_list_path\": \"\/var\/vcap\/jobs\/haproxy\/config\/ssl\/cert-written-by-os-conf\",\n\t\t\t\t}, true)\n\n\t\t\t\t\/\/ Ensure file written by os-conf is cleaned up for next test\n\t\t\t\tdefer deleteRemoteFile(haproxyInfo, \"\/var\/vcap\/jobs\/haproxy\/config\/ssl\/ext\")\n\n\t\t\t\tvar creds struct {\n\t\t\t\t\tCert struct {\n\t\t\t\t\t\tCertificate string `yaml:\"certificate\"`\n\t\t\t\t\t\tCA string `yaml:\"ca\"`\n\t\t\t\t\t\tPrivateKey string `yaml:\"private_key\"`\n\t\t\t\t\t} `yaml:\"cert\"`\n\t\t\t\t}\n\t\t\t\terr := varsStoreReader(&creds)\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\t\/\/ Wait for HAProxy to accept TCP connections\n\t\t\t\twaitForHAProxyListening(haproxyInfo)\n\n\t\t\t\tcloseLocalServer, localPort := startDefaultTestServer()\n\t\t\t\tdefer closeLocalServer()\n\n\t\t\t\tcloseTunnel := setupTunnelFromHaproxyToTestServer(haproxyInfo, haproxyBackendPort, localPort)\n\t\t\t\tdefer closeTunnel()\n\n\t\t\t\tclient := buildHTTPClient(\n\t\t\t\t\t[]string{creds.Cert.CA},\n\t\t\t\t\tmap[string]string{\"haproxy.internal:443\": fmt.Sprintf(\"%s:443\", haproxyInfo.PublicIP)},\n\t\t\t\t\t[]tls.Certificate{},\n\t\t\t\t)\n\n\t\t\t\tBy(\"Sending a request to HAProxy using the external cert\")\n\t\t\t\tresp, err := client.Get(\"https:\/\/haproxy.internal:443\")\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\tExpect(resp.StatusCode).To(Equal(http.StatusOK))\n\t\t\t\tEventually(gbytes.BufferReader(resp.Body)).Should(gbytes.Say(\"Hello cloud foundry\"))\n\t\t\t})\n\t\t})\n\t})\n})\n\nfunc deleteRemoteFile(haproxyInfo haproxyInfo, remotePath string) {\n\t_, _, err := runOnRemote(haproxyInfo.SSHUser, haproxyInfo.PublicIP, haproxyInfo.SSHPrivateKey, fmt.Sprintf(\"sudo rm -rf %s\", remotePath))\n\tExpect(err).NotTo(HaveOccurred())\n}\n\nfunc uploadFile(haproxyInfo haproxyInfo, contents io.Reader, remotePath string) {\n\t\/\/ Due to permissions issues with the SCP library\n\t\/\/ we need to upload to the tmp dir first, then copy to the intended directory\n\t\/\/ Finally chown to the VCAP user so BOSH processes have permissions to read\/write the file\n\tbasename := filepath.Base(remotePath)\n\ttmpRemotePath := fmt.Sprintf(\"\/tmp\/%s\", basename)\n\n\terr := 
copyFileToRemote(haproxyInfo.SSHUser, haproxyInfo.PublicIP, haproxyInfo.SSHPrivateKey, tmpRemotePath, contents, \"0777\")\n\tExpect(err).NotTo(HaveOccurred())\n\n\t_, _, err = runOnRemote(haproxyInfo.SSHUser, haproxyInfo.PublicIP, haproxyInfo.SSHPrivateKey, fmt.Sprintf(\"sudo mv %s %s\", tmpRemotePath, remotePath))\n\tExpect(err).NotTo(HaveOccurred())\n\n\t_, _, err = runOnRemote(haproxyInfo.SSHUser, haproxyInfo.PublicIP, haproxyInfo.SSHPrivateKey, fmt.Sprintf(\"sudo chown vcap:vcap %s\", remotePath))\n\tExpect(err).NotTo(HaveOccurred())\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\n\/\/ Cluster represents high-level information about a LXD cluster.\n\/\/\n\/\/ API extension: clustering\ntype Cluster struct {\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n\tEnabled bool `json:\"enabled\" yaml:\"enabled\"`\n\n\t\/\/ API extension: clustering_join\n\tMemberConfig []ClusterMemberConfigKey `json:\"member_config\" yaml:\"member_config\"`\n}\n\n\/\/ ClusterMemberConfigKey represents a single config key that a new member of\n\/\/ the cluster is required to provide when joining.\n\/\/\n\/\/ The Value field is empty when getting clustering information with GET\n\/\/ \/1.0\/cluster, and should be filled by the joining node when performing a PUT\n\/\/ \/1.0\/cluster join request.\n\/\/\n\/\/ API extension: clustering_join\ntype ClusterMemberConfigKey struct {\n\tEntity string `json:\"entity\" yaml:\"entity\"`\n\tName string `json:\"name\" yaml:\"name\"`\n\tKey string `json:\"key\" yaml:\"key\"`\n\tValue string `json:\"value\" yaml:\"value\"`\n\tDescription string `json:\"description\" yaml:\"description\"`\n}\n\n\/\/ ClusterPut represents the fields required to bootstrap or join a LXD\n\/\/ cluster.\n\/\/\n\/\/ API extension: clustering\ntype ClusterPut struct {\n\tCluster `yaml:\",inline\"`\n\tClusterAddress string `json:\"cluster_address\" yaml:\"cluster_address\"`\n\tClusterCertificate string `json:\"cluster_certificate\" yaml:\"cluster_certificate\"`\n\n\t\/\/ API extension: clustering_join\n\tServerAddress string `json:\"server_address\" yaml:\"server_address\"`\n\tClusterPassword string `json:\"cluster_password\" yaml:\"cluster_password\"`\n}\n\n\/\/ ClusterMemberPost represents the fields required to rename a LXD node.\n\/\/\n\/\/ API extension: clustering\ntype ClusterMemberPost struct {\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n}\n\n\/\/ ClusterMember represents the a LXD node in the cluster.\n\/\/\n\/\/ API extension: clustering\ntype ClusterMember struct {\n\tClusterMemberPut `yaml:\",inline\"`\n\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n\tURL string `json:\"url\" yaml:\"url\"`\n\tDatabase bool `json:\"database\" yaml:\"database\"`\n\tStatus string `json:\"status\" yaml:\"status\"`\n\tMessage string `json:\"message\" yaml:\"message\"`\n}\n\n\/\/ Writable converts a full Profile struct into a ProfilePut struct (filters read-only fields)\nfunc (member *ClusterMember) Writable() ClusterMemberPut {\n\treturn member.ClusterMemberPut\n}\n\n\/\/ ClusterMemberPut represents the the modifiable fields of a LXD cluster member\n\/\/\n\/\/ API extension: clustering_edit_roles\ntype ClusterMemberPut struct {\n\t\/\/ API extension: clustering_roles\n\tRoles []string `json:\"roles\" yaml:\"roles\"`\n\n\t\/\/ API extension: clustering_architecture\n\tArchitecture string `json:\"architecture\" yaml:\"architecture\"`\n}\n<commit_msg>shared\/api: Add FailureDomain field to ClusterMemberPut<commit_after>package api\n\n\/\/ Cluster 
represents high-level information about a LXD cluster.\n\/\/\n\/\/ API extension: clustering\ntype Cluster struct {\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n\tEnabled bool `json:\"enabled\" yaml:\"enabled\"`\n\n\t\/\/ API extension: clustering_join\n\tMemberConfig []ClusterMemberConfigKey `json:\"member_config\" yaml:\"member_config\"`\n}\n\n\/\/ ClusterMemberConfigKey represents a single config key that a new member of\n\/\/ the cluster is required to provide when joining.\n\/\/\n\/\/ The Value field is empty when getting clustering information with GET\n\/\/ \/1.0\/cluster, and should be filled by the joining node when performing a PUT\n\/\/ \/1.0\/cluster join request.\n\/\/\n\/\/ API extension: clustering_join\ntype ClusterMemberConfigKey struct {\n\tEntity string `json:\"entity\" yaml:\"entity\"`\n\tName string `json:\"name\" yaml:\"name\"`\n\tKey string `json:\"key\" yaml:\"key\"`\n\tValue string `json:\"value\" yaml:\"value\"`\n\tDescription string `json:\"description\" yaml:\"description\"`\n}\n\n\/\/ ClusterPut represents the fields required to bootstrap or join a LXD\n\/\/ cluster.\n\/\/\n\/\/ API extension: clustering\ntype ClusterPut struct {\n\tCluster `yaml:\",inline\"`\n\tClusterAddress string `json:\"cluster_address\" yaml:\"cluster_address\"`\n\tClusterCertificate string `json:\"cluster_certificate\" yaml:\"cluster_certificate\"`\n\n\t\/\/ API extension: clustering_join\n\tServerAddress string `json:\"server_address\" yaml:\"server_address\"`\n\tClusterPassword string `json:\"cluster_password\" yaml:\"cluster_password\"`\n}\n\n\/\/ ClusterMemberPost represents the fields required to rename a LXD node.\n\/\/\n\/\/ API extension: clustering\ntype ClusterMemberPost struct {\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n}\n\n\/\/ ClusterMember represents a LXD node in the cluster.\n\/\/\n\/\/ API extension: clustering\ntype ClusterMember struct {\n\tClusterMemberPut `yaml:\",inline\"`\n\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n\tURL string `json:\"url\" yaml:\"url\"`\n\tDatabase bool `json:\"database\" yaml:\"database\"`\n\tStatus string `json:\"status\" yaml:\"status\"`\n\tMessage string `json:\"message\" yaml:\"message\"`\n}\n\n\/\/ Writable converts a full ClusterMember struct into a ClusterMemberPut struct (filters read-only fields)\nfunc (member *ClusterMember) Writable() ClusterMemberPut {\n\treturn member.ClusterMemberPut\n}\n\n\/\/ ClusterMemberPut represents the modifiable fields of a LXD cluster member\n\/\/\n\/\/ API extension: clustering_edit_roles\ntype ClusterMemberPut struct {\n\t\/\/ API extension: clustering_roles\n\tRoles []string `json:\"roles\" yaml:\"roles\"`\n\n\t\/\/ API extension: clustering_architecture\n\tArchitecture string `json:\"architecture\" yaml:\"architecture\"`\n}\n<commit_msg>shared\/api: Add FailureDomain field to ClusterMemberPut<commit_after>package api\n\n\/\/ Cluster represents high-level information about a LXD cluster.\n\/\/\n\/\/ API extension: clustering\ntype Cluster struct {\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n\tEnabled bool `json:\"enabled\" yaml:\"enabled\"`\n\n\t\/\/ API extension: clustering_join\n\tMemberConfig []ClusterMemberConfigKey `json:\"member_config\" yaml:\"member_config\"`\n}\n\n\/\/ ClusterMemberConfigKey represents a single config key that a new member of\n\/\/ the cluster is required to provide when joining.\n\/\/\n\/\/ The Value field is empty when getting clustering information with GET\n\/\/ \/1.0\/cluster, and should be filled by the joining node when performing a PUT\n\/\/ \/1.0\/cluster join request.\n\/\/\n\/\/ API extension: clustering_join\ntype ClusterMemberConfigKey struct {\n\tEntity string `json:\"entity\" yaml:\"entity\"`\n\tName string `json:\"name\" yaml:\"name\"`\n\tKey string `json:\"key\" yaml:\"key\"`\n\tValue string `json:\"value\" yaml:\"value\"`\n\tDescription string `json:\"description\" yaml:\"description\"`\n}\n\n\/\/ ClusterPut represents the fields required to bootstrap or join a LXD\n\/\/ cluster.\n\/\/\n\/\/ API extension: clustering\ntype ClusterPut struct {\n\tCluster `yaml:\",inline\"`\n\tClusterAddress string `json:\"cluster_address\" yaml:\"cluster_address\"`\n\tClusterCertificate string `json:\"cluster_certificate\" yaml:\"cluster_certificate\"`\n\n\t\/\/ API extension: clustering_join\n\tServerAddress string `json:\"server_address\" yaml:\"server_address\"`\n\tClusterPassword string `json:\"cluster_password\" yaml:\"cluster_password\"`\n}\n\n\/\/ ClusterMemberPost represents the fields required to rename a LXD node.\n\/\/\n\/\/ API extension: clustering\ntype ClusterMemberPost struct {\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n}\n\n\/\/ ClusterMember represents a LXD node in the cluster.\n\/\/\n\/\/ API extension: clustering\ntype ClusterMember struct {\n\tClusterMemberPut `yaml:\",inline\"`\n\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n\tURL string `json:\"url\" yaml:\"url\"`\n\tDatabase bool `json:\"database\" yaml:\"database\"`\n\tStatus string `json:\"status\" yaml:\"status\"`\n\tMessage string `json:\"message\" yaml:\"message\"`\n}\n\n\/\/ Writable converts a full ClusterMember struct into a ClusterMemberPut struct (filters read-only fields)\nfunc (member *ClusterMember) Writable() ClusterMemberPut {\n\treturn member.ClusterMemberPut\n}\n\n\/\/ ClusterMemberPut represents the modifiable fields of a LXD cluster member\n\/\/\n\/\/ API extension: clustering_edit_roles\ntype ClusterMemberPut struct {\n\t\/\/ API extension: clustering_roles\n\tRoles []string `json:\"roles\" yaml:\"roles\"`\n\n\t\/\/ API extension: clustering_architecture\n\tArchitecture string `json:\"architecture\" yaml:\"architecture\"`\n\n\t\/\/ API extension: clustering_failure_domains\n\tFailureDomain string `json:\"failure_domain\" yaml:\"failure_domain\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package version\n\nimport (\n\t\"os\"\n\t\"strconv\"\n)\n\n\/\/ APIVersion contains the API base version. 
Only bumped for backward incompatible changes.\nvar APIVersion = \"1.0\"\n\n\/\/ APIExtensions is the list of all API extensions in the order they were added.\n\/\/\n\/\/ The following kinds of changes come with a new extension:\n\/\/\n\/\/ - New configuration key\n\/\/ - New valid values for a configuration key\n\/\/ - New REST API endpoint\n\/\/ - New argument inside an existing REST API call\n\/\/ - New HTTPs authentication mechanisms or protocols\n\/\/\n\/\/ This list is used mainly by the LXD server code, but it's in the shared\n\/\/ package as well for reference.\nvar APIExtensions = []string{\n\t\"storage_zfs_remove_snapshots\",\n\t\"container_host_shutdown_timeout\",\n\t\"container_stop_priority\",\n\t\"container_syscall_filtering\",\n\t\"auth_pki\",\n\t\"container_last_used_at\",\n\t\"etag\",\n\t\"patch\",\n\t\"usb_devices\",\n\t\"https_allowed_credentials\",\n\t\"image_compression_algorithm\",\n\t\"directory_manipulation\",\n\t\"container_cpu_time\",\n\t\"storage_zfs_use_refquota\",\n\t\"storage_lvm_mount_options\",\n\t\"network\",\n\t\"profile_usedby\",\n\t\"container_push\",\n\t\"container_exec_recording\",\n\t\"certificate_update\",\n\t\"container_exec_signal_handling\",\n\t\"gpu_devices\",\n\t\"container_image_properties\",\n\t\"migration_progress\",\n\t\"id_map\",\n\t\"network_firewall_filtering\",\n\t\"network_routes\",\n\t\"storage\",\n\t\"file_delete\",\n\t\"file_append\",\n\t\"network_dhcp_expiry\",\n\t\"storage_lvm_vg_rename\",\n\t\"storage_lvm_thinpool_rename\",\n\t\"network_vlan\",\n\t\"image_create_aliases\",\n\t\"container_stateless_copy\",\n\t\"container_only_migration\",\n\t\"storage_zfs_clone_copy\",\n\t\"unix_device_rename\",\n\t\"storage_lvm_use_thinpool\",\n\t\"storage_rsync_bwlimit\",\n\t\"network_vxlan_interface\",\n\t\"storage_btrfs_mount_options\",\n\t\"entity_description\",\n\t\"image_force_refresh\",\n\t\"storage_lvm_lv_resizing\",\n\t\"id_map_base\",\n\t\"file_symlinks\",\n\t\"container_push_target\",\n\t\"network_vlan_physical\",\n\t\"storage_images_delete\",\n\t\"container_edit_metadata\",\n\t\"container_snapshot_stateful_migration\",\n\t\"storage_driver_ceph\",\n\t\"storage_ceph_user_name\",\n\t\"resource_limits\",\n\t\"storage_volatile_initial_source\",\n\t\"storage_ceph_force_osd_reuse\",\n\t\"storage_block_filesystem_btrfs\",\n\t\"resources\",\n\t\"kernel_limits\",\n\t\"storage_api_volume_rename\",\n\t\"macaroon_authentication\",\n\t\"network_sriov\",\n\t\"console\",\n\t\"restrict_devlxd\",\n\t\"migration_pre_copy\",\n\t\"infiniband\",\n\t\"maas_network\",\n\t\"devlxd_events\",\n\t\"proxy\",\n\t\"network_dhcp_gateway\",\n\t\"file_get_symlink\",\n\t\"network_leases\",\n\t\"unix_device_hotplug\",\n\t\"storage_api_local_volume_handling\",\n\t\"operation_description\",\n\t\"clustering\",\n\t\"event_lifecycle\",\n\t\"storage_api_remote_volume_handling\",\n\t\"nvidia_runtime\",\n\t\"container_mount_propagation\",\n\t\"container_backup\",\n\t\"devlxd_images\",\n\t\"container_local_cross_pool_handling\",\n\t\"proxy_unix\",\n\t\"proxy_udp\",\n\t\"clustering_join\",\n\t\"proxy_tcp_udp_multi_port_handling\",\n\t\"network_state\",\n\t\"proxy_unix_dac_properties\",\n\t\"container_protection_delete\",\n\t\"unix_priv_drop\",\n\t\"pprof_http\",\n\t\"proxy_haproxy_protocol\",\n\t\"network_hwaddr\",\n\t\"proxy_nat\",\n\t\"network_nat_order\",\n\t\"container_full\",\n\t\"candid_authentication\",\n\t\"backup_compression\",\n\t\"candid_config\",\n\t\"nvidia_runtime_config\",\n\t\"storage_api_volume_snapshots\",\n\t\"storage_unmapped\",\n}\n\n\/\/ APIExtensionsCount 
returns the number of available API extensions.\nfunc APIExtensionsCount() int {\n\tcount := len(APIExtensions)\n\n\t\/\/ This environment variable is an internal one to force the code\n\t\/\/ to believe that we have an API extensions version greater than we\n\t\/\/ actually have. It's used by integration tests to exercise the\n\t\/\/ cluster upgrade process.\n\tartificialBump := os.Getenv(\"LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS\")\n\tif artificialBump != \"\" {\n\t\tn, err := strconv.Atoi(artificialBump)\n\t\tif err == nil {\n\t\t\tcount += n\n\t\t}\n\t}\n\n\treturn count\n}\n<commit_msg>shared\/version: Project API extension<commit_after>package version\n\nimport (\n\t\"os\"\n\t\"strconv\"\n)\n\n\/\/ APIVersion contains the API base version. Only bumped for backward incompatible changes.\nvar APIVersion = \"1.0\"\n\n\/\/ APIExtensions is the list of all API extensions in the order they were added.\n\/\/\n\/\/ The following kinds of changes come with a new extension:\n\/\/\n\/\/ - New configuration key\n\/\/ - New valid values for a configuration key\n\/\/ - New REST API endpoint\n\/\/ - New argument inside an existing REST API call\n\/\/ - New HTTPs authentication mechanisms or protocols\n\/\/\n\/\/ This list is used mainly by the LXD server code, but it's in the shared\n\/\/ package as well for reference.\nvar APIExtensions = []string{\n\t\"storage_zfs_remove_snapshots\",\n\t\"container_host_shutdown_timeout\",\n\t\"container_stop_priority\",\n\t\"container_syscall_filtering\",\n\t\"auth_pki\",\n\t\"container_last_used_at\",\n\t\"etag\",\n\t\"patch\",\n\t\"usb_devices\",\n\t\"https_allowed_credentials\",\n\t\"image_compression_algorithm\",\n\t\"directory_manipulation\",\n\t\"container_cpu_time\",\n\t\"storage_zfs_use_refquota\",\n\t\"storage_lvm_mount_options\",\n\t\"network\",\n\t\"profile_usedby\",\n\t\"container_push\",\n\t\"container_exec_recording\",\n\t\"certificate_update\",\n\t\"container_exec_signal_handling\",\n\t\"gpu_devices\",\n\t\"container_image_properties\",\n\t\"migration_progress\",\n\t\"id_map\",\n\t\"network_firewall_filtering\",\n\t\"network_routes\",\n\t\"storage\",\n\t\"file_delete\",\n\t\"file_append\",\n\t\"network_dhcp_expiry\",\n\t\"storage_lvm_vg_rename\",\n\t\"storage_lvm_thinpool_rename\",\n\t\"network_vlan\",\n\t\"image_create_aliases\",\n\t\"container_stateless_copy\",\n\t\"container_only_migration\",\n\t\"storage_zfs_clone_copy\",\n\t\"unix_device_rename\",\n\t\"storage_lvm_use_thinpool\",\n\t\"storage_rsync_bwlimit\",\n\t\"network_vxlan_interface\",\n\t\"storage_btrfs_mount_options\",\n\t\"entity_description\",\n\t\"image_force_refresh\",\n\t\"storage_lvm_lv_resizing\",\n\t\"id_map_base\",\n\t\"file_symlinks\",\n\t\"container_push_target\",\n\t\"network_vlan_physical\",\n\t\"storage_images_delete\",\n\t\"container_edit_metadata\",\n\t\"container_snapshot_stateful_migration\",\n\t\"storage_driver_ceph\",\n\t\"storage_ceph_user_name\",\n\t\"resource_limits\",\n\t\"storage_volatile_initial_source\",\n\t\"storage_ceph_force_osd_reuse\",\n\t\"storage_block_filesystem_btrfs\",\n\t\"resources\",\n\t\"kernel_limits\",\n\t\"storage_api_volume_rename\",\n\t\"macaroon_authentication\",\n\t\"network_sriov\",\n\t\"console\",\n\t\"restrict_devlxd\",\n\t\"migration_pre_copy\",\n\t\"infiniband\",\n\t\"maas_network\",\n\t\"devlxd_events\",\n\t\"proxy\",\n\t\"network_dhcp_gateway\",\n\t\"file_get_symlink\",\n\t\"network_leases\",\n\t\"unix_device_hotplug\",\n\t\"storage_api_local_volume_handling\",\n\t\"operation_description\",\n\t\"clustering\",\n\t\"event_lifecycle\",\n\t\"st
orage_api_remote_volume_handling\",\n\t\"nvidia_runtime\",\n\t\"container_mount_propagation\",\n\t\"container_backup\",\n\t\"devlxd_images\",\n\t\"container_local_cross_pool_handling\",\n\t\"proxy_unix\",\n\t\"proxy_udp\",\n\t\"clustering_join\",\n\t\"proxy_tcp_udp_multi_port_handling\",\n\t\"network_state\",\n\t\"proxy_unix_dac_properties\",\n\t\"container_protection_delete\",\n\t\"unix_priv_drop\",\n\t\"pprof_http\",\n\t\"proxy_haproxy_protocol\",\n\t\"network_hwaddr\",\n\t\"proxy_nat\",\n\t\"network_nat_order\",\n\t\"container_full\",\n\t\"candid_authentication\",\n\t\"backup_compression\",\n\t\"candid_config\",\n\t\"nvidia_runtime_config\",\n\t\"storage_api_volume_snapshots\",\n\t\"storage_unmapped\",\n\t\"projects\",\n}\n\n\/\/ APIExtensionsCount returns the number of available API extensions.\nfunc APIExtensionsCount() int {\n\tcount := len(APIExtensions)\n\n\t\/\/ This environment variable is an internal one to force the code\n\t\/\/ to believe that we have an API extensions version greater than we\n\t\/\/ actually have. It's used by integration tests to exercise the\n\t\/\/ cluster upgrade process.\n\tartificialBump := os.Getenv(\"LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS\")\n\tif artificialBump != \"\" {\n\t\tn, err := strconv.Atoi(artificialBump)\n\t\tif err == nil {\n\t\t\tcount += n\n\t\t}\n\t}\n\n\treturn count\n}\n<|endoftext|>"} {"text":"<commit_before>package rpc\n\nimport (\n\tatlantis \"atlantis\/common\"\n\t. \"atlantis\/manager\/constant\"\n\t\"atlantis\/manager\/crypto\"\n\t\"atlantis\/manager\/datamodel\"\n\t\"atlantis\/manager\/manager\"\n\t\"atlantis\/manager\/supervisor\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/rpc\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype ManagerRPC bool\n\nvar (\n\tlAddr string\n\tlPort string\n\tl net.Listener\n\tserver *rpc.Server\n\tconfig *tls.Config\n\tCPUSharesIncrement = uint(1) \/\/ default to no increment\n\tMemoryLimitIncrement = uint(1) \/\/ default to no increment\n)\n\nfunc Init(listenAddr string, supervisorPort uint16, cpuIncr, memIncr uint, resDuration time.Duration) error {\n\tvar err error\n\terr = LoadEnvs()\n\tif err != nil {\n\t\treturn err\n\t}\n\tCPUSharesIncrement = cpuIncr\n\tMemoryLimitIncrement = memIncr\n\tatlantis.Tracker.ResultDuration = resDuration\n\t\/\/ init rpc stuff here\n\tlAddr = listenAddr\n\tlPort = strings.Split(lAddr, \":\")[1]\n\tsupervisor.Init(fmt.Sprintf(\"%d\", supervisorPort))\n\tmanager.Init(lPort)\n\tmanager := new(ManagerRPC)\n\tserver = rpc.NewServer()\n\tserver.Register(manager)\n\tconfig := &tls.Config{}\n\tconfig.InsecureSkipVerify = true\n\tconfig.Certificates = make([]tls.Certificate, 1)\n\tconfig.Certificates[0], err = tls.X509KeyPair(crypto.SERVER_CERT, crypto.SERVER_KEY)\n\n\tl, err = tls.Listen(\"tcp\", lAddr, config)\n\treturn err\n}\n\nfunc Listen() {\n\tif l == nil {\n\t\tpanic(\"Not Initialized.\")\n\t}\n\tlog.Println(\"[RPC] Listening on\", lAddr)\n\tserver.Accept(l)\n}\n\nfunc checkRole(role string, rType string) error {\n\tlog.Printf(\"[CheckRole] checking myself (%s:%s) for %s:%s\", Region, Host, rType, role)\n\tzkManager, err := datamodel.GetManager(Region, Host)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"[CheckRole] roles: %v\", zkManager.Roles)\n\tif !zkManager.HasRole(role, rType) {\n\t\tlog.Printf(\"[CheckRole] role check fail.\")\n\t\tmanagersWithRole := \"\"\n\t\tmanagers, err := datamodel.ListManagers()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor region, rManagers := range managers {\n\t\t\tfor _, manager := range rManagers 
{\n\t\t\t\tzm, err := datamodel.GetManager(region, manager)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif zm.HasRole(role, rType) {\n\t\t\t\t\tmanagersWithRole = managersWithRole + zm.ManagerCName + \"\\n\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn errors.New(fmt.Sprintf(\"This manager does not have the ability to %s %s. \"+\n\t\t\t\"Please try one of these:\\n%s\", rType, role, managersWithRole))\n\t}\n\tlog.Printf(\"[CheckRole] role check success.\")\n\treturn nil\n}\n<commit_msg>register self<commit_after>package rpc\n\nimport (\n\tatlantis \"atlantis\/common\"\n\t. \"atlantis\/manager\/constant\"\n\t\"atlantis\/manager\/crypto\"\n\t\"atlantis\/manager\/datamodel\"\n\t\"atlantis\/manager\/manager\"\n\t\"atlantis\/manager\/supervisor\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/rpc\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype ManagerRPC bool\n\nvar (\n\tlAddr string\n\tlPort string\n\tl net.Listener\n\tserver *rpc.Server\n\tconfig *tls.Config\n\tCPUSharesIncrement = uint(1) \/\/ default to no increment\n\tMemoryLimitIncrement = uint(1) \/\/ default to no increment\n)\n\nfunc Init(listenAddr string, supervisorPort uint16, cpuIncr, memIncr uint, resDuration time.Duration) error {\n\tvar err error\n\terr = LoadEnvs()\n\tif err != nil {\n\t\treturn err\n\t}\n\tCPUSharesIncrement = cpuIncr\n\tMemoryLimitIncrement = memIncr\n\tatlantis.Tracker.ResultDuration = resDuration\n\t\/\/ init rpc stuff here\n\tlAddr = listenAddr\n\tlPort = strings.Split(lAddr, \":\")[1]\n\tsupervisor.Init(fmt.Sprintf(\"%d\", supervisorPort))\n\tmanager.Init(lPort)\n\tmanager := new(ManagerRPC)\n\tserver = rpc.NewServer()\n\tserver.Register(manager)\n\tconfig := &tls.Config{}\n\tconfig.InsecureSkipVerify = true\n\tconfig.Certificates = make([]tls.Certificate, 1)\n\tconfig.Certificates[0], err = tls.X509KeyPair(crypto.SERVER_CERT, crypto.SERVER_KEY)\n\n\tl, err = tls.Listen(\"tcp\", lAddr, config)\n\treturn err\n}\n\nfunc Listen() {\n\tgo selfRegister()\n\tif l == nil {\n\t\tpanic(\"Not Initialized.\")\n\t}\n\tlog.Println(\"[RPC] Listening on\", lAddr)\n\tserver.Accept(l)\n}\n\nfunc selfRegister() {\n\tlog.Println(\"[SelfRegister] Registering Self.\")\n\tzkManager, err := datamodel.GetManager(Region, Host)\n\tif err == nil && zkManager != nil {\n\t\t\/\/ i'm already registered\n\t\tlog.Println(\"[SelfRegister] Already Registered\")\n\t\treturn\n\t}\n\tmgr, err := manager.Register(Region, Host, \"\", \"\")\n\tif err != nil {\n\t\tlog.Fatalln(\"[SelfRegister] Failure: \", err)\n\t}\n\tlog.Printf(\"[SelfRegister] Success: %s\", mgr.ManagerCName)\n}\n\nfunc checkRole(role string, rType string) error {\n\tlog.Printf(\"[CheckRole] checking myself (%s:%s) for %s:%s\", Region, Host, rType, role)\n\tzkManager, err := datamodel.GetManager(Region, Host)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"[CheckRole] roles: %v\", zkManager.Roles)\n\tif !zkManager.HasRole(role, rType) {\n\t\tlog.Printf(\"[CheckRole] role check fail.\")\n\t\tmanagersWithRole := \"\"\n\t\tmanagers, err := datamodel.ListManagers()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor region, rManagers := range managers {\n\t\t\tfor _, manager := range rManagers {\n\t\t\t\tzm, err := datamodel.GetManager(region, manager)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif zm.HasRole(role, rType) {\n\t\t\t\t\tmanagersWithRole = managersWithRole + zm.ManagerCName + \"\\n\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn errors.New(fmt.Sprintf(\"This manager does not have the ability to %s %s. 
\"+\n\t\t\t\"Please try one of these:\\n%s\", rType, role, managersWithRole))\n\t}\n\tlog.Printf(\"[CheckRole] role check success.\")\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package app_feature_impl\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"github.com\/watermint\/toolbox\/essentials\/log\/es_log\"\n\t\"github.com\/watermint\/toolbox\/infra\/app\"\n\t\"github.com\/watermint\/toolbox\/infra\/control\/app_feature\"\n\t\"github.com\/watermint\/toolbox\/infra\/control\/app_opt\"\n\t\"github.com\/watermint\/toolbox\/infra\/control\/app_workspace\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\nconst (\n\tConfigFileName = \"config.json\"\n)\n\nvar (\n\tErrorValueNotFound = errors.New(\"value not found\")\n)\n\nfunc NewFeature(opts app_opt.CommonOpts, ws app_workspace.Workspace) app_feature.Feature {\n\treturn &featureImpl{\n\t\tcom: opts,\n\t\tws: ws,\n\t}\n}\n\ntype featureImpl struct {\n\tcom app_opt.CommonOpts\n\tws app_workspace.Workspace\n\ttest bool\n\ttestWithMock bool\n}\n\nfunc (z featureImpl) pathConfig() string {\n\treturn filepath.Join(z.ws.Home(), ConfigFileName)\n}\n\nfunc (z featureImpl) loadConfig() (values map[string]interface{}, err error) {\n\tvalues = make(map[string]interface{})\n\tl := es_log.Default()\n\tp := filepath.Join(z.pathConfig(), ConfigFileName)\n\n\t_, err = os.Lstat(p)\n\tif err != nil {\n\t\tl.Debug(\"No file information; skip loading\", es_log.Error(err))\n\t\treturn values, nil\n\t}\n\n\tl.Debug(\"load config\", es_log.String(\"path\", p))\n\tb, err := ioutil.ReadFile(p)\n\tif err != nil {\n\t\tl.Debug(\"Unable to read config\", es_log.Error(err))\n\t\treturn\n\t}\n\tif err := json.Unmarshal(b, &values); err != nil {\n\t\tl.Debug(\"unable to unmarshal\", es_log.Error(err))\n\t\treturn values, err\n\t}\n\treturn\n}\n\nfunc (z featureImpl) getConfig(key string) (v interface{}, err error) {\n\tif values, err := z.loadConfig(); err != nil {\n\t\treturn nil, err\n\t} else if v, ok := values[key]; ok {\n\t\treturn v, nil\n\t} else {\n\t\treturn nil, ErrorValueNotFound\n\t}\n}\n\nfunc (z featureImpl) saveConfig(key string, v interface{}) (err error) {\n\tl := es_log.Default()\n\tp := z.pathConfig()\n\tl.Debug(\"load config\", es_log.String(\"path\", p))\n\tvalues, err := z.loadConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvalues[key] = v\n\n\tb, err := json.Marshal(values)\n\tif err != nil {\n\t\tl.Debug(\"Unable to marshal\", es_log.Error(err))\n\t\treturn err\n\t}\n\tif err := ioutil.WriteFile(p, b, 0644); err != nil {\n\t\tl.Debug(\"Unable to write config\", es_log.Error(err))\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (z featureImpl) ConsoleLogLevel() es_log.Level {\n\treturn app_feature.ConsoleLogLevel(z.test, z.com.Debug)\n}\n\nfunc (z featureImpl) AsTest(useMock bool) app_feature.Feature {\n\tz.test = true\n\tz.testWithMock = useMock\n\treturn &z\n}\n\nfunc (z featureImpl) AsQuiet() app_feature.Feature {\n\tz.com.Quiet = true\n\treturn &z\n}\n\nfunc (z featureImpl) OptInGet(oi app_feature.OptIn) (f app_feature.OptIn, found bool) {\n\tl := es_log.Default()\n\tkey := oi.OptInName(oi)\n\tl.Debug(\"OptInGet\", es_log.String(\"key\", key))\n\tif v, err := z.getConfig(key); err != nil {\n\t\tl.Debug(\"The key not found in the current config\", es_log.Error(err))\n\t\treturn oi, false\n\t} else if mv, ok := v.(map[string]interface{}); ok {\n\t\tif err := app_feature.OptInFrom(mv, oi); err != nil {\n\t\t\tl.Debug(\"The value is not a opt-in format\", es_log.Error(err))\n\t\t\treturn oi, false\n\t\t}\n\t}\n\treturn oi, 
true\n}\n\nfunc (z featureImpl) OptInUpdate(oi app_feature.OptIn) error {\n\tl := es_log.Default()\n\tkey := oi.OptInName(oi)\n\tl = l.With(es_log.String(\"key\", key))\n\tl.Debug(\"OptInUpdate\")\n\tif err := z.saveConfig(key, oi); err != nil {\n\t\tl.Debug(\"Failed to update opt-in\", es_log.Error(err))\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (z featureImpl) IsTestWithMock() bool {\n\treturn z.testWithMock\n}\n\nfunc (z featureImpl) Home() string {\n\treturn z.com.Workspace.Value()\n}\n\nfunc (z featureImpl) BudgetMemory() string {\n\treturn z.com.BudgetMemory.Value()\n}\n\nfunc (z featureImpl) BudgetStorage() string {\n\treturn z.com.BudgetStorage.Value()\n}\n\nfunc (z featureImpl) Concurrency() int {\n\treturn z.com.Concurrency\n}\n\nfunc (z featureImpl) IsProduction() bool {\n\treturn app.IsProduction()\n}\n\nfunc (z featureImpl) IsDebug() bool {\n\treturn z.com.Debug\n}\n\nfunc (z featureImpl) IsTest() bool {\n\treturn z.test\n}\n\nfunc (z featureImpl) IsQuiet() bool {\n\treturn z.com.Quiet\n}\n\nfunc (z featureImpl) IsSecure() bool {\n\treturn z.com.Secure\n}\n\nfunc (z featureImpl) IsLowMemory() bool {\n\treturn z.com.BudgetMemory.Value() == app_opt.BudgetLow\n}\n\nfunc (z featureImpl) IsAutoOpen() bool {\n\treturn z.com.AutoOpen\n}\n\nfunc (z featureImpl) UIFormat() string {\n\treturn z.com.Output.Value()\n}\n<commit_msg>#360 : incorrect path pointed<commit_after>package app_feature_impl\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"github.com\/watermint\/toolbox\/essentials\/log\/es_log\"\n\t\"github.com\/watermint\/toolbox\/infra\/app\"\n\t\"github.com\/watermint\/toolbox\/infra\/control\/app_feature\"\n\t\"github.com\/watermint\/toolbox\/infra\/control\/app_opt\"\n\t\"github.com\/watermint\/toolbox\/infra\/control\/app_workspace\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\nconst (\n\tConfigFileName = \"config.json\"\n)\n\nvar (\n\tErrorValueNotFound = errors.New(\"value not found\")\n)\n\nfunc NewFeature(opts app_opt.CommonOpts, ws app_workspace.Workspace) app_feature.Feature {\n\treturn &featureImpl{\n\t\tcom: opts,\n\t\tws: ws,\n\t}\n}\n\ntype featureImpl struct {\n\tcom app_opt.CommonOpts\n\tws app_workspace.Workspace\n\ttest bool\n\ttestWithMock bool\n}\n\nfunc (z featureImpl) pathConfig() string {\n\treturn filepath.Join(z.ws.Home(), ConfigFileName)\n}\n\nfunc (z featureImpl) loadConfig() (values map[string]interface{}, err error) {\n\tvalues = make(map[string]interface{})\n\tl := es_log.Default()\n\tp := z.pathConfig()\n\n\t_, err = os.Lstat(p)\n\tif err != nil {\n\t\tl.Debug(\"No file information; skip loading\", es_log.Error(err))\n\t\treturn values, nil\n\t}\n\n\tl.Debug(\"load config\", es_log.String(\"path\", p))\n\tb, err := ioutil.ReadFile(p)\n\tif err != nil {\n\t\tl.Debug(\"Unable to read config\", es_log.Error(err))\n\t\treturn\n\t}\n\tif err := json.Unmarshal(b, &values); err != nil {\n\t\tl.Debug(\"unable to unmarshal\", es_log.Error(err))\n\t\treturn values, err\n\t}\n\treturn\n}\n\nfunc (z featureImpl) getConfig(key string) (v interface{}, err error) {\n\tif values, err := z.loadConfig(); err != nil {\n\t\treturn nil, err\n\t} else if v, ok := values[key]; ok {\n\t\treturn v, nil\n\t} else {\n\t\treturn nil, ErrorValueNotFound\n\t}\n}\n\nfunc (z featureImpl) saveConfig(key string, v interface{}) (err error) {\n\tl := es_log.Default()\n\tp := z.pathConfig()\n\tl.Debug(\"load config\", es_log.String(\"path\", p))\n\tvalues, err := z.loadConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvalues[key] = v\n\n\tb, err := 
json.Marshal(values)\n\tif err != nil {\n\t\tl.Debug(\"Unable to marshal\", es_log.Error(err))\n\t\treturn err\n\t}\n\tif err := ioutil.WriteFile(p, b, 0644); err != nil {\n\t\tl.Debug(\"Unable to write config\", es_log.Error(err))\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (z featureImpl) ConsoleLogLevel() es_log.Level {\n\treturn app_feature.ConsoleLogLevel(z.test, z.com.Debug)\n}\n\nfunc (z featureImpl) AsTest(useMock bool) app_feature.Feature {\n\tz.test = true\n\tz.testWithMock = useMock\n\treturn &z\n}\n\nfunc (z featureImpl) AsQuiet() app_feature.Feature {\n\tz.com.Quiet = true\n\treturn &z\n}\n\nfunc (z featureImpl) OptInGet(oi app_feature.OptIn) (f app_feature.OptIn, found bool) {\n\tl := es_log.Default()\n\tkey := oi.OptInName(oi)\n\tl.Debug(\"OptInGet\", es_log.String(\"key\", key))\n\tif v, err := z.getConfig(key); err != nil {\n\t\tl.Debug(\"The key not found in the current config\", es_log.Error(err))\n\t\treturn oi, false\n\t} else if mv, ok := v.(map[string]interface{}); ok {\n\t\tif err := app_feature.OptInFrom(mv, oi); err != nil {\n\t\t\tl.Debug(\"The value is not a opt-in format\", es_log.Error(err))\n\t\t\treturn oi, false\n\t\t}\n\t}\n\treturn oi, true\n}\n\nfunc (z featureImpl) OptInUpdate(oi app_feature.OptIn) error {\n\tl := es_log.Default()\n\tkey := oi.OptInName(oi)\n\tl = l.With(es_log.String(\"key\", key))\n\tl.Debug(\"OptInUpdate\")\n\tif err := z.saveConfig(key, oi); err != nil {\n\t\tl.Debug(\"Failed to update opt-in\", es_log.Error(err))\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (z featureImpl) IsTestWithMock() bool {\n\treturn z.testWithMock\n}\n\nfunc (z featureImpl) Home() string {\n\treturn z.com.Workspace.Value()\n}\n\nfunc (z featureImpl) BudgetMemory() string {\n\treturn z.com.BudgetMemory.Value()\n}\n\nfunc (z featureImpl) BudgetStorage() string {\n\treturn z.com.BudgetStorage.Value()\n}\n\nfunc (z featureImpl) Concurrency() int {\n\treturn z.com.Concurrency\n}\n\nfunc (z featureImpl) IsProduction() bool {\n\treturn app.IsProduction()\n}\n\nfunc (z featureImpl) IsDebug() bool {\n\treturn z.com.Debug\n}\n\nfunc (z featureImpl) IsTest() bool {\n\treturn z.test\n}\n\nfunc (z featureImpl) IsQuiet() bool {\n\treturn z.com.Quiet\n}\n\nfunc (z featureImpl) IsSecure() bool {\n\treturn z.com.Secure\n}\n\nfunc (z featureImpl) IsLowMemory() bool {\n\treturn z.com.BudgetMemory.Value() == app_opt.BudgetLow\n}\n\nfunc (z featureImpl) IsAutoOpen() bool {\n\treturn z.com.AutoOpen\n}\n\nfunc (z featureImpl) UIFormat() string {\n\treturn z.com.Output.Value()\n}\n<|endoftext|>"} {"text":"<commit_before>package integration_test\n\nimport (\n\t\"bufio\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\n\tintegration \"github.com\/cloudfoundry\/bosh-bootloader\/integration-test\"\n\t\"github.com\/cloudfoundry\/bosh-bootloader\/integration-test\/actors\"\n\t\"github.com\/cloudfoundry\/bosh-bootloader\/testhelpers\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. 
\"github.com\/onsi\/gomega\"\n\n\t\"github.com\/pivotal-cf-experimental\/bosh-test\/bosh\"\n)\n\nconst (\n\tConcourseExampleManifestURL = \"https:\/\/raw.githubusercontent.com\/concourse\/concourse\/master\/docs\/setting-up\/installing.any\"\n\tConcourseReleaseURL = \"https:\/\/bosh.io\/d\/github.com\/concourse\/concourse\"\n\tGardenReleaseURL = \"https:\/\/bosh.io\/d\/github.com\/cloudfoundry-incubator\/garden-runc-release\"\n\tGardenReleaseName = \"garden-runc\"\n\tAWSStemcellURL = \"https:\/\/bosh.io\/d\/stemcells\/bosh-aws-xen-hvm-ubuntu-trusty-go_agent\"\n\tAWSStemcellName = \"bosh-aws-xen-hvm-ubuntu-trusty-go_agent\"\n\tGCPStemcellURL = \"https:\/\/bosh.io\/d\/stemcells\/bosh-google-kvm-ubuntu-trusty-go_agent\"\n\tGCPStemcellName = \"bosh-google-kvm-ubuntu-trusty-go_agent\"\n)\n\nvar _ = Describe(\"concourse deployment test\", func() {\n\tvar deployConcourseTest = func(bbl actors.BBL, stemcellURL, stemcellName, lbURL string, tlsMode bool, tlsBindPort int) {\n\t\tboshClient := bosh.NewClient(bosh.Config{\n\t\t\tURL: bbl.DirectorAddress(),\n\t\t\tUsername: bbl.DirectorUsername(),\n\t\t\tPassword: bbl.DirectorPassword(),\n\t\t\tAllowInsecureSSL: true,\n\t\t})\n\n\t\terr := downloadAndUploadRelease(boshClient, ConcourseReleaseURL)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\terr = downloadAndUploadRelease(boshClient, GardenReleaseURL)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\terr = downloadAndUploadStemcell(boshClient, stemcellURL)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tconcourseExampleManifest, err := downloadConcourseExampleManifest()\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tinfo, err := boshClient.Info()\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tstemcell, err := boshClient.Stemcell(stemcellName)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tconcourseRelease, err := boshClient.Release(\"concourse\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tgardenRelease, err := boshClient.Release(GardenReleaseName)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tconcourseManifestInputs := concourseManifestInputs{\n\t\t\tboshDirectorUUID: info.UUID,\n\t\t\twebExternalURL: lbURL,\n\t\t\ttlsMode: tlsMode,\n\t\t\ttlsBindPort: tlsBindPort,\n\t\t\tstemcellVersion: stemcell.Latest(),\n\t\t\tconcourseReleaseVersion: concourseRelease.Latest(),\n\t\t\tgardenReleaseVersion: gardenRelease.Latest(),\n\t\t}\n\t\tconcourseManifest, err := populateManifest(concourseExampleManifest, concourseManifestInputs)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t_, err = boshClient.Deploy([]byte(concourseManifest))\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tEventually(func() ([]bosh.VM, error) {\n\t\t\treturn boshClient.DeploymentVMs(\"concourse\")\n\t\t}, \"1m\", \"10s\").Should(ConsistOf([]bosh.VM{\n\t\t\t{JobName: \"worker\", Index: 0, State: \"running\"},\n\t\t\t{JobName: \"db\", Index: 0, State: \"running\"},\n\t\t\t{JobName: \"web\", Index: 0, State: \"running\"},\n\t\t}))\n\n\t\ttr := &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t}\n\t\tclient := &http.Client{Transport: tr}\n\n\t\tresp, err := client.Get(lbURL)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tExpect(resp.StatusCode).To(Equal(http.StatusOK))\n\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tExpect(string(body)).To(ContainSubstring(\"<title>Concourse<\/title>\"))\n\n\t\terr = boshClient.DeleteDeployment(\"concourse\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tbbl.Destroy()\n\t}\n\n\tDescribe(\"aws\", func() {\n\t\tvar (\n\t\t\tbbl 
actors.BBL\n\t\t\taws actors.AWS\n\t\t\tstate integration.State\n\t\t\tlbURL string\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tvar err error\n\t\t\tconfiguration, err := integration.LoadAWSConfig()\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tbbl = actors.NewBBL(configuration.StateFileDir, pathToBBL, configuration, \"bbl-concourse-env\")\n\t\t\taws = actors.NewAWS(configuration)\n\t\t\tstate = integration.NewState(configuration.StateFileDir)\n\n\t\t\tbbl.Up(actors.AWSIAAS)\n\n\t\t\tcertPath, err := testhelpers.WriteContentsToTempFile(testhelpers.BBL_CERT)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tkeyPath, err := testhelpers.WriteContentsToTempFile(testhelpers.BBL_KEY)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tbbl.CreateLB(\"concourse\", certPath, keyPath, \"\")\n\n\t\t\tlbURL = fmt.Sprintf(\"http:\/\/%s\", aws.LoadBalancers(state.StackName())[\"ConcourseLoadBalancerURL\"])\n\t\t})\n\n\t\tIt(\"is able to deploy concourse\", func() {\n\t\t\tdeployConcourseTest(bbl, AWSStemcellURL, AWSStemcellName, lbURL, false, 0)\n\t\t})\n\t})\n\n\tDescribe(\"gcp\", func() {\n\t\tvar (\n\t\t\tbbl actors.BBL\n\t\t\tgcp actors.GCP\n\t\t\tstate integration.State\n\t\t\tlbURL string\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tvar err error\n\t\t\tconfiguration, err := integration.LoadGCPConfig()\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tbbl = actors.NewBBL(configuration.StateFileDir, pathToBBL, configuration, \"bbl-concourse-env\")\n\t\t\tgcp = actors.NewGCP(configuration)\n\t\t\tstate = integration.NewState(configuration.StateFileDir)\n\n\t\t\tbbl.Up(actors.GCPIAAS)\n\n\t\t\tbbl.CreateGCPLB(\"concourse\")\n\n\t\t\tenvID := bbl.EnvID()\n\t\t\taddress, err := gcp.GetAddress(envID + \"-concourse\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tlbURL = fmt.Sprintf(\"https:\/\/%s\", address.Address)\n\t\t})\n\n\t\tIt(\"is able to deploy concourse\", func() {\n\t\t\tdeployConcourseTest(bbl, GCPStemcellURL, GCPStemcellName, lbURL, true, 443)\n\t\t})\n\t})\n\n})\n\nfunc downloadAndUploadStemcell(boshClient bosh.Client, stemcell string) error {\n\tfile, size, err := download(stemcell)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = boshClient.UploadStemcell(bosh.NewSizeReader(file, size))\n\treturn err\n}\n\nfunc downloadAndUploadRelease(boshClient bosh.Client, release string) error {\n\tfile, size, err := download(release)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = boshClient.UploadRelease(bosh.NewSizeReader(file, size))\n\treturn err\n}\n\nfunc downloadConcourseExampleManifest() (string, error) {\n\tresp, _, err := download(ConcourseExampleManifestURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar lines []string\n\tscanner := bufio.NewScanner(resp)\n\tfor scanner.Scan() {\n\t\tlines = append(lines, scanner.Text())\n\t}\n\tstartIndexOfYamlCode := -1\n\tendIndexOfYamlCode := -1\n\n\tfor index, line := range lines {\n\t\tstartMatched, startErr := regexp.MatchString(`^\\s*\\\\codeblock{yaml}{$`, line)\n\t\tendMatched, endErr := regexp.MatchString(`^\\s*}$`, line)\n\t\tif endErr != nil {\n\t\t\treturn \"\", endErr\n\t\t}\n\n\t\tif startErr != nil {\n\t\t\tpanic(startErr)\n\t\t}\n\n\t\tif startMatched && startIndexOfYamlCode < 0 {\n\t\t\tstartIndexOfYamlCode = index + 1\n\t\t}\n\t\tif endMatched && endIndexOfYamlCode < 0 && startIndexOfYamlCode > 0 {\n\t\t\tendIndexOfYamlCode = index\n\t\t}\n\t}\n\n\tyamlDocument := lines[startIndexOfYamlCode:endIndexOfYamlCode]\n\n\tre := regexp.MustCompile(`^(\\s*)---`)\n\tresults := re.FindAllStringSubmatch(yamlDocument[0], 
-1)\n\tindentation := results[0][1]\n\tfor index, line := range yamlDocument {\n\t\tindentationRegexp := regexp.MustCompile(fmt.Sprintf(`^%s`, indentation))\n\t\tescapesRegexp := regexp.MustCompile(`\\\\([{}])`)\n\t\ttlsRegexp := regexp.MustCompile(\"^.*(tls_key|tls_cert).*$\")\n\n\t\tline = indentationRegexp.ReplaceAllString(line, \"\")\n\t\tline = escapesRegexp.ReplaceAllString(line, \"$1\")\n\t\tline = tlsRegexp.ReplaceAllString(line, \"\")\n\n\t\tyamlDocument[index] = line\n\n\t}\n\n\tyamlString := strings.Join(yamlDocument, \"\\n\")\n\treturn yamlString, nil\n}\n\nfunc download(location string) (io.Reader, int64, error) {\n\tresp, err := http.Get(location)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn resp.Body, resp.ContentLength, nil\n}\n<commit_msg>Fix concourse integration test bbl up call<commit_after>package integration_test\n\nimport (\n\t\"bufio\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\n\tintegration \"github.com\/cloudfoundry\/bosh-bootloader\/integration-test\"\n\t\"github.com\/cloudfoundry\/bosh-bootloader\/integration-test\/actors\"\n\t\"github.com\/cloudfoundry\/bosh-bootloader\/testhelpers\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"github.com\/pivotal-cf-experimental\/bosh-test\/bosh\"\n)\n\nconst (\n\tConcourseExampleManifestURL = \"https:\/\/raw.githubusercontent.com\/concourse\/concourse\/master\/docs\/setting-up\/installing.any\"\n\tConcourseReleaseURL = \"https:\/\/bosh.io\/d\/github.com\/concourse\/concourse\"\n\tGardenReleaseURL = \"https:\/\/bosh.io\/d\/github.com\/cloudfoundry-incubator\/garden-runc-release\"\n\tGardenReleaseName = \"garden-runc\"\n\tAWSStemcellURL = \"https:\/\/bosh.io\/d\/stemcells\/bosh-aws-xen-hvm-ubuntu-trusty-go_agent\"\n\tAWSStemcellName = \"bosh-aws-xen-hvm-ubuntu-trusty-go_agent\"\n\tGCPStemcellURL = \"https:\/\/bosh.io\/d\/stemcells\/bosh-google-kvm-ubuntu-trusty-go_agent\"\n\tGCPStemcellName = \"bosh-google-kvm-ubuntu-trusty-go_agent\"\n)\n\nvar _ = Describe(\"concourse deployment test\", func() {\n\tvar deployConcourseTest = func(bbl actors.BBL, stemcellURL, stemcellName, lbURL string, tlsMode bool, tlsBindPort int) {\n\t\tboshClient := bosh.NewClient(bosh.Config{\n\t\t\tURL: bbl.DirectorAddress(),\n\t\t\tUsername: bbl.DirectorUsername(),\n\t\t\tPassword: bbl.DirectorPassword(),\n\t\t\tAllowInsecureSSL: true,\n\t\t})\n\n\t\terr := downloadAndUploadRelease(boshClient, ConcourseReleaseURL)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\terr = downloadAndUploadRelease(boshClient, GardenReleaseURL)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\terr = downloadAndUploadStemcell(boshClient, stemcellURL)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tconcourseExampleManifest, err := downloadConcourseExampleManifest()\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tinfo, err := boshClient.Info()\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tstemcell, err := boshClient.Stemcell(stemcellName)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tconcourseRelease, err := boshClient.Release(\"concourse\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tgardenRelease, err := boshClient.Release(GardenReleaseName)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tconcourseManifestInputs := concourseManifestInputs{\n\t\t\tboshDirectorUUID: info.UUID,\n\t\t\twebExternalURL: lbURL,\n\t\t\ttlsMode: tlsMode,\n\t\t\ttlsBindPort: tlsBindPort,\n\t\t\tstemcellVersion: stemcell.Latest(),\n\t\t\tconcourseReleaseVersion: 
concourseRelease.Latest(),\n\t\t\tgardenReleaseVersion: gardenRelease.Latest(),\n\t\t}\n\t\tconcourseManifest, err := populateManifest(concourseExampleManifest, concourseManifestInputs)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t_, err = boshClient.Deploy([]byte(concourseManifest))\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tEventually(func() ([]bosh.VM, error) {\n\t\t\treturn boshClient.DeploymentVMs(\"concourse\")\n\t\t}, \"1m\", \"10s\").Should(ConsistOf([]bosh.VM{\n\t\t\t{JobName: \"worker\", Index: 0, State: \"running\"},\n\t\t\t{JobName: \"db\", Index: 0, State: \"running\"},\n\t\t\t{JobName: \"web\", Index: 0, State: \"running\"},\n\t\t}))\n\n\t\ttr := &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t}\n\t\tclient := &http.Client{Transport: tr}\n\n\t\tresp, err := client.Get(lbURL)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tExpect(resp.StatusCode).To(Equal(http.StatusOK))\n\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tExpect(string(body)).To(ContainSubstring(\"<title>Concourse<\/title>\"))\n\n\t\terr = boshClient.DeleteDeployment(\"concourse\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tbbl.Destroy()\n\t}\n\n\tDescribe(\"aws\", func() {\n\t\tvar (\n\t\t\tbbl actors.BBL\n\t\t\taws actors.AWS\n\t\t\tstate integration.State\n\t\t\tlbURL string\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tvar err error\n\t\t\tconfiguration, err := integration.LoadAWSConfig()\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tbbl = actors.NewBBL(configuration.StateFileDir, pathToBBL, configuration, \"bbl-concourse-env\")\n\t\t\taws = actors.NewAWS(configuration)\n\t\t\tstate = integration.NewState(configuration.StateFileDir)\n\n\t\t\tbbl.Up(actors.AWSIAAS)\n\n\t\t\tcertPath, err := testhelpers.WriteContentsToTempFile(testhelpers.BBL_CERT)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tkeyPath, err := testhelpers.WriteContentsToTempFile(testhelpers.BBL_KEY)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tbbl.CreateLB(\"concourse\", certPath, keyPath, \"\")\n\n\t\t\tlbURL = fmt.Sprintf(\"http:\/\/%s\", aws.LoadBalancers(state.StackName())[\"ConcourseLoadBalancerURL\"])\n\t\t})\n\n\t\tIt(\"is able to deploy concourse\", func() {\n\t\t\tdeployConcourseTest(bbl, AWSStemcellURL, AWSStemcellName, lbURL, false, 0)\n\t\t})\n\t})\n\n\tDescribe(\"gcp\", func() {\n\t\tvar (\n\t\t\tbbl actors.BBL\n\t\t\tgcp actors.GCP\n\t\t\tstate integration.State\n\t\t\tlbURL string\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tvar err error\n\t\t\tconfiguration, err := integration.LoadGCPConfig()\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tbbl = actors.NewBBL(configuration.StateFileDir, pathToBBL, configuration, \"bbl-concourse-env\")\n\t\t\tgcp = actors.NewGCP(configuration)\n\t\t\tstate = integration.NewState(configuration.StateFileDir)\n\n\t\t\tbbl.Up(actors.GCPIAAS, true)\n\n\t\t\tbbl.CreateGCPLB(\"concourse\")\n\n\t\t\tenvID := bbl.EnvID()\n\t\t\taddress, err := gcp.GetAddress(envID + \"-concourse\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tlbURL = fmt.Sprintf(\"https:\/\/%s\", address.Address)\n\t\t})\n\n\t\tIt(\"is able to deploy concourse\", func() {\n\t\t\tdeployConcourseTest(bbl, GCPStemcellURL, GCPStemcellName, lbURL, true, 443)\n\t\t})\n\t})\n\n})\n\nfunc downloadAndUploadStemcell(boshClient bosh.Client, stemcell string) error {\n\tfile, size, err := download(stemcell)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = boshClient.UploadStemcell(bosh.NewSizeReader(file, size))\n\treturn err\n}\n\nfunc 
downloadAndUploadRelease(boshClient bosh.Client, release string) error {\n\tfile, size, err := download(release)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = boshClient.UploadRelease(bosh.NewSizeReader(file, size))\n\treturn err\n}\n\nfunc downloadConcourseExampleManifest() (string, error) {\n\tresp, _, err := download(ConcourseExampleManifestURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar lines []string\n\tscanner := bufio.NewScanner(resp)\n\tfor scanner.Scan() {\n\t\tlines = append(lines, scanner.Text())\n\t}\n\tstartIndexOfYamlCode := -1\n\tendIndexOfYamlCode := -1\n\n\tfor index, line := range lines {\n\t\tstartMatched, startErr := regexp.MatchString(`^\\s*\\\\codeblock{yaml}{$`, line)\n\t\tendMatched, endErr := regexp.MatchString(`^\\s*}$`, line)\n\t\tif endErr != nil {\n\t\t\treturn \"\", endErr\n\t\t}\n\n\t\tif startErr != nil {\n\t\t\tpanic(startErr)\n\t\t}\n\n\t\tif startMatched && startIndexOfYamlCode < 0 {\n\t\t\tstartIndexOfYamlCode = index + 1\n\t\t}\n\t\tif endMatched && endIndexOfYamlCode < 0 && startIndexOfYamlCode > 0 {\n\t\t\tendIndexOfYamlCode = index\n\t\t}\n\t}\n\n\tyamlDocument := lines[startIndexOfYamlCode:endIndexOfYamlCode]\n\n\tre := regexp.MustCompile(`^(\\s*)---`)\n\tresults := re.FindAllStringSubmatch(yamlDocument[0], -1)\n\tindentation := results[0][1]\n\tfor index, line := range yamlDocument {\n\t\tindentationRegexp := regexp.MustCompile(fmt.Sprintf(`^%s`, indentation))\n\t\tescapesRegexp := regexp.MustCompile(`\\\\([{}])`)\n\t\ttlsRegexp := regexp.MustCompile(\"^.*(tls_key|tls_cert).*$\")\n\n\t\tline = indentationRegexp.ReplaceAllString(line, \"\")\n\t\tline = escapesRegexp.ReplaceAllString(line, \"$1\")\n\t\tline = tlsRegexp.ReplaceAllString(line, \"\")\n\n\t\tyamlDocument[index] = line\n\n\t}\n\n\tyamlString := strings.Join(yamlDocument, \"\\n\")\n\treturn yamlString, nil\n}\n\nfunc download(location string) (io.Reader, int64, error) {\n\tresp, err := http.Get(location)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn resp.Body, resp.ContentLength, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package isolated\n\nimport (\n\t\"code.cloudfoundry.org\/cli\/integration\/helpers\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/gbytes\"\n\t. \"github.com\/onsi\/gomega\/gexec\"\n)\n\nvar _ = Describe(\"auth command\", func() {\n\tContext(\"Help\", func() {\n\t\tIt(\"displays the help information\", func() {\n\t\t\tsession := helpers.CF(\"auth\", \"--help\")\n\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\t\t\tEventually(session).Should(Say(\"auth - Authenticate non-interactively\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"USAGE:\"))\n\t\t\tEventually(session).Should(Say(\"cf auth USERNAME PASSWORD\\n\"))\n\t\t\tEventually(session).Should(Say(\"cf auth CLIENT_ID CLIENT_SECRET --client-credentials\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"ENVIRONMENT VARIABLES:\"))\n\t\t\tEventually(session).Should(Say(\"CF_USERNAME=user\\\\s+Authenticating user. Overridden if USERNAME argument is provided.\"))\n\t\t\tEventually(session).Should(Say(\"CF_PASSWORD=password\\\\s+Password associated with user. 
Overriden if PASSWORD argument is provided.\"))\n\n\t\t\tEventually(session).Should(Say(\"WARNING:\"))\n\t\t\tEventually(session).Should(Say(\"Providing your password as a command line option is highly discouraged\"))\n\t\t\tEventually(session).Should(Say(\"Your password may be visible to others and may be recorded in your shell history\\n\"))\n\t\t\tEventually(session).Should(Say(\"Consider using the CF_PASSWORD environment variable instead\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"EXAMPLES:\"))\n\t\t\tEventually(session).Should(Say(\"cf auth name@example\\\\.com \\\"my password\\\" \\\\(use quotes for passwords with a space\\\\)\"))\n\t\t\tEventually(session).Should(Say(\"cf auth name@example\\\\.com \\\\\\\"\\\\\\\\\\\"password\\\\\\\\\\\"\\\\\\\" \\\\(escape quotes if used in password\\\\)\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"OPTIONS:\"))\n\t\t\tEventually(session).Should(Say(\"--client-credentials\\\\s+Use \\\\(non-user\\\\) service account \\\\(also called client credentials\\\\)\\n\"))\n\t\t\tEventually(session).Should(Say(\"--origin\\\\s+Indicates the identity provider to be used for authentication\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"SEE ALSO:\"))\n\t\t\tEventually(session).Should(Say(\"api, login, target\"))\n\n\t\t\tEventually(session).Should(Exit(0))\n\t\t})\n\t})\n\n\tContext(\"when no positional arguments are provided\", func() {\n\t\tContext(\"and no env variables are provided\", func() {\n\t\t\tIt(\"errors-out with the help information\", func() {\n\t\t\t\tsession := helpers.CF(\"auth\")\n\t\t\t\tEventually(session.Err).Should(Say(\"Username and password not provided.\"))\n\t\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when env variables are provided\", func() {\n\t\t\tIt(\"authenticates the user\", func() {\n\t\t\t\tusername, password := helpers.GetCredentials()\n\t\t\t\tenv := map[string]string{\n\t\t\t\t\t\"CF_USERNAME\": username,\n\t\t\t\t\t\"CF_PASSWORD\": password,\n\t\t\t\t}\n\t\t\t\tsession := helpers.CFWithEnv(env, \"auth\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\tEventually(session).Should(Say(\"Use 'cf target' to view or set your target org and space\"))\n\n\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when only a username is provided\", func() {\n\t\tIt(\"errors-out with a password required error and the help information\", func() {\n\t\t\tsession := helpers.CF(\"auth\", \"some-user\")\n\t\t\tEventually(session.Err).Should(Say(\"Password not provided.\"))\n\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\n\t\t\tEventually(session).Should(Exit(1))\n\t\t})\n\t})\n\n\tContext(\"when only a password is provided\", func() {\n\t\tIt(\"errors-out with a username required error and the help information\", func() {\n\t\t\tenv := map[string]string{\n\t\t\t\t\"CF_PASSWORD\": \"some-pass\",\n\t\t\t}\n\t\t\tsession := helpers.CFWithEnv(env, \"auth\")\n\t\t\tEventually(session.Err).Should(Say(\"Username not provided.\"))\n\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\n\t\t\tEventually(session).Should(Exit(1))\n\t\t})\n\t})\n\n\tContext(\"when too many arguments are provided\", func() {\n\t\tIt(\"displays an 'unknown flag' error message\", func() {\n\t\t\tsession := helpers.CF(\"auth\", \"some-username\", \"some-password\", \"-a\", 
\"api.bosh-lite.com\")\n\n\t\t\tEventually(session.Err).Should(Say(\"Incorrect Usage: unknown flag `a'\"))\n\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\n\t\t\tEventually(session).Should(Exit(1))\n\t\t})\n\t})\n\n\tContext(\"when the API endpoint is not set\", func() {\n\t\tBeforeEach(func() {\n\t\t\thelpers.UnsetAPI()\n\t\t})\n\n\t\tIt(\"displays an error message\", func() {\n\t\t\tsession := helpers.CF(\"auth\", \"some-username\", \"some-password\")\n\n\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\tEventually(session.Err).Should(Say(\"No API endpoint set\\\\. Use 'cf login' or 'cf api' to target an endpoint\\\\.\"))\n\n\t\t\tEventually(session).Should(Exit(1))\n\t\t})\n\t})\n\n\tContext(\"when no flags are set (logging in with password grant type)\", func() {\n\t\tContext(\"when the user provides an invalid username\/password combo\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.LoginCF()\n\t\t\t\thelpers.TargetOrgAndSpace(ReadOnlyOrg, ReadOnlySpace)\n\t\t\t})\n\n\t\t\tIt(\"clears the cached tokens and target info, then displays an error message\", func() {\n\t\t\t\tsession := helpers.CF(\"auth\", \"some-username\", \"some-password\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"Credentials were rejected, please try again\\\\.\"))\n\t\t\t\tEventually(session).Should(Exit(1))\n\n\t\t\t\t\/\/ Verify that the user is not logged-in\n\t\t\t\ttargetSession1 := helpers.CF(\"target\")\n\t\t\t\tEventually(targetSession1.Err).Should(Say(\"Not logged in\\\\. Use 'cf login' to log in\\\\.\"))\n\t\t\t\tEventually(targetSession1).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(targetSession1).Should(Exit(1))\n\n\t\t\t\t\/\/ Verify that neither org nor space is targeted\n\t\t\t\thelpers.LoginCF()\n\t\t\t\ttargetSession2 := helpers.CF(\"target\")\n\t\t\t\tEventually(targetSession2).Should(Say(\"No org or space targeted, use 'cf target -o ORG -s SPACE'\"))\n\t\t\t\tEventually(targetSession2).Should(Exit(0))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the username and password are valid\", func() {\n\t\t\tIt(\"authenticates the user\", func() {\n\t\t\t\tusername, password := helpers.GetCredentials()\n\t\t\t\tsession := helpers.CF(\"auth\", username, password)\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\tEventually(session).Should(Say(\"Use 'cf target' to view or set your target org and space\"))\n\n\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when the 'client-credentials' flag is set\", func() {\n\t\tContext(\"when the user provides an invalid client id\/secret combo\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.LoginCF()\n\t\t\t\thelpers.TargetOrgAndSpace(ReadOnlyOrg, ReadOnlySpace)\n\t\t\t})\n\n\t\t\tIt(\"clears the cached tokens and target info, then displays an error message\", func() {\n\t\t\t\tsession := helpers.CF(\"auth\", \"some-client-id\", \"some-client-secret\", \"--client-credentials\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"Credentials 
were rejected, please try again\\\\.\"))\n\t\t\t\tEventually(session).Should(Exit(1))\n\n\t\t\t\t\/\/ Verify that the user is not logged-in\n\t\t\t\ttargetSession1 := helpers.CF(\"target\")\n\t\t\t\tEventually(targetSession1.Err).Should(Say(\"Not logged in\\\\. Use 'cf login' to log in\\\\.\"))\n\t\t\t\tEventually(targetSession1).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(targetSession1).Should(Exit(1))\n\n\t\t\t\t\/\/ Verify that neither org nor space is targeted\n\t\t\t\thelpers.LoginCF()\n\t\t\t\ttargetSession2 := helpers.CF(\"target\")\n\t\t\t\tEventually(targetSession2).Should(Say(\"No org or space targeted, use 'cf target -o ORG -s SPACE'\"))\n\t\t\t\tEventually(targetSession2).Should(Exit(0))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the client id and client secret are valid\", func() {\n\t\t\tIt(\"authenticates the user\", func() {\n\t\t\t\tclientID, clientSecret := helpers.SkipIfClientCredentialsNotSet()\n\t\t\t\tsession := helpers.CF(\"auth\", clientID, clientSecret, \"--client-credentials\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\tEventually(session).Should(Say(\"Use 'cf target' to view or set your target org and space\"))\n\n\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when a user authenticates with valid client credentials\", func() {\n\t\tBeforeEach(func() {\n\t\t\tclientID, clientSecret := helpers.SkipIfClientCredentialsNotSet()\n\t\t\tsession := helpers.CF(\"auth\", clientID, clientSecret, \"--client-credentials\")\n\t\t\tEventually(session).Should(Exit(0))\n\t\t})\n\n\t\tContext(\"when a different user authenticates with valid password credentials\", func() {\n\t\t\tIt(\"should fail authentication and display an error informing the user they need to log out\", func() {\n\t\t\t\tusername, password := helpers.GetCredentials()\n\t\t\t\tsession := helpers.CF(\"auth\", username, password)\n\n\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"Service account currently logged in\\\\. 
Use 'cf logout' to log out service account and try again\\\\.\"))\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t})\n\t\t})\n\n\t})\n\n\tContext(\"when the origin flag is set\", func() {\n\t\tContext(\"when a user authenticates with valid user credentials for that origin\", func() {\n\t\t\tIt(\"authenticates the user\", func() {\n\t\t\t\tusername, password := helpers.GetOIDCCredentials()\n\t\t\t\tsession := helpers.CF(\"auth\", username, password, \"--origin\", \"garbage\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\tEventually(session).Should(Say(\"Use 'cf target' to view or set your target org and space\"))\n\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the user provides the default origin and valid credentials\", func() {\n\t\t\tIt(\"authenticates the user\", func() {\n\t\t\t\tusername, password := helpers.GetCredentials()\n\t\t\t\tsession := helpers.CF(\"auth\", username, password, \"--origin\", \"uaa\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\tEventually(session).Should(Say(\"Use 'cf target' to view or set your target org and space\"))\n\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t})\n\t\t})\n\t})\n})\n<commit_msg>fix integration test<commit_after>package isolated\n\nimport (\n\t\"code.cloudfoundry.org\/cli\/integration\/helpers\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/gbytes\"\n\t. \"github.com\/onsi\/gomega\/gexec\"\n)\n\nvar _ = Describe(\"auth command\", func() {\n\tContext(\"Help\", func() {\n\t\tIt(\"displays the help information\", func() {\n\t\t\tsession := helpers.CF(\"auth\", \"--help\")\n\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\t\t\tEventually(session).Should(Say(\"auth - Authenticate non-interactively\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"USAGE:\"))\n\t\t\tEventually(session).Should(Say(\"cf auth USERNAME PASSWORD\\n\"))\n\t\t\tEventually(session).Should(Say(\"cf auth CLIENT_ID CLIENT_SECRET --client-credentials\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"ENVIRONMENT VARIABLES:\"))\n\t\t\tEventually(session).Should(Say(\"CF_USERNAME=user\\\\s+Authenticating user. Overridden if USERNAME argument is provided.\"))\n\t\t\tEventually(session).Should(Say(\"CF_PASSWORD=password\\\\s+Password associated with user. 
Overriden if PASSWORD argument is provided.\"))\n\n\t\t\tEventually(session).Should(Say(\"WARNING:\"))\n\t\t\tEventually(session).Should(Say(\"Providing your password as a command line option is highly discouraged\"))\n\t\t\tEventually(session).Should(Say(\"Your password may be visible to others and may be recorded in your shell history\\n\"))\n\t\t\tEventually(session).Should(Say(\"Consider using the CF_PASSWORD environment variable instead\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"EXAMPLES:\"))\n\t\t\tEventually(session).Should(Say(\"cf auth name@example\\\\.com \\\"my password\\\" \\\\(use quotes for passwords with a space\\\\)\"))\n\t\t\tEventually(session).Should(Say(\"cf auth name@example\\\\.com \\\\\\\"\\\\\\\\\\\"password\\\\\\\\\\\"\\\\\\\" \\\\(escape quotes if used in password\\\\)\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"OPTIONS:\"))\n\t\t\tEventually(session).Should(Say(\"--client-credentials\\\\s+Use \\\\(non-user\\\\) service account \\\\(also called client credentials\\\\)\\n\"))\n\t\t\tEventually(session).Should(Say(\"--origin\\\\s+Indicates the identity provider to be used for authentication\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"SEE ALSO:\"))\n\t\t\tEventually(session).Should(Say(\"api, login, target\"))\n\n\t\t\tEventually(session).Should(Exit(0))\n\t\t})\n\t})\n\n\tContext(\"when no positional arguments are provided\", func() {\n\t\tContext(\"and no env variables are provided\", func() {\n\t\t\tIt(\"errors-out with the help information\", func() {\n\t\t\t\tsession := helpers.CF(\"auth\")\n\t\t\t\tEventually(session.Err).Should(Say(\"Username and password not provided.\"))\n\t\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when env variables are provided\", func() {\n\t\t\tIt(\"authenticates the user\", func() {\n\t\t\t\tusername, password := helpers.GetCredentials()\n\t\t\t\tenv := map[string]string{\n\t\t\t\t\t\"CF_USERNAME\": username,\n\t\t\t\t\t\"CF_PASSWORD\": password,\n\t\t\t\t}\n\t\t\t\tsession := helpers.CFWithEnv(env, \"auth\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\tEventually(session).Should(Say(\"Use 'cf target' to view or set your target org and space\"))\n\n\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when only a username is provided\", func() {\n\t\tIt(\"errors-out with a password required error and the help information\", func() {\n\t\t\tsession := helpers.CF(\"auth\", \"some-user\")\n\t\t\tEventually(session.Err).Should(Say(\"Password not provided.\"))\n\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\n\t\t\tEventually(session).Should(Exit(1))\n\t\t})\n\t})\n\n\tContext(\"when only a password is provided\", func() {\n\t\tIt(\"errors-out with a username required error and the help information\", func() {\n\t\t\tenv := map[string]string{\n\t\t\t\t\"CF_PASSWORD\": \"some-pass\",\n\t\t\t}\n\t\t\tsession := helpers.CFWithEnv(env, \"auth\")\n\t\t\tEventually(session.Err).Should(Say(\"Username not provided.\"))\n\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\n\t\t\tEventually(session).Should(Exit(1))\n\t\t})\n\t})\n\n\tContext(\"when too many arguments are provided\", func() {\n\t\tIt(\"displays an 'unknown flag' error message\", func() {\n\t\t\tsession := helpers.CF(\"auth\", \"some-username\", \"some-password\", \"-a\", 
\"api.bosh-lite.com\")\n\n\t\t\tEventually(session.Err).Should(Say(\"Incorrect Usage: unknown flag `a'\"))\n\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\n\t\t\tEventually(session).Should(Exit(1))\n\t\t})\n\t})\n\n\tContext(\"when the API endpoint is not set\", func() {\n\t\tBeforeEach(func() {\n\t\t\thelpers.UnsetAPI()\n\t\t})\n\n\t\tIt(\"displays an error message\", func() {\n\t\t\tsession := helpers.CF(\"auth\", \"some-username\", \"some-password\")\n\n\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\tEventually(session.Err).Should(Say(\"No API endpoint set\\\\. Use 'cf login' or 'cf api' to target an endpoint\\\\.\"))\n\n\t\t\tEventually(session).Should(Exit(1))\n\t\t})\n\t})\n\n\tContext(\"when no flags are set (logging in with password grant type)\", func() {\n\t\tContext(\"when the user provides an invalid username\/password combo\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.LoginCF()\n\t\t\t\thelpers.TargetOrgAndSpace(ReadOnlyOrg, ReadOnlySpace)\n\t\t\t})\n\n\t\t\tIt(\"clears the cached tokens and target info, then displays an error message\", func() {\n\t\t\t\tsession := helpers.CF(\"auth\", \"some-username\", \"some-password\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"Credentials were rejected, please try again\\\\.\"))\n\t\t\t\tEventually(session).Should(Exit(1))\n\n\t\t\t\t\/\/ Verify that the user is not logged-in\n\t\t\t\ttargetSession1 := helpers.CF(\"target\")\n\t\t\t\tEventually(targetSession1.Err).Should(Say(\"Not logged in\\\\. Use 'cf login' to log in\\\\.\"))\n\t\t\t\tEventually(targetSession1).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(targetSession1).Should(Exit(1))\n\n\t\t\t\t\/\/ Verify that neither org nor space is targeted\n\t\t\t\thelpers.LoginCF()\n\t\t\t\ttargetSession2 := helpers.CF(\"target\")\n\t\t\t\tEventually(targetSession2).Should(Say(\"No org or space targeted, use 'cf target -o ORG -s SPACE'\"))\n\t\t\t\tEventually(targetSession2).Should(Exit(0))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the username and password are valid\", func() {\n\t\t\tIt(\"authenticates the user\", func() {\n\t\t\t\tusername, password := helpers.GetCredentials()\n\t\t\t\tsession := helpers.CF(\"auth\", username, password)\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\tEventually(session).Should(Say(\"Use 'cf target' to view or set your target org and space\"))\n\n\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when the 'client-credentials' flag is set\", func() {\n\t\tContext(\"when the user provides an invalid client id\/secret combo\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.LoginCF()\n\t\t\t\thelpers.TargetOrgAndSpace(ReadOnlyOrg, ReadOnlySpace)\n\t\t\t})\n\n\t\t\tIt(\"clears the cached tokens and target info, then displays an error message\", func() {\n\t\t\t\tsession := helpers.CF(\"auth\", \"some-client-id\", \"some-client-secret\", \"--client-credentials\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"Credentials 
were rejected, please try again\\\\.\"))\n\t\t\t\tEventually(session).Should(Exit(1))\n\n\t\t\t\t\/\/ Verify that the user is not logged-in\n\t\t\t\ttargetSession1 := helpers.CF(\"target\")\n\t\t\t\tEventually(targetSession1.Err).Should(Say(\"Not logged in\\\\. Use 'cf login' to log in\\\\.\"))\n\t\t\t\tEventually(targetSession1).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(targetSession1).Should(Exit(1))\n\n\t\t\t\t\/\/ Verify that neither org nor space is targeted\n\t\t\t\thelpers.LoginCF()\n\t\t\t\ttargetSession2 := helpers.CF(\"target\")\n\t\t\t\tEventually(targetSession2).Should(Say(\"No org or space targeted, use 'cf target -o ORG -s SPACE'\"))\n\t\t\t\tEventually(targetSession2).Should(Exit(0))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the client id and client secret are valid\", func() {\n\t\t\tIt(\"authenticates the user\", func() {\n\t\t\t\tclientID, clientSecret := helpers.SkipIfClientCredentialsNotSet()\n\t\t\t\tsession := helpers.CF(\"auth\", clientID, clientSecret, \"--client-credentials\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\tEventually(session).Should(Say(\"Use 'cf target' to view or set your target org and space\"))\n\n\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when a user authenticates with valid client credentials\", func() {\n\t\tBeforeEach(func() {\n\t\t\tclientID, clientSecret := helpers.SkipIfClientCredentialsNotSet()\n\t\t\tsession := helpers.CF(\"auth\", clientID, clientSecret, \"--client-credentials\")\n\t\t\tEventually(session).Should(Exit(0))\n\t\t})\n\n\t\tContext(\"when a different user authenticates with valid password credentials\", func() {\n\t\t\tIt(\"should fail authentication and display an error informing the user they need to log out\", func() {\n\t\t\t\tusername, password := helpers.GetCredentials()\n\t\t\t\tsession := helpers.CF(\"auth\", username, password)\n\n\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"Service account currently logged in\\\\. 
Use 'cf logout' to log out service account and try again\\\\.\"))\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t})\n\t\t})\n\n\t})\n\n\tContext(\"when the origin flag is set\", func() {\n\t\tContext(\"when a user authenticates with valid user credentials for that origin\", func() {\n\t\t\tIt(\"authenticates the user\", func() {\n\t\t\t\tusername, password := helpers.GetOIDCCredentials()\n\t\t\t\tsession := helpers.CF(\"auth\", username, password, \"--origin\", \"cli-oidc-provider\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\tEventually(session).Should(Say(\"Use 'cf target' to view or set your target org and space\"))\n\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the user provides the default origin and valid credentials\", func() {\n\t\t\tIt(\"authenticates the user\", func() {\n\t\t\t\tusername, password := helpers.GetCredentials()\n\t\t\t\tsession := helpers.CF(\"auth\", username, password, \"--origin\", \"uaa\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\tEventually(session).Should(Say(\"Use 'cf target' to view or set your target org and space\"))\n\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t})\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gcsx_test\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/jacobsa\/gcloud\/gcs\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\/gcsfake\"\n\t\"github.com\/jacobsa\/timeutil\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nvar contentTypeBucketTestCases = []struct {\n\tname string\n\trequest string \/\/ ContentType in request\n\texpected string \/\/ Expected final type\n}{\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ No extension\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t0: {\n\t\tname: \"foo\/bar\",\n\t\trequest: \"\",\n\t\texpected: \"\",\n\t},\n\n\t1: {\n\t\tname: \"foo\/bar\",\n\t\trequest: \"image\/jpeg\",\n\t\texpected: \"image\/jpeg\",\n\t},\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Unknown extension\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t2: {\n\t\tname: \"foo\/bar.asdf\",\n\t\trequest: \"\",\n\t\texpected: \"\",\n\t},\n\n\t3: {\n\t\tname: \"foo\/bar.asdf\",\n\t\trequest: \"image\/jpeg\",\n\t\texpected: \"image\/jpeg\",\n\t},\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Known extension\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t4: {\n\t\tname: \"foo\/bar.jpg\",\n\t\trequest: \"\",\n\t\texpected: \"image\/jpeg\",\n\t},\n\n\t5: {\n\t\tname: \"foo\/bar.jpg\",\n\t\trequest: \"text\/plain\",\n\t\texpected: \"text\/plain\",\n\t},\n}\n\nfunc 
TestContentTypeBucket_CreateObject(t *testing.T) {\n\tfor i, tc := range contentTypeBucketTestCases {\n\t\t\/\/ Set up a bucket.\n\t\tbucket := gcsfake.NewFakeBucket(timeutil.RealClock(), \"\")\n\n\t\t\/\/ Create the object.\n\t\treq := &gcs.CreateObjectRequest{\n\t\t\tName: tc.name,\n\t\t\tContentType: tc.request,\n\t\t\tContents: strings.NewReader(\"\"),\n\t\t}\n\n\t\to, err := bucket.CreateObject(context.Background(), req)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Test case %d: CreateObject: %v\", i, err)\n\t\t}\n\n\t\t\/\/ Check the content type.\n\t\tif got, want := o.ContentType, tc.expected; got != want {\n\t\t\tt.Errorf(\"Test case %d: o.ContentType is %q, want %q\", i, got, want)\n\t\t}\n\t}\n}\n\nfunc TestContentTypeBucket_ComposeObjects(t *testing.T) {\n\tvar err error\n\tctx := context.Background()\n\n\tfor i, tc := range contentTypeBucketTestCases {\n\t\t\/\/ Set up a bucket.\n\t\tbucket := gcsfake.NewFakeBucket(timeutil.RealClock(), \"\")\n\n\t\t\/\/ Create a source object.\n\t\tconst srcName = \"some_src\"\n\t\t_, err = bucket.CreateObject(ctx, &gcs.CreateObjectRequest{\n\t\t\tName: srcName,\n\t\t\tContents: strings.NewReader(\"\"),\n\t\t})\n\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Test case %d: CreateObject: %v\", err)\n\t\t}\n\n\t\t\/\/ Compose.\n\t\treq := &gcs.ComposeObjectsRequest{\n\t\t\tDstName: tc.name,\n\t\t\tContentType: tc.request,\n\t\t\tSources: []gcs.ComposeSource{{Name: srcName}},\n\t\t}\n\n\t\to, err := bucket.ComposeObjects(ctx, req)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Test case %d: ComposeObject: %v\", i, err)\n\t\t}\n\n\t\t\/\/ Check the content type.\n\t\tif got, want := o.ContentType, tc.expected; got != want {\n\t\t\tt.Errorf(\"Test case %d: o.ContentType is %q, want %q\", i, got, want)\n\t\t}\n\t}\n}\n<commit_msg>Fix test bugs.<commit_after>\/\/ Copyright 2016 Google Inc. 
All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gcsx_test\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/googlecloudplatform\/gcsfuse\/internal\/gcsx\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\/gcsfake\"\n\t\"github.com\/jacobsa\/timeutil\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nvar contentTypeBucketTestCases = []struct {\n\tname string\n\trequest string \/\/ ContentType in request\n\texpected string \/\/ Expected final type\n}{\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ No extension\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t0: {\n\t\tname: \"foo\/bar\",\n\t\trequest: \"\",\n\t\texpected: \"\",\n\t},\n\n\t1: {\n\t\tname: \"foo\/bar\",\n\t\trequest: \"image\/jpeg\",\n\t\texpected: \"image\/jpeg\",\n\t},\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Unknown extension\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t2: {\n\t\tname: \"foo\/bar.asdf\",\n\t\trequest: \"\",\n\t\texpected: \"\",\n\t},\n\n\t3: {\n\t\tname: \"foo\/bar.asdf\",\n\t\trequest: \"image\/jpeg\",\n\t\texpected: \"image\/jpeg\",\n\t},\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Known extension\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t4: {\n\t\tname: \"foo\/bar.jpg\",\n\t\trequest: \"\",\n\t\texpected: \"image\/jpeg\",\n\t},\n\n\t5: {\n\t\tname: \"foo\/bar.jpg\",\n\t\trequest: \"text\/plain\",\n\t\texpected: \"text\/plain\",\n\t},\n}\n\nfunc TestContentTypeBucket_CreateObject(t *testing.T) {\n\tfor i, tc := range contentTypeBucketTestCases {\n\t\t\/\/ Set up a bucket.\n\t\tbucket := gcsx.NewContentTypeBucket(\n\t\t\tgcsfake.NewFakeBucket(timeutil.RealClock(), \"\"))\n\n\t\t\/\/ Create the object.\n\t\treq := &gcs.CreateObjectRequest{\n\t\t\tName: tc.name,\n\t\t\tContentType: tc.request,\n\t\t\tContents: strings.NewReader(\"\"),\n\t\t}\n\n\t\to, err := bucket.CreateObject(context.Background(), req)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Test case %d: CreateObject: %v\", i, err)\n\t\t}\n\n\t\t\/\/ Check the content type.\n\t\tif got, want := o.ContentType, tc.expected; got != want {\n\t\t\tt.Errorf(\"Test case %d: o.ContentType is %q, want %q\", i, got, want)\n\t\t}\n\t}\n}\n\nfunc TestContentTypeBucket_ComposeObjects(t *testing.T) {\n\tvar err error\n\tctx := context.Background()\n\n\tfor i, tc := range contentTypeBucketTestCases {\n\t\t\/\/ Set up a bucket.\n\t\tbucket := gcsx.NewContentTypeBucket(\n\t\t\tgcsfake.NewFakeBucket(timeutil.RealClock(), \"\"))\n\n\t\t\/\/ Create a source object.\n\t\tconst srcName = \"some_src\"\n\t\t_, err = bucket.CreateObject(ctx, &gcs.CreateObjectRequest{\n\t\t\tName: srcName,\n\t\t\tContents: strings.NewReader(\"\"),\n\t\t})\n\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Test case %d: CreateObject: %v\", i, err)\n\t\t}\n\n\t\t\/\/ Compose.\n\t\treq := &gcs.ComposeObjectsRequest{\n\t\t\tDstName: tc.name,\n\t\t\tContentType: tc.request,\n\t\t\tSources: []gcs.ComposeSource{{Name: srcName}},\n\t\t}\n\n\t\to, err := 
bucket.ComposeObjects(ctx, req)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Test case %d: ComposeObject: %v\", i, err)\n\t\t}\n\n\t\t\/\/ Check the content type.\n\t\tif got, want := o.ContentType, tc.expected; got != want {\n\t\t\tt.Errorf(\"Test case %d: o.ContentType is %q, want %q\", i, got, want)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package provider\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/terraform\"\n)\n\ntype customLens struct {\n\tcustomLen int\n}\n\nfunc TestAccResourceString(t *testing.T) {\n\tresource.UnitTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProtoV5ProviderFactories: testAccProtoV5ProviderFactories(),\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccResourceStringBasic,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccResourceStringCheck(\"random_string.basic\", &customLens{\n\t\t\t\t\t\tcustomLen: 12,\n\t\t\t\t\t}),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceName: \"random_string.basic\",\n\t\t\t\tImportState: true,\n\t\t\t\tImportStateVerify: true,\n\t\t\t\tImportStateVerifyIgnore: []string{\"length\", \"lower\", \"number\", \"special\", \"upper\", \"min_lower\", \"min_numeric\", \"min_special\", \"min_upper\", \"override_special\"},\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccResourceStringOverride(t *testing.T) {\n\tresource.UnitTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProtoV5ProviderFactories: testAccProtoV5ProviderFactories(),\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccResourceStringOverride,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccResourceStringCheck(\"random_string.override\", &customLens{\n\t\t\t\t\t\tcustomLen: 4,\n\t\t\t\t\t}),\n\t\t\t\t\tpatternMatch(\"random_string.override\", \"!!!!\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccResourceStringMin(t *testing.T) {\n\tresource.UnitTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProtoV5ProviderFactories: testAccProtoV5ProviderFactories(),\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccResourceStringMin,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccResourceStringCheck(\"random_string.min\", &customLens{\n\t\t\t\t\t\tcustomLen: 12,\n\t\t\t\t\t}),\n\t\t\t\t\tregexMatch(\"random_string.min\", regexp.MustCompile(`([a-z])`), 2),\n\t\t\t\t\tregexMatch(\"random_string.min\", regexp.MustCompile(`([A-Z])`), 3),\n\t\t\t\t\tregexMatch(\"random_string.min\", regexp.MustCompile(`([0-9])`), 4),\n\t\t\t\t\tregexMatch(\"random_string.min\", regexp.MustCompile(`([!#@])`), 1),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccResourceStringErrors(t *testing.T) {\n\tresource.UnitTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProtoV5ProviderFactories: testAccProtoV5ProviderFactories(),\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccResourceStringInvalidConfig,\n\t\t\t\tExpectError: regexp.MustCompile(`.*length \\(2\\) must be >= min_upper \\+ min_lower \\+ min_numeric \\+ min_special \\(3\\)`),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccResourceStringLengthTooShortConfig,\n\t\t\t\tExpectError: regexp.MustCompile(`.*expected attribute to be at least 1, got 0`),\n\t\t\t},\n\t\t},\n\t})\n}\n\nconst (\n\ttestAccResourceStringBasic = `\nresource \"random_string\" \"basic\" {\n length = 
12\n}`\n\ttestAccResourceStringOverride = `\nresource \"random_string\" \"override\" {\nlength = 4\noverride_special = \"!\"\nlower = false\nupper = false\nnumber = false\n}\n`\n\ttestAccResourceStringMin = `\nresource \"random_string\" \"min\" {\nlength = 12\noverride_special = \"!#@\"\nmin_lower = 2\nmin_upper = 3\nmin_special = 1\nmin_numeric = 4\n}`\n\ttestAccResourceStringInvalidConfig = `\nresource \"random_string\" \"invalid_length\" {\n length = 2\n min_lower = 3\n}`\n\ttestAccResourceStringLengthTooShortConfig = `\nresource \"random_string\" \"invalid_length\" {\n length = 0\n}`\n)\n\nfunc testAccResourceStringCheck(id string, want *customLens) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[id]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", id)\n\t\t}\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No ID is set\")\n\t\t}\n\n\t\tcustomStr := rs.Primary.Attributes[\"result\"]\n\n\t\tif got, want := len(customStr), want.customLen; got != want {\n\t\t\treturn fmt.Errorf(\"custom string length is %d; want %d\", got, want)\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\nfunc regexMatch(id string, exp *regexp.Regexp, requiredMatches int) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[id]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", id)\n\t\t}\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No ID is set\")\n\t\t}\n\n\t\tcustomStr := rs.Primary.Attributes[\"result\"]\n\n\t\tif matches := exp.FindAllStringSubmatchIndex(customStr, -1); len(matches) < requiredMatches {\n\t\t\treturn fmt.Errorf(\"custom string is %s; did not match %s\", customStr, exp)\n\t\t}\n\n\t\treturn nil\n\t}\n}\nfunc patternMatch(id string, want string) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[id]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", id)\n\t\t}\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No ID is set\")\n\t\t}\n\t\tcustomStr := rs.Primary.Attributes[\"result\"]\n\n\t\tif got, want := customStr, want; got != want {\n\t\t\treturn fmt.Errorf(\"custom string is %s; want %s\", got, want)\n\t\t}\n\n\t\treturn nil\n\t}\n}\n<commit_msg>Fixing test (#177)<commit_after>package provider\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/terraform\"\n)\n\ntype customLens struct {\n\tcustomLen int\n}\n\nfunc TestAccResourceString(t *testing.T) {\n\tresource.UnitTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProtoV5ProviderFactories: testAccProtoV5ProviderFactories(),\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccResourceStringBasic,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccResourceStringCheck(\"random_string.basic\", &customLens{\n\t\t\t\t\t\tcustomLen: 12,\n\t\t\t\t\t}),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceName: \"random_string.basic\",\n\t\t\t\tImportState: true,\n\t\t\t\tImportStateVerify: true,\n\t\t\t\tImportStateVerifyIgnore: []string{\"length\", \"lower\", \"number\", \"special\", \"upper\", \"min_lower\", \"min_numeric\", \"min_special\", \"min_upper\", \"override_special\"},\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccResourceStringOverride(t *testing.T) {\n\tresource.UnitTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) 
},\n\t\tProtoV5ProviderFactories: testAccProtoV5ProviderFactories(),\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccResourceStringOverride,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccResourceStringCheck(\"random_string.override\", &customLens{\n\t\t\t\t\t\tcustomLen: 4,\n\t\t\t\t\t}),\n\t\t\t\t\tpatternMatch(\"random_string.override\", \"!!!!\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccResourceStringMin(t *testing.T) {\n\tresource.UnitTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProtoV5ProviderFactories: testAccProtoV5ProviderFactories(),\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccResourceStringMin,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccResourceStringCheck(\"random_string.min\", &customLens{\n\t\t\t\t\t\tcustomLen: 12,\n\t\t\t\t\t}),\n\t\t\t\t\tregexMatch(\"random_string.min\", regexp.MustCompile(`([a-z])`), 2),\n\t\t\t\t\tregexMatch(\"random_string.min\", regexp.MustCompile(`([A-Z])`), 3),\n\t\t\t\t\tregexMatch(\"random_string.min\", regexp.MustCompile(`([0-9])`), 4),\n\t\t\t\t\tregexMatch(\"random_string.min\", regexp.MustCompile(`([!#@])`), 1),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccResourceStringErrors(t *testing.T) {\n\tresource.UnitTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProtoV5ProviderFactories: testAccProtoV5ProviderFactories(),\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccResourceStringInvalidConfig,\n\t\t\t\tExpectError: regexp.MustCompile(`.*The password\/string length \\(2\\) must be >= min_upper \\+ min_lower \\+ min_numeric\\n\\+ min_special \\(3\\)`),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccResourceStringLengthTooShortConfig,\n\t\t\t\tExpectError: regexp.MustCompile(`.*Expected attribute at AttributeName\\(\"length\"\\) to be at least 1, got 0`),\n\t\t\t},\n\t\t},\n\t})\n}\n\nconst (\n\ttestAccResourceStringBasic = `\nresource \"random_string\" \"basic\" {\n length = 12\n}`\n\ttestAccResourceStringOverride = `\nresource \"random_string\" \"override\" {\nlength = 4\noverride_special = \"!\"\nlower = false\nupper = false\nnumber = false\n}\n`\n\ttestAccResourceStringMin = `\nresource \"random_string\" \"min\" {\nlength = 12\noverride_special = \"!#@\"\nmin_lower = 2\nmin_upper = 3\nmin_special = 1\nmin_numeric = 4\n}`\n\ttestAccResourceStringInvalidConfig = `\nresource \"random_string\" \"invalid_length\" {\n length = 2\n min_lower = 3\n}`\n\ttestAccResourceStringLengthTooShortConfig = `\nresource \"random_string\" \"invalid_length\" {\n length = 0\n}`\n)\n\nfunc testAccResourceStringCheck(id string, want *customLens) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[id]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", id)\n\t\t}\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No ID is set\")\n\t\t}\n\n\t\tcustomStr := rs.Primary.Attributes[\"result\"]\n\n\t\tif got, want := len(customStr), want.customLen; got != want {\n\t\t\treturn fmt.Errorf(\"custom string length is %d; want %d\", got, want)\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\nfunc regexMatch(id string, exp *regexp.Regexp, requiredMatches int) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[id]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", id)\n\t\t}\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No ID is set\")\n\t\t}\n\n\t\tcustomStr := 
rs.Primary.Attributes[\"result\"]\n\n\t\tif matches := exp.FindAllStringSubmatchIndex(customStr, -1); len(matches) < requiredMatches {\n\t\t\treturn fmt.Errorf(\"custom string is %s; did not match %s\", customStr, exp)\n\t\t}\n\n\t\treturn nil\n\t}\n}\nfunc patternMatch(id string, want string) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[id]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", id)\n\t\t}\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No ID is set\")\n\t\t}\n\t\tcustomStr := rs.Primary.Attributes[\"result\"]\n\n\t\tif got, want := customStr, want; got != want {\n\t\t\treturn fmt.Errorf(\"custom string is %s; want %s\", got, want)\n\t\t}\n\n\t\treturn nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"os\"\n\t\"testing\"\n)\n\nfunc TestAuthenticate(t *testing.T) {\n\tparams := []struct{ consKey, consSecret, accKey, accSecret, expected string }{\n\t\t{\"\", \"abc\", \"def\", \"ghi\", \"error\"},\n\t\t{\"abc\", \"\", \"def\", \"ghi\", \"error\"},\n\t\t{\"\", \"abc\", \"def\", \"ghi\", \"error\"},\n\t\t{\"\", \"abc\", \"def\", \"ghi\", \"error\"},\n\t\t{\"\", \"abc\", \"def\", \"ghi\", \"error\"},\n\t\t{\"\", \"abc\", \"def\", \"ghi\", \"error\"},\n\t}\n\ttc := new(RealTwitterClient)\n\tfor _, v := range params {\n\t\tos.Setenv(\"TWIT_CONSUMER_KEY\", v.consKey)\n\t\tos.Setenv(\"TWIT_CONSUMER_SECRET\", v.consSecret)\n\t\tos.Setenv(\"TWIT_ACCESS_TOKEN\", v.accKey)\n\t\tos.Setenv(\"TWIT_ACCESS_TOKEN_SECRET\", v.accSecret)\n\t\terr := tc.authenticate()\n\t\tif v.expected != \"\" && err == nil {\n\t\t\tt.Fail()\n\t\t}\n\t\tif v.expected == \"\" && err != nil {\n\t\t\tt.Fail()\n\t\t}\n\t\tif err != nil && tc.api != nil {\n\t\t\tt.Fail()\n\t\t}\n\t}\n}\n<commit_msg>Fix test cases<commit_after>package server\n\nimport (\n\t\"os\"\n\t\"testing\"\n)\n\nfunc TestAuthenticate(t *testing.T) {\n\tparams := []struct{ consKey, consSecret, accKey, accSecret, expected string }{\n\t\t{\"\", \"abc\", \"def\", \"ghi\", \"error\"},\n\t\t{\"abc\", \"\", \"def\", \"ghi\", \"error\"},\n\t\t{\"abc\", \"abc\", \"\", \"ghi\", \"error\"},\n\t\t{\"ghi\", \"abc\", \"def\", \"\", \"error\"},\n\t}\n\ttc := new(RealTwitterClient)\n\tfor _, v := range params {\n\t\tos.Setenv(\"TWIT_CONSUMER_KEY\", v.consKey)\n\t\tos.Setenv(\"TWIT_CONSUMER_SECRET\", v.consSecret)\n\t\tos.Setenv(\"TWIT_ACCESS_TOKEN\", v.accKey)\n\t\tos.Setenv(\"TWIT_ACCESS_TOKEN_SECRET\", v.accSecret)\n\t\terr := tc.authenticate()\n\t\tif v.expected != \"\" && err == nil {\n\t\t\tt.Fail()\n\t\t}\n\t\tif v.expected == \"\" && err != nil {\n\t\t\tt.Fail()\n\t\t}\n\t\tif err != nil && tc.api != nil {\n\t\t\tt.Fail()\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package core\n\nimport (\n\t\"github.com\/privacybydesign\/gabi\"\n\t\"github.com\/privacybydesign\/irmago\"\n\t\"github.com\/privacybydesign\/irmago\/server\"\n)\n\n\/\/ This file contains the handler functions for the protocol messages, receiving and returning normally\n\/\/ Go-typed messages here (JSON (un)marshalling is handled by the router).\n\/\/ Maintaining the session state is done here, as well as checking whether the session is in the\n\/\/ appropriate status before handling the request.\n\nvar conf *server.Configuration\n\nfunc (session *session) handleDelete() {\n\tif session.finished() {\n\t\treturn\n\t}\n\tsession.markAlive()\n\n\tsession.result = &server.SessionResult{Token: session.token, Status: 
server.StatusCancelled}\n\tsession.setStatus(server.StatusCancelled)\n}\n\nfunc (session *session) handleGetRequest(min, max *irma.ProtocolVersion) (irma.SessionRequest, *irma.RemoteError) {\n\tif session.status != server.StatusInitialized {\n\t\treturn nil, server.RemoteError(server.ErrorUnexpectedRequest, \"Session already started\")\n\t}\n\tsession.markAlive()\n\n\tvar err error\n\tif session.version, err = chooseProtocolVersion(min, max); err != nil {\n\t\treturn nil, session.fail(server.ErrorProtocolVersion, \"\")\n\t}\n\tconf.Logger.Debugf(\"Using protocol version %s for session %s\", session.version.String(), session.token)\n\tsession.request.SetVersion(session.version)\n\n\tsession.setStatus(server.StatusConnected)\n\treturn session.request, nil\n}\n\nfunc (session *session) handleGetStatus() (server.Status, *irma.RemoteError) {\n\treturn session.status, nil\n}\n\nfunc (session *session) handlePostSignature(signature *irma.SignedMessage) (*irma.ProofStatus, *irma.RemoteError) {\n\tif session.status != server.StatusConnected {\n\t\treturn nil, server.RemoteError(server.ErrorUnexpectedRequest, \"Session not yet started or already finished\")\n\t}\n\tsession.markAlive()\n\n\tvar err error\n\tvar rerr *irma.RemoteError\n\tsession.result.Signature = signature\n\tsession.result.Disclosed, session.result.ProofStatus, err = signature.Verify(\n\t\tconf.IrmaConfiguration, session.request.(*irma.SignatureRequest))\n\tif err == nil {\n\t\tsession.setStatus(server.StatusDone)\n\t} else {\n\t\tsession.setStatus(server.StatusCancelled)\n\t\tif err == irma.ErrorMissingPublicKey {\n\t\t\trerr = session.fail(server.ErrorUnknownPublicKey, err.Error())\n\t\t} else {\n\t\t\trerr = session.fail(server.ErrorUnknown, err.Error())\n\t\t}\n\t}\n\treturn &session.result.ProofStatus, rerr\n}\n\nfunc (session *session) handlePostDisclosure(disclosure irma.Disclosure) (*irma.ProofStatus, *irma.RemoteError) {\n\tif session.status != server.StatusConnected {\n\t\treturn nil, server.RemoteError(server.ErrorUnexpectedRequest, \"Session not yet started or already finished\")\n\t}\n\tsession.markAlive()\n\n\tvar err error\n\tvar rerr *irma.RemoteError\n\tsession.result.Disclosed, session.result.ProofStatus, err = disclosure.Verify(\n\t\tconf.IrmaConfiguration, session.request.(*irma.DisclosureRequest))\n\tif err == nil {\n\t\tsession.setStatus(server.StatusDone)\n\t} else {\n\t\tsession.setStatus(server.StatusCancelled)\n\t\tif err == irma.ErrorMissingPublicKey {\n\t\t\trerr = session.fail(server.ErrorUnknownPublicKey, err.Error())\n\t\t} else {\n\t\t\trerr = session.fail(server.ErrorUnknown, err.Error())\n\t\t}\n\t}\n\treturn &session.result.ProofStatus, rerr\n}\n\nfunc (session *session) handlePostCommitments(commitments *irma.IssueCommitmentMessage) ([]*gabi.IssueSignatureMessage, *irma.RemoteError) {\n\tif session.status != server.StatusConnected {\n\t\treturn nil, server.RemoteError(server.ErrorUnexpectedRequest, \"Session not yet started or already finished\")\n\t}\n\tsession.markAlive()\n\n\trequest := session.request.(*irma.IssuanceRequest)\n\tdiscloseCount := len(request.Disclose)\n\tif len(commitments.Proofs) != len(request.Credentials)+discloseCount {\n\t\treturn nil, session.fail(server.ErrorAttributesMissing, \"\")\n\t}\n\n\t\/\/ Compute list of public keys against which to verify the received proofs\n\tdisclosureproofs := irma.ProofList(commitments.Proofs[:discloseCount])\n\tpubkeys, err := disclosureproofs.ExtractPublicKeys(conf.IrmaConfiguration)\n\tif err != nil {\n\t\treturn nil, 
session.fail(server.ErrorInvalidProofs, err.Error())\n\t}\n\tfor _, cred := range request.Credentials {\n\t\tiss := cred.CredentialTypeID.IssuerIdentifier()\n\t\tpubkey, _ := conf.IrmaConfiguration.PublicKey(iss, cred.KeyCounter) \/\/ No error, already checked earlier\n\t\tpubkeys = append(pubkeys, pubkey)\n\t}\n\n\t\/\/ Verify and merge keyshare server proofs, if any\n\tfor i, proof := range commitments.Proofs {\n\t\tpubkey := pubkeys[i]\n\t\tschemeid := irma.NewIssuerIdentifier(pubkey.Issuer).SchemeManagerIdentifier()\n\t\tif conf.IrmaConfiguration.SchemeManagers[schemeid].Distributed() {\n\t\t\tproofP, err := session.getProofP(commitments, schemeid)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, session.fail(server.ErrorKeyshareProofMissing, err.Error())\n\t\t\t}\n\t\t\tproof.MergeProofP(proofP, pubkey)\n\t\t}\n\t}\n\n\t\/\/ Verify all proofs and check disclosed attributes, if any, against request\n\tsession.result.Disclosed, session.result.ProofStatus, err = commitments.Disclosure().VerifyAgainstDisjunctions(\n\t\tconf.IrmaConfiguration, request.Disclose, request.Context, request.Nonce, pubkeys, false)\n\tif err != nil {\n\t\tif err == irma.ErrorMissingPublicKey {\n\t\t\treturn nil, session.fail(server.ErrorUnknownPublicKey, \"\")\n\t\t} else {\n\t\t\treturn nil, session.fail(server.ErrorUnknown, \"\")\n\t\t}\n\t}\n\tif session.result.ProofStatus == irma.ProofStatusExpired {\n\t\treturn nil, session.fail(server.ErrorAttributesExpired, \"\")\n\t}\n\tif session.result.ProofStatus != irma.ProofStatusValid {\n\t\treturn nil, session.fail(server.ErrorInvalidProofs, \"\")\n\t}\n\n\t\/\/ Compute CL signatures\n\tvar sigs []*gabi.IssueSignatureMessage\n\tfor i, cred := range request.Credentials {\n\t\tid := cred.CredentialTypeID.IssuerIdentifier()\n\t\tpk, _ := conf.IrmaConfiguration.PublicKey(id, cred.KeyCounter)\n\t\tsk, _ := privatekey(id)\n\t\tissuer := gabi.NewIssuer(sk, pk, one)\n\t\tproof := commitments.Proofs[i+discloseCount].(*gabi.ProofU)\n\t\tattributes, err := cred.AttributeList(conf.IrmaConfiguration, 0x03)\n\t\tif err != nil {\n\t\t\treturn nil, session.fail(server.ErrorIssuanceFailed, err.Error())\n\t\t}\n\t\tsig, err := issuer.IssueSignature(proof.U, attributes.Ints, commitments.Nonce2)\n\t\tif err != nil {\n\t\t\treturn nil, session.fail(server.ErrorIssuanceFailed, err.Error())\n\t\t}\n\t\tsigs = append(sigs, sig)\n\t}\n\n\tsession.setStatus(server.StatusDone)\n\treturn sigs, nil\n}\n<commit_msg>Prevent double session cancellation on invalid client responses<commit_after>package core\n\nimport (\n\t\"github.com\/privacybydesign\/gabi\"\n\t\"github.com\/privacybydesign\/irmago\"\n\t\"github.com\/privacybydesign\/irmago\/server\"\n)\n\n\/\/ This file contains the handler functions for the protocol messages, receiving and returning normally\n\/\/ Go-typed messages here (JSON (un)marshalling is handled by the router).\n\/\/ Maintaining the session state is done here, as well as checking whether the session is in the\n\/\/ appropriate status before handling the request.\n\nvar conf *server.Configuration\n\nfunc (session *session) handleDelete() {\n\tif session.finished() {\n\t\treturn\n\t}\n\tsession.markAlive()\n\n\tsession.result = &server.SessionResult{Token: session.token, Status: server.StatusCancelled}\n\tsession.setStatus(server.StatusCancelled)\n}\n\nfunc (session *session) handleGetRequest(min, max *irma.ProtocolVersion) (irma.SessionRequest, *irma.RemoteError) {\n\tif session.status != server.StatusInitialized {\n\t\treturn nil, 
server.RemoteError(server.ErrorUnexpectedRequest, \"Session already started\")\n\t}\n\tsession.markAlive()\n\n\tvar err error\n\tif session.version, err = chooseProtocolVersion(min, max); err != nil {\n\t\treturn nil, session.fail(server.ErrorProtocolVersion, \"\")\n\t}\n\tconf.Logger.Debugf(\"Using protocol version %s for session %s\", session.version.String(), session.token)\n\tsession.request.SetVersion(session.version)\n\n\tsession.setStatus(server.StatusConnected)\n\treturn session.request, nil\n}\n\nfunc (session *session) handleGetStatus() (server.Status, *irma.RemoteError) {\n\treturn session.status, nil\n}\n\nfunc (session *session) handlePostSignature(signature *irma.SignedMessage) (*irma.ProofStatus, *irma.RemoteError) {\n\tif session.status != server.StatusConnected {\n\t\treturn nil, server.RemoteError(server.ErrorUnexpectedRequest, \"Session not yet started or already finished\")\n\t}\n\tsession.markAlive()\n\n\tvar err error\n\tvar rerr *irma.RemoteError\n\tsession.result.Signature = signature\n\tsession.result.Disclosed, session.result.ProofStatus, err = signature.Verify(\n\t\tconf.IrmaConfiguration, session.request.(*irma.SignatureRequest))\n\tif err == nil {\n\t\tsession.setStatus(server.StatusDone)\n\t} else {\n\t\tif err == irma.ErrorMissingPublicKey {\n\t\t\trerr = session.fail(server.ErrorUnknownPublicKey, err.Error())\n\t\t} else {\n\t\t\trerr = session.fail(server.ErrorUnknown, err.Error())\n\t\t}\n\t}\n\treturn &session.result.ProofStatus, rerr\n}\n\nfunc (session *session) handlePostDisclosure(disclosure irma.Disclosure) (*irma.ProofStatus, *irma.RemoteError) {\n\tif session.status != server.StatusConnected {\n\t\treturn nil, server.RemoteError(server.ErrorUnexpectedRequest, \"Session not yet started or already finished\")\n\t}\n\tsession.markAlive()\n\n\tvar err error\n\tvar rerr *irma.RemoteError\n\tsession.result.Disclosed, session.result.ProofStatus, err = disclosure.Verify(\n\t\tconf.IrmaConfiguration, session.request.(*irma.DisclosureRequest))\n\tif err == nil {\n\t\tsession.setStatus(server.StatusDone)\n\t} else {\n\t\tif err == irma.ErrorMissingPublicKey {\n\t\t\trerr = session.fail(server.ErrorUnknownPublicKey, err.Error())\n\t\t} else {\n\t\t\trerr = session.fail(server.ErrorUnknown, err.Error())\n\t\t}\n\t}\n\treturn &session.result.ProofStatus, rerr\n}\n\nfunc (session *session) handlePostCommitments(commitments *irma.IssueCommitmentMessage) ([]*gabi.IssueSignatureMessage, *irma.RemoteError) {\n\tif session.status != server.StatusConnected {\n\t\treturn nil, server.RemoteError(server.ErrorUnexpectedRequest, \"Session not yet started or already finished\")\n\t}\n\tsession.markAlive()\n\n\trequest := session.request.(*irma.IssuanceRequest)\n\tdiscloseCount := len(request.Disclose)\n\tif len(commitments.Proofs) != len(request.Credentials)+discloseCount {\n\t\treturn nil, session.fail(server.ErrorAttributesMissing, \"\")\n\t}\n\n\t\/\/ Compute list of public keys against which to verify the received proofs\n\tdisclosureproofs := irma.ProofList(commitments.Proofs[:discloseCount])\n\tpubkeys, err := disclosureproofs.ExtractPublicKeys(conf.IrmaConfiguration)\n\tif err != nil {\n\t\treturn nil, session.fail(server.ErrorInvalidProofs, err.Error())\n\t}\n\tfor _, cred := range request.Credentials {\n\t\tiss := cred.CredentialTypeID.IssuerIdentifier()\n\t\tpubkey, _ := conf.IrmaConfiguration.PublicKey(iss, cred.KeyCounter) \/\/ No error, already checked earlier\n\t\tpubkeys = append(pubkeys, pubkey)\n\t}\n\n\t\/\/ Verify and merge keyshare server proofs, if 
any\n\tfor i, proof := range commitments.Proofs {\n\t\tpubkey := pubkeys[i]\n\t\tschemeid := irma.NewIssuerIdentifier(pubkey.Issuer).SchemeManagerIdentifier()\n\t\tif conf.IrmaConfiguration.SchemeManagers[schemeid].Distributed() {\n\t\t\tproofP, err := session.getProofP(commitments, schemeid)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, session.fail(server.ErrorKeyshareProofMissing, err.Error())\n\t\t\t}\n\t\t\tproof.MergeProofP(proofP, pubkey)\n\t\t}\n\t}\n\n\t\/\/ Verify all proofs and check disclosed attributes, if any, against request\n\tsession.result.Disclosed, session.result.ProofStatus, err = commitments.Disclosure().VerifyAgainstDisjunctions(\n\t\tconf.IrmaConfiguration, request.Disclose, request.Context, request.Nonce, pubkeys, false)\n\tif err != nil {\n\t\tif err == irma.ErrorMissingPublicKey {\n\t\t\treturn nil, session.fail(server.ErrorUnknownPublicKey, \"\")\n\t\t} else {\n\t\t\treturn nil, session.fail(server.ErrorUnknown, \"\")\n\t\t}\n\t}\n\tif session.result.ProofStatus == irma.ProofStatusExpired {\n\t\treturn nil, session.fail(server.ErrorAttributesExpired, \"\")\n\t}\n\tif session.result.ProofStatus != irma.ProofStatusValid {\n\t\treturn nil, session.fail(server.ErrorInvalidProofs, \"\")\n\t}\n\n\t\/\/ Compute CL signatures\n\tvar sigs []*gabi.IssueSignatureMessage\n\tfor i, cred := range request.Credentials {\n\t\tid := cred.CredentialTypeID.IssuerIdentifier()\n\t\tpk, _ := conf.IrmaConfiguration.PublicKey(id, cred.KeyCounter)\n\t\tsk, _ := privatekey(id)\n\t\tissuer := gabi.NewIssuer(sk, pk, one)\n\t\tproof := commitments.Proofs[i+discloseCount].(*gabi.ProofU)\n\t\tattributes, err := cred.AttributeList(conf.IrmaConfiguration, 0x03)\n\t\tif err != nil {\n\t\t\treturn nil, session.fail(server.ErrorIssuanceFailed, err.Error())\n\t\t}\n\t\tsig, err := issuer.IssueSignature(proof.U, attributes.Ints, commitments.Nonce2)\n\t\tif err != nil {\n\t\t\treturn nil, session.fail(server.ErrorIssuanceFailed, err.Error())\n\t\t}\n\t\tsigs = append(sigs, sig)\n\t}\n\n\tsession.setStatus(server.StatusDone)\n\treturn sigs, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright (c) 2016 VMware, Inc. 
All Rights Reserved.\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage api\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"regexp\"\n\n\t\"github.com\/vmware\/harbor\/src\/common\/api\"\n\t\"github.com\/vmware\/harbor\/src\/common\/dao\"\n\t\"github.com\/vmware\/harbor\/src\/common\/models\"\n\t\"github.com\/vmware\/harbor\/src\/common\/utils\/log\"\n\t\"github.com\/vmware\/harbor\/src\/ui\/config\"\n\n\t\"strconv\"\n\t\"time\"\n)\n\n\/\/ ProjectAPI handles request to \/api\/projects\/{} \/api\/projects\/{}\/logs\ntype ProjectAPI struct {\n\tapi.BaseAPI\n\tuserID int\n\tprojectID int64\n\tprojectName string\n}\n\ntype projectReq struct {\n\tProjectName string `json:\"project_name\"`\n\tPublic int `json:\"public\"`\n}\n\nconst projectNameMaxLen int = 30\nconst projectNameMinLen int = 4\nconst dupProjectPattern = `Duplicate entry '\\w+' for key 'name'`\n\n\/\/ Prepare validates the URL and the user\nfunc (p *ProjectAPI) Prepare() {\n\tidStr := p.Ctx.Input.Param(\":id\")\n\tif len(idStr) > 0 {\n\t\tvar err error\n\t\tp.projectID, err = strconv.ParseInt(idStr, 10, 64)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error parsing project id: %s, error: %v\", idStr, err)\n\t\t\tp.CustomAbort(http.StatusBadRequest, \"invalid project id\")\n\t\t}\n\n\t\tproject, err := dao.GetProjectByID(p.projectID)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to get project %d: %v\", p.projectID, err)\n\t\t\tp.CustomAbort(http.StatusInternalServerError, \"Internal error.\")\n\t\t}\n\t\tif project == nil {\n\t\t\tp.CustomAbort(http.StatusNotFound, fmt.Sprintf(\"project does not exist, id: %v\", p.projectID))\n\t\t}\n\t\tp.projectName = project.Name\n\t}\n}\n\n\/\/ Post ...\nfunc (p *ProjectAPI) Post() {\n\tp.userID = p.ValidateUser()\n\tisSysAdmin, err := dao.IsAdminRole(p.userID)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to check admin role: %v\", err)\n\t}\n\tif !isSysAdmin && config.OnlyAdminCreateProject() {\n\t\tlog.Errorf(\"Only sys admin can create project\")\n\t\tp.RenderError(http.StatusForbidden, \"Only system admin can create project\")\n\t\treturn\n\t}\n\tvar req projectReq\n\tp.DecodeJSONReq(&req)\n\tpublic := req.Public\n\terr = validateProjectReq(req)\n\tif err != nil {\n\t\tlog.Errorf(\"Invalid project request, error: %v\", err)\n\t\tp.RenderError(http.StatusBadRequest, fmt.Sprintf(\"invalid request: %v\", err))\n\t\treturn\n\t}\n\tprojectName := req.ProjectName\n\texist, err := dao.ProjectExists(projectName)\n\tif err != nil {\n\t\tlog.Errorf(\"Error happened checking project existence in db, error: %v, project name: %s\", err, projectName)\n\t}\n\tif exist {\n\t\tp.RenderError(http.StatusConflict, \"\")\n\t\treturn\n\t}\n\tproject := models.Project{OwnerID: p.userID, Name: projectName, CreationTime: time.Now(), Public: public}\n\tprojectID, err := dao.AddProject(project)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to add project, error: %v\", err)\n\t\tdup, _ := regexp.MatchString(dupProjectPattern, err.Error())\n\t\tif dup {\n\t\t\tp.RenderError(http.StatusConflict, \"\")\n\t\t} else 
{\n\t\t\tp.RenderError(http.StatusInternalServerError, \"Failed to add project\")\n\t\t}\n\t\treturn\n\t}\n\tp.Redirect(http.StatusCreated, strconv.FormatInt(projectID, 10))\n}\n\n\/\/ Head ...\nfunc (p *ProjectAPI) Head() {\n\tprojectName := p.GetString(\"project_name\")\n\tif len(projectName) == 0 {\n\t\tp.CustomAbort(http.StatusBadRequest, \"project_name is needed\")\n\t}\n\n\tproject, err := dao.GetProjectByName(projectName)\n\tif err != nil {\n\t\tlog.Errorf(\"error occurred in GetProjectByName: %v\", err)\n\t\tp.CustomAbort(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))\n\t}\n\n\t\/\/ only public project can be Headed by user without login\n\tif project != nil && project.Public == 1 {\n\t\treturn\n\t}\n\n\tuserID := p.ValidateUser()\n\tif project == nil {\n\t\tp.CustomAbort(http.StatusNotFound, http.StatusText(http.StatusNotFound))\n\t}\n\n\tif !checkProjectPermission(userID, project.ProjectID) {\n\t\tp.CustomAbort(http.StatusForbidden, http.StatusText(http.StatusForbidden))\n\t}\n}\n\n\/\/ Get ...\nfunc (p *ProjectAPI) Get() {\n\tproject, err := dao.GetProjectByID(p.projectID)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to get project %d: %v\", p.projectID, err)\n\t\tp.CustomAbort(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))\n\t}\n\n\tif project.Public == 0 {\n\t\tuserID := p.ValidateUser()\n\t\tif !checkProjectPermission(userID, p.projectID) {\n\t\t\tp.CustomAbort(http.StatusUnauthorized, http.StatusText(http.StatusUnauthorized))\n\t\t}\n\t}\n\n\tp.Data[\"json\"] = project\n\tp.ServeJSON()\n}\n\n\/\/ Delete ...\nfunc (p *ProjectAPI) Delete() {\n\tif p.projectID == 0 {\n\t\tp.CustomAbort(http.StatusBadRequest, \"project ID is required\")\n\t}\n\n\tuserID := p.ValidateUser()\n\n\tif !hasProjectAdminRole(userID, p.projectID) {\n\t\tp.CustomAbort(http.StatusForbidden, \"\")\n\t}\n\n\tcontains, err := projectContainsRepo(p.projectName)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to check whether project %s contains any repository: %v\", p.projectName, err)\n\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t}\n\tif contains {\n\t\tp.CustomAbort(http.StatusPreconditionFailed, \"project contains repositories, cannot be deleted\")\n\t}\n\n\tcontains, err = projectContainsPolicy(p.projectID)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to check whether project %s contains any policy: %v\", p.projectName, err)\n\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t}\n\tif contains {\n\t\tp.CustomAbort(http.StatusPreconditionFailed, \"project contains policies, cannot be deleted\")\n\t}\n\n\tif err = dao.DeleteProject(p.projectID); err != nil {\n\t\tlog.Errorf(\"failed to delete project %d: %v\", p.projectID, err)\n\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t}\n\n\tgo func() {\n\t\tif err := dao.AddAccessLog(models.AccessLog{\n\t\t\tUserID: userID,\n\t\t\tProjectID: p.projectID,\n\t\t\tRepoName: p.projectName + \"\/\",\n\t\t\tRepoTag: \"N\/A\",\n\t\t\tOperation: \"delete\",\n\t\t}); err != nil {\n\t\t\tlog.Errorf(\"failed to add access log: %v\", err)\n\t\t}\n\t}()\n}\n\nfunc projectContainsRepo(name string) (bool, error) {\n\trepositories, err := getReposByProject(name)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn len(repositories) > 0, nil\n}\n\nfunc projectContainsPolicy(id int64) (bool, error) {\n\tpolicies, err := dao.GetRepPolicyByProject(id)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn len(policies) > 0, nil\n}\n\n\/\/ List ...\nfunc (p *ProjectAPI) List() 
{\n\tvar total int64\n\tvar public int\n\tvar err error\n\n\tpage, pageSize := p.GetPaginationParams()\n\n\tvar projectList []models.Project\n\tprojectName := p.GetString(\"project_name\")\n\n\tisPublic := p.GetString(\"is_public\")\n\tif len(isPublic) > 0 {\n\t\tpublic, err = strconv.Atoi(isPublic)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error parsing public property: %v, error: %v\", isPublic, err)\n\t\t\tp.CustomAbort(http.StatusBadRequest, \"invalid is_public value\")\n\t\t}\n\t}\n\tisAdmin := false\n\tif public == 1 {\n\t\ttotal, err = dao.GetTotalOfProjects(projectName, 1)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to get total of projects: %v\", err)\n\t\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t\t}\n\t\tprojectList, err = dao.GetProjects(projectName, 1, pageSize, pageSize*(page-1))\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to get projects: %v\", err)\n\t\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t\t}\n\t} else {\n\t\t\/\/if the request is not for public projects, user must login or provide credential\n\t\tp.userID = p.ValidateUser()\n\t\tisAdmin, err = dao.IsAdminRole(p.userID)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error occurred in check admin, error: %v\", err)\n\t\t\tp.CustomAbort(http.StatusInternalServerError, \"Internal error.\")\n\t\t}\n\t\tif isAdmin {\n\t\t\ttotal, err = dao.GetTotalOfProjects(projectName)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"failed to get total of projects: %v\", err)\n\t\t\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t\t\t}\n\t\t\tprojectList, err = dao.GetProjects(projectName, pageSize, pageSize*(page-1))\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"failed to get projects: %v\", err)\n\t\t\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t\t\t}\n\t\t} else {\n\t\t\ttotal, err = dao.GetTotalOfUserRelevantProjects(p.userID, projectName)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"failed to get total of projects: %v\", err)\n\t\t\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t\t\t}\n\t\t\tprojectList, err = dao.GetUserRelevantProjects(p.userID, projectName, pageSize, pageSize*(page-1))\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"failed to get projects: %v\", err)\n\t\t\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i := 0; i < len(projectList); i++ {\n\t\tif public != 1 {\n\t\t\tif isAdmin {\n\t\t\t\tprojectList[i].Role = models.PROJECTADMIN\n\t\t\t} else {\n\t\t\t\troles, err := dao.GetUserProjectRoles(p.userID, projectList[i].ProjectID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"failed to get user's project role: %v\", err)\n\t\t\t\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t\t\t\t}\n\t\t\t\tprojectList[i].Role = roles[0].RoleID\n\t\t\t}\n\t\t\tif projectList[i].Role == models.PROJECTADMIN {\n\t\t\t\tprojectList[i].Togglable = true\n\t\t\t}\n\t\t}\n\n\t\trepos, err := dao.GetRepositoryByProjectName(projectList[i].Name)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to get repositories of project %s: %v\", projectList[i].Name, err)\n\t\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t\t}\n\n\t\tprojectList[i].RepoCount = len(repos)\n\t}\n\n\tp.SetPaginationHeader(total, page, pageSize)\n\tp.Data[\"json\"] = projectList\n\tp.ServeJSON()\n}\n\n\/\/ ToggleProjectPublic ...\nfunc (p *ProjectAPI) ToggleProjectPublic() {\n\tp.userID = p.ValidateUser()\n\tvar req projectReq\n\n\tprojectID, err := strconv.ParseInt(p.Ctx.Input.Param(\":id\"), 10, 64)\n\tif err != nil {\n\t\tlog.Errorf(\"Error parsing project id: %d, 
error: %v\", projectID, err)\n\t\tp.RenderError(http.StatusBadRequest, \"invalid project id\")\n\t\treturn\n\t}\n\n\tp.DecodeJSONReq(&req)\n\tpublic := req.Public\n\tif !isProjectAdmin(p.userID, projectID) {\n\t\tlog.Warningf(\"Current user, id: %d does not have project admin role for project, id: %d\", p.userID, projectID)\n\t\tp.RenderError(http.StatusForbidden, \"\")\n\t\treturn\n\t}\n\terr = dao.ToggleProjectPublicity(p.projectID, public)\n\tif err != nil {\n\t\tlog.Errorf(\"Error while updating project, project id: %d, error: %v\", projectID, err)\n\t\tp.RenderError(http.StatusInternalServerError, \"Failed to update project\")\n\t}\n}\n\n\/\/ FilterAccessLog handles GET to \/api\/projects\/{}\/logs\nfunc (p *ProjectAPI) FilterAccessLog() {\n\tp.userID = p.ValidateUser()\n\n\tvar query models.AccessLog\n\tp.DecodeJSONReq(&query)\n\n\tif !checkProjectPermission(p.userID, p.projectID) {\n\t\tlog.Warningf(\"Current user, user id: %d does not have permission to read accesslog of project, id: %d\", p.userID, p.projectID)\n\t\tp.RenderError(http.StatusForbidden, \"\")\n\t\treturn\n\t}\n\tquery.ProjectID = p.projectID\n\tquery.BeginTime = time.Unix(query.BeginTimestamp, 0)\n\tquery.EndTime = time.Unix(query.EndTimestamp, 0)\n\n\tpage, pageSize := p.GetPaginationParams()\n\n\ttotal, err := dao.GetTotalOfAccessLogs(query)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to get total of access log: %v\", err)\n\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t}\n\n\tlogs, err := dao.GetAccessLogs(query, pageSize, pageSize*(page-1))\n\tif err != nil {\n\t\tlog.Errorf(\"failed to get access log: %v\", err)\n\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t}\n\n\tp.SetPaginationHeader(total, page, pageSize)\n\n\tp.Data[\"json\"] = logs\n\n\tp.ServeJSON()\n}\n\nfunc isProjectAdmin(userID int, pid int64) bool {\n\tisSysAdmin, err := dao.IsAdminRole(userID)\n\tif err != nil {\n\t\tlog.Errorf(\"Error occurred in IsAdminRole, returning false, error: %v\", err)\n\t\treturn false\n\t}\n\n\tif isSysAdmin {\n\t\treturn true\n\t}\n\n\trolelist, err := dao.GetUserProjectRoles(userID, pid)\n\tif err != nil {\n\t\tlog.Errorf(\"Error occurred in GetUserProjectRoles, returning false, error: %v\", err)\n\t\treturn false\n\t}\n\n\thasProjectAdminRole := false\n\tfor _, role := range rolelist {\n\t\tif role.RoleID == models.PROJECTADMIN {\n\t\t\thasProjectAdminRole = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn hasProjectAdminRole\n}\n\nfunc validateProjectReq(req projectReq) error {\n\tpn := req.ProjectName\n\tif isIllegalLength(req.ProjectName, projectNameMinLen, projectNameMaxLen) {\n\t\treturn fmt.Errorf(\"Project name is illegal in length. (greater than 4 or less than 30)\")\n\t}\n\tvalidProjectName := regexp.MustCompile(`^[a-z0-9](?:-*[a-z0-9])*(?:[._][a-z0-9](?:-*[a-z0-9])*)*$`)\n\tlegal := validProjectName.MatchString(pn)\n\tif !legal {\n\t\treturn fmt.Errorf(\"project name is not in lower case or contains illegal characters\")\n\t}\n\treturn nil\n}\n<commit_msg>fix project name length and follow docker project name rule<commit_after>\/*\n Copyright (c) 2016 VMware, Inc. 
All Rights Reserved.\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage api\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"regexp\"\n\n\t\"github.com\/vmware\/harbor\/src\/common\/api\"\n\t\"github.com\/vmware\/harbor\/src\/common\/dao\"\n\t\"github.com\/vmware\/harbor\/src\/common\/models\"\n\t\"github.com\/vmware\/harbor\/src\/common\/utils\/log\"\n\t\"github.com\/vmware\/harbor\/src\/ui\/config\"\n\n\t\"strconv\"\n\t\"time\"\n)\n\n\/\/ ProjectAPI handles request to \/api\/projects\/{} \/api\/projects\/{}\/logs\ntype ProjectAPI struct {\n\tapi.BaseAPI\n\tuserID int\n\tprojectID int64\n\tprojectName string\n}\n\ntype projectReq struct {\n\tProjectName string `json:\"project_name\"`\n\tPublic int `json:\"public\"`\n}\n\nconst projectNameMaxLen int = 30\nconst projectNameMinLen int = 2\nconst restrictedNameChars = `[a-z0-9]+(?:[._-][a-z0-9]+)*`\nconst dupProjectPattern = `Duplicate entry '\\w+' for key 'name'`\n\n\/\/ Prepare validates the URL and the user\nfunc (p *ProjectAPI) Prepare() {\n\tidStr := p.Ctx.Input.Param(\":id\")\n\tif len(idStr) > 0 {\n\t\tvar err error\n\t\tp.projectID, err = strconv.ParseInt(idStr, 10, 64)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error parsing project id: %s, error: %v\", idStr, err)\n\t\t\tp.CustomAbort(http.StatusBadRequest, \"invalid project id\")\n\t\t}\n\n\t\tproject, err := dao.GetProjectByID(p.projectID)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to get project %d: %v\", p.projectID, err)\n\t\t\tp.CustomAbort(http.StatusInternalServerError, \"Internal error.\")\n\t\t}\n\t\tif project == nil {\n\t\t\tp.CustomAbort(http.StatusNotFound, fmt.Sprintf(\"project does not exist, id: %v\", p.projectID))\n\t\t}\n\t\tp.projectName = project.Name\n\t}\n}\n\n\/\/ Post ...\nfunc (p *ProjectAPI) Post() {\n\tp.userID = p.ValidateUser()\n\tisSysAdmin, err := dao.IsAdminRole(p.userID)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to check admin role: %v\", err)\n\t}\n\tif !isSysAdmin && config.OnlyAdminCreateProject() {\n\t\tlog.Errorf(\"Only sys admin can create project\")\n\t\tp.RenderError(http.StatusForbidden, \"Only system admin can create project\")\n\t\treturn\n\t}\n\tvar req projectReq\n\tp.DecodeJSONReq(&req)\n\tpublic := req.Public\n\terr = validateProjectReq(req)\n\tif err != nil {\n\t\tlog.Errorf(\"Invalid project request, error: %v\", err)\n\t\tp.RenderError(http.StatusBadRequest, fmt.Sprintf(\"invalid request: %v\", err))\n\t\treturn\n\t}\n\tprojectName := req.ProjectName\n\texist, err := dao.ProjectExists(projectName)\n\tif err != nil {\n\t\tlog.Errorf(\"Error happened checking project existence in db, error: %v, project name: %s\", err, projectName)\n\t}\n\tif exist {\n\t\tp.RenderError(http.StatusConflict, \"\")\n\t\treturn\n\t}\n\tproject := models.Project{OwnerID: p.userID, Name: projectName, CreationTime: time.Now(), Public: public}\n\tprojectID, err := dao.AddProject(project)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to add project, error: %v\", err)\n\t\tdup, _ := regexp.MatchString(dupProjectPattern, err.Error())\n\t\tif dup 
{\n\t\t\tp.RenderError(http.StatusConflict, \"\")\n\t\t} else {\n\t\t\tp.RenderError(http.StatusInternalServerError, \"Failed to add project\")\n\t\t}\n\t\treturn\n\t}\n\tp.Redirect(http.StatusCreated, strconv.FormatInt(projectID, 10))\n}\n\n\/\/ Head ...\nfunc (p *ProjectAPI) Head() {\n\tprojectName := p.GetString(\"project_name\")\n\tif len(projectName) == 0 {\n\t\tp.CustomAbort(http.StatusBadRequest, \"project_name is needed\")\n\t}\n\n\tproject, err := dao.GetProjectByName(projectName)\n\tif err != nil {\n\t\tlog.Errorf(\"error occurred in GetProjectByName: %v\", err)\n\t\tp.CustomAbort(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))\n\t}\n\n\t\/\/ only public project can be Headed by user without login\n\tif project != nil && project.Public == 1 {\n\t\treturn\n\t}\n\n\tuserID := p.ValidateUser()\n\tif project == nil {\n\t\tp.CustomAbort(http.StatusNotFound, http.StatusText(http.StatusNotFound))\n\t}\n\n\tif !checkProjectPermission(userID, project.ProjectID) {\n\t\tp.CustomAbort(http.StatusForbidden, http.StatusText(http.StatusForbidden))\n\t}\n}\n\n\/\/ Get ...\nfunc (p *ProjectAPI) Get() {\n\tproject, err := dao.GetProjectByID(p.projectID)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to get project %d: %v\", p.projectID, err)\n\t\tp.CustomAbort(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))\n\t}\n\n\tif project.Public == 0 {\n\t\tuserID := p.ValidateUser()\n\t\tif !checkProjectPermission(userID, p.projectID) {\n\t\t\tp.CustomAbort(http.StatusUnauthorized, http.StatusText(http.StatusUnauthorized))\n\t\t}\n\t}\n\n\tp.Data[\"json\"] = project\n\tp.ServeJSON()\n}\n\n\/\/ Delete ...\nfunc (p *ProjectAPI) Delete() {\n\tif p.projectID == 0 {\n\t\tp.CustomAbort(http.StatusBadRequest, \"project ID is required\")\n\t}\n\n\tuserID := p.ValidateUser()\n\n\tif !hasProjectAdminRole(userID, p.projectID) {\n\t\tp.CustomAbort(http.StatusForbidden, \"\")\n\t}\n\n\tcontains, err := projectContainsRepo(p.projectName)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to check whether project %s contains any repository: %v\", p.projectName, err)\n\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t}\n\tif contains {\n\t\tp.CustomAbort(http.StatusPreconditionFailed, \"project contains repositories, cannot be deleted\")\n\t}\n\n\tcontains, err = projectContainsPolicy(p.projectID)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to check whether project %s contains any policy: %v\", p.projectName, err)\n\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t}\n\tif contains {\n\t\tp.CustomAbort(http.StatusPreconditionFailed, \"project contains policies, cannot be deleted\")\n\t}\n\n\tif err = dao.DeleteProject(p.projectID); err != nil {\n\t\tlog.Errorf(\"failed to delete project %d: %v\", p.projectID, err)\n\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t}\n\n\tgo func() {\n\t\tif err := dao.AddAccessLog(models.AccessLog{\n\t\t\tUserID: userID,\n\t\t\tProjectID: p.projectID,\n\t\t\tRepoName: p.projectName + \"\/\",\n\t\t\tRepoTag: \"N\/A\",\n\t\t\tOperation: \"delete\",\n\t\t}); err != nil {\n\t\t\tlog.Errorf(\"failed to add access log: %v\", err)\n\t\t}\n\t}()\n}\n\nfunc projectContainsRepo(name string) (bool, error) {\n\trepositories, err := getReposByProject(name)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn len(repositories) > 0, nil\n}\n\nfunc projectContainsPolicy(id int64) (bool, error) {\n\tpolicies, err := dao.GetRepPolicyByProject(id)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn len(policies) > 
0, nil\n}\n\n\/\/ List ...\nfunc (p *ProjectAPI) List() {\n\tvar total int64\n\tvar public int\n\tvar err error\n\n\tpage, pageSize := p.GetPaginationParams()\n\n\tvar projectList []models.Project\n\tprojectName := p.GetString(\"project_name\")\n\n\tisPublic := p.GetString(\"is_public\")\n\tif len(isPublic) > 0 {\n\t\tpublic, err = strconv.Atoi(isPublic)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error parsing public property: %v, error: %v\", isPublic, err)\n\t\t\tp.CustomAbort(http.StatusBadRequest, \"invalid is_public value\")\n\t\t}\n\t}\n\tisAdmin := false\n\tif public == 1 {\n\t\ttotal, err = dao.GetTotalOfProjects(projectName, 1)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to get total of projects: %v\", err)\n\t\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t\t}\n\t\tprojectList, err = dao.GetProjects(projectName, 1, pageSize, pageSize*(page-1))\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to get projects: %v\", err)\n\t\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t\t}\n\t} else {\n\t\t\/\/if the request is not for public projects, user must login or provide credential\n\t\tp.userID = p.ValidateUser()\n\t\tisAdmin, err = dao.IsAdminRole(p.userID)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error occurred in check admin, error: %v\", err)\n\t\t\tp.CustomAbort(http.StatusInternalServerError, \"Internal error.\")\n\t\t}\n\t\tif isAdmin {\n\t\t\ttotal, err = dao.GetTotalOfProjects(projectName)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"failed to get total of projects: %v\", err)\n\t\t\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t\t\t}\n\t\t\tprojectList, err = dao.GetProjects(projectName, pageSize, pageSize*(page-1))\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"failed to get projects: %v\", err)\n\t\t\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t\t\t}\n\t\t} else {\n\t\t\ttotal, err = dao.GetTotalOfUserRelevantProjects(p.userID, projectName)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"failed to get total of projects: %v\", err)\n\t\t\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t\t\t}\n\t\t\tprojectList, err = dao.GetUserRelevantProjects(p.userID, projectName, pageSize, pageSize*(page-1))\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"failed to get projects: %v\", err)\n\t\t\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i := 0; i < len(projectList); i++ {\n\t\tif public != 1 {\n\t\t\tif isAdmin {\n\t\t\t\tprojectList[i].Role = models.PROJECTADMIN\n\t\t\t} else {\n\t\t\t\troles, err := dao.GetUserProjectRoles(p.userID, projectList[i].ProjectID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"failed to get user's project role: %v\", err)\n\t\t\t\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t\t\t\t}\n\t\t\t\tprojectList[i].Role = roles[0].RoleID\n\t\t\t}\n\t\t\tif projectList[i].Role == models.PROJECTADMIN {\n\t\t\t\tprojectList[i].Togglable = true\n\t\t\t}\n\t\t}\n\n\t\trepos, err := dao.GetRepositoryByProjectName(projectList[i].Name)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to get repositories of project %s: %v\", projectList[i].Name, err)\n\t\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t\t}\n\n\t\tprojectList[i].RepoCount = len(repos)\n\t}\n\n\tp.SetPaginationHeader(total, page, pageSize)\n\tp.Data[\"json\"] = projectList\n\tp.ServeJSON()\n}\n\n\/\/ ToggleProjectPublic ...\nfunc (p *ProjectAPI) ToggleProjectPublic() {\n\tp.userID = p.ValidateUser()\n\tvar req projectReq\n\n\tprojectID, err := strconv.ParseInt(p.Ctx.Input.Param(\":id\"), 10, 64)\n\tif err != 
nil {\n\t\tlog.Errorf(\"Error parsing project id: %d, error: %v\", projectID, err)\n\t\tp.RenderError(http.StatusBadRequest, \"invalid project id\")\n\t\treturn\n\t}\n\n\tp.DecodeJSONReq(&req)\n\tpublic := req.Public\n\tif !isProjectAdmin(p.userID, projectID) {\n\t\tlog.Warningf(\"Current user, id: %d does not have project admin role for project, id: %d\", p.userID, projectID)\n\t\tp.RenderError(http.StatusForbidden, \"\")\n\t\treturn\n\t}\n\terr = dao.ToggleProjectPublicity(p.projectID, public)\n\tif err != nil {\n\t\tlog.Errorf(\"Error while updating project, project id: %d, error: %v\", projectID, err)\n\t\tp.RenderError(http.StatusInternalServerError, \"Failed to update project\")\n\t}\n}\n\n\/\/ FilterAccessLog handles GET to \/api\/projects\/{}\/logs\nfunc (p *ProjectAPI) FilterAccessLog() {\n\tp.userID = p.ValidateUser()\n\n\tvar query models.AccessLog\n\tp.DecodeJSONReq(&query)\n\n\tif !checkProjectPermission(p.userID, p.projectID) {\n\t\tlog.Warningf(\"Current user, user id: %d does not have permission to read accesslog of project, id: %d\", p.userID, p.projectID)\n\t\tp.RenderError(http.StatusForbidden, \"\")\n\t\treturn\n\t}\n\tquery.ProjectID = p.projectID\n\tquery.BeginTime = time.Unix(query.BeginTimestamp, 0)\n\tquery.EndTime = time.Unix(query.EndTimestamp, 0)\n\n\tpage, pageSize := p.GetPaginationParams()\n\n\ttotal, err := dao.GetTotalOfAccessLogs(query)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to get total of access log: %v\", err)\n\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t}\n\n\tlogs, err := dao.GetAccessLogs(query, pageSize, pageSize*(page-1))\n\tif err != nil {\n\t\tlog.Errorf(\"failed to get access log: %v\", err)\n\t\tp.CustomAbort(http.StatusInternalServerError, \"\")\n\t}\n\n\tp.SetPaginationHeader(total, page, pageSize)\n\n\tp.Data[\"json\"] = logs\n\n\tp.ServeJSON()\n}\n\nfunc isProjectAdmin(userID int, pid int64) bool {\n\tisSysAdmin, err := dao.IsAdminRole(userID)\n\tif err != nil {\n\t\tlog.Errorf(\"Error occurred in IsAdminRole, returning false, error: %v\", err)\n\t\treturn false\n\t}\n\n\tif isSysAdmin {\n\t\treturn true\n\t}\n\n\trolelist, err := dao.GetUserProjectRoles(userID, pid)\n\tif err != nil {\n\t\tlog.Errorf(\"Error occurred in GetUserProjectRoles, returning false, error: %v\", err)\n\t\treturn false\n\t}\n\n\thasProjectAdminRole := false\n\tfor _, role := range rolelist {\n\t\tif role.RoleID == models.PROJECTADMIN {\n\t\t\thasProjectAdminRole = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn hasProjectAdminRole\n}\n\nfunc validateProjectReq(req projectReq) error {\n\tpn := req.ProjectName\n\tif isIllegalLength(req.ProjectName, projectNameMinLen, projectNameMaxLen) {\n\t\treturn fmt.Errorf(\"project name length must be between 2 and 30 characters\")\n\t}\n\tvalidProjectName := regexp.MustCompile(`^` + restrictedNameChars + `$`)\n\tlegal := validProjectName.MatchString(pn)\n\tif !legal {\n\t\treturn fmt.Errorf(\"project name is not in lower case or contains illegal characters\")\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Google Inc. 
All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage nodes\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/client\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/client\/cache\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/fields\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/labels\"\n\t\"github.com\/golang\/glog\"\n)\n\ntype kubeNodes struct {\n\tclient *client.Client\n\t\/\/ a means to list all minions\n\tnodeLister *cache.StoreToNodeLister\n\treflector *cache.Reflector\n\t\/\/ Used to stop the existing reflector.\n\tstopChan chan struct{}\n\tgoodNodes []string \/\/ guarded by stateLock\n\tnodeErrors map[string]int \/\/ guarded by stateLock\n\tstateLock sync.RWMutex\n}\n\nfunc (self *kubeNodes) recordNodeError(name string) {\n\tself.stateLock.Lock()\n\tdefer self.stateLock.Unlock()\n\n\tself.nodeErrors[name]++\n}\n\nfunc (self *kubeNodes) recordGoodNodes(nodes []string) {\n\tself.stateLock.Lock()\n\tdefer self.stateLock.Unlock()\n\n\tself.goodNodes = nodes\n}\n\nfunc parseSelectorOrDie(s string) labels.Selector {\n\tselector, err := labels.ParseSelector(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn selector\n}\n\nfunc (self *kubeNodes) List() (*NodeList, error) {\n\tnodeList := newNodeList()\n\tallNodes, err := self.nodeLister.List()\n\tif err != nil {\n\t\tglog.Errorf(\"failed to list minions via watch interface - %v\", err)\n\t\treturn nil, fmt.Errorf(\"failed to list minions via watch interface - %v\", err)\n\t}\n\tglog.V(3).Infof(\"all kube nodes: %+v\", allNodes)\n\n\tgoodNodes := []string{}\n\tfor _, node := range allNodes.Items {\n\t\tnodeInfo := Info{}\n\t\thostname := \"\"\n\t\tfor _, addr := range node.Status.Addresses {\n\t\t\tswitch addr.Type {\n\t\t\tcase api.NodeExternalIP:\n\t\t\t\tnodeInfo.PublicIP = addr.Address\n\t\t\tcase api.NodeInternalIP:\n\t\t\t\tnodeInfo.InternalIP = addr.Address\n\t\t\tcase api.NodeHostName:\n\t\t\t\thostname = addr.Address\n\t\t\t}\n\t\t}\n\t\tif hostname == \"\" {\n\t\t\thostname = node.Name\n\t\t}\n\t\tif nodeInfo.InternalIP == \"\" {\n\t\t\taddrs, err := net.LookupIP(hostname)\n\t\t\tif err == nil {\n\t\t\t\tnodeInfo.InternalIP = addrs[0].String()\n\t\t\t} else {\n\t\t\t\tglog.Errorf(\"Skipping host %s since looking up its IP failed - %s\", node.Name, err)\n\t\t\t\tself.recordNodeError(node.Name)\n\t\t\t}\n\t\t}\n\n\t\tnodeList.Items[Host(hostname)] = nodeInfo\n\t\tgoodNodes = append(goodNodes, node.Name)\n\t}\n\tself.recordGoodNodes(goodNodes)\n\tglog.V(2).Infof(\"kube nodes found: %+v\", nodeList)\n\treturn nodeList, nil\n}\n\nfunc (self *kubeNodes) getState() string {\n\tself.stateLock.RLock()\n\tdefer self.stateLock.RUnlock()\n\n\tstate := \"\\tHealthy Nodes:\\n\"\n\tfor _, node := range self.goodNodes {\n\t\tstate += fmt.Sprintf(\"\\t\\t%s\\n\", node)\n\t}\n\tif len(self.nodeErrors) > 0 {\n\t\tstate += 
fmt.Sprintf(\"\\tNode Errors: %+v\\n\", self.nodeErrors)\n\t} else {\n\t\tstate += \"\\tNo node errors\\n\"\n\t}\n\treturn state\n}\n\nfunc (self *kubeNodes) DebugInfo() string {\n\tdesc := \"Kubernetes Nodes plugin: \\n\"\n\tdesc += self.getState()\n\tdesc += \"\\n\"\n\n\treturn desc\n}\n\nfunc NewKubeNodes(client *client.Client) (NodesApi, error) {\n\tif client == nil {\n\t\treturn nil, fmt.Errorf(\"client is nil\")\n\t}\n\n\tlw := cache.NewListWatchFromClient(client, \"minions\", api.NamespaceAll, fields.Everything())\n\tnodeLister := &cache.StoreToNodeLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}\n\treflector := cache.NewReflector(lw, &api.Node{}, nodeLister.Store, 0)\n\tstopChan := make(chan struct{})\n\treflector.RunUntil(stopChan)\n\n\treturn &kubeNodes{\n\t\tclient: client,\n\t\tnodeLister: nodeLister,\n\t\treflector: reflector,\n\t\tstopChan: stopChan,\n\t\tnodeErrors: make(map[string]int),\n\t}, nil\n}\n<commit_msg>Use node legacy host ip on older kubernetes versions<commit_after>\/\/ Copyright 2014 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage nodes\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/client\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/client\/cache\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/fields\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/labels\"\n\t\"github.com\/golang\/glog\"\n)\n\ntype kubeNodes struct {\n\tclient *client.Client\n\t\/\/ a means to list all minions\n\tnodeLister *cache.StoreToNodeLister\n\treflector *cache.Reflector\n\t\/\/ Used to stop the existing reflector.\n\tstopChan chan struct{}\n\tgoodNodes []string \/\/ guarded by stateLock\n\tnodeErrors map[string]int \/\/ guarded by stateLock\n\tstateLock sync.RWMutex\n}\n\nfunc (self *kubeNodes) recordNodeError(name string) {\n\tself.stateLock.Lock()\n\tdefer self.stateLock.Unlock()\n\n\tself.nodeErrors[name]++\n}\n\nfunc (self *kubeNodes) recordGoodNodes(nodes []string) {\n\tself.stateLock.Lock()\n\tdefer self.stateLock.Unlock()\n\n\tself.goodNodes = nodes\n}\n\nfunc parseSelectorOrDie(s string) labels.Selector {\n\tselector, err := labels.ParseSelector(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn selector\n}\n\nfunc (self *kubeNodes) List() (*NodeList, error) {\n\tnodeList := newNodeList()\n\tallNodes, err := self.nodeLister.List()\n\tif err != nil {\n\t\tglog.Errorf(\"failed to list minions via watch interface - %v\", err)\n\t\treturn nil, fmt.Errorf(\"failed to list minions via watch interface - %v\", err)\n\t}\n\tglog.V(3).Infof(\"all kube nodes: %+v\", allNodes)\n\n\tgoodNodes := []string{}\n\tfor _, node := range allNodes.Items {\n\t\tnodeInfo := Info{}\n\t\thostname := \"\"\n\t\tfor _, addr := range node.Status.Addresses {\n\t\t\tswitch addr.Type {\n\t\t\tcase api.NodeExternalIP:\n\t\t\t\tnodeInfo.PublicIP = addr.Address\n\t\t\tcase 
api.NodeInternalIP:\n\t\t\t\tnodeInfo.InternalIP = addr.Address\n\t\t\tcase api.NodeLegacyHostIP:\n\t\t\t\tnodeInfo.InternalIP = addr.Address\n\t\t\tcase api.NodeHostName:\n\t\t\t\thostname = addr.Address\n\t\t\t}\n\t\t}\n\t\tif hostname == \"\" {\n\t\t\thostname = node.Name\n\t\t}\n\t\tif nodeInfo.InternalIP == \"\" {\n\t\t\taddrs, err := net.LookupIP(hostname)\n\t\t\tif err == nil {\n\t\t\t\tnodeInfo.InternalIP = addrs[0].String()\n\t\t\t} else {\n\t\t\t\tglog.Errorf(\"Skipping host %s since looking up its IP failed - %s\", node.Name, err)\n\t\t\t\tself.recordNodeError(node.Name)\n\t\t\t}\n\t\t}\n\n\t\tnodeList.Items[Host(hostname)] = nodeInfo\n\t\tgoodNodes = append(goodNodes, node.Name)\n\t}\n\tself.recordGoodNodes(goodNodes)\n\tglog.V(2).Infof(\"kube nodes found: %+v\", nodeList)\n\treturn nodeList, nil\n}\n\nfunc (self *kubeNodes) getState() string {\n\tself.stateLock.RLock()\n\tdefer self.stateLock.RUnlock()\n\n\tstate := \"\\tHealthy Nodes:\\n\"\n\tfor _, node := range self.goodNodes {\n\t\tstate += fmt.Sprintf(\"\\t\\t%s\\n\", node)\n\t}\n\tif len(self.nodeErrors) > 0 {\n\t\tstate += fmt.Sprintf(\"\\tNode Errors: %+v\\n\", self.nodeErrors)\n\t} else {\n\t\tstate += \"\\tNo node errors\\n\"\n\t}\n\treturn state\n}\n\nfunc (self *kubeNodes) DebugInfo() string {\n\tdesc := \"Kubernetes Nodes plugin: \\n\"\n\tdesc += self.getState()\n\tdesc += \"\\n\"\n\n\treturn desc\n}\n\nfunc NewKubeNodes(client *client.Client) (NodesApi, error) {\n\tif client == nil {\n\t\treturn nil, fmt.Errorf(\"client is nil\")\n\t}\n\n\tlw := cache.NewListWatchFromClient(client, \"minions\", api.NamespaceAll, fields.Everything())\n\tnodeLister := &cache.StoreToNodeLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}\n\treflector := cache.NewReflector(lw, &api.Node{}, nodeLister.Store, 0)\n\tstopChan := make(chan struct{})\n\treflector.RunUntil(stopChan)\n\n\treturn &kubeNodes{\n\t\tclient: client,\n\t\tnodeLister: nodeLister,\n\t\treflector: reflector,\n\t\tstopChan: stopChan,\n\t\tnodeErrors: make(map[string]int),\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"io\/ioutil\"\n\t\"strings\"\n\t\"testing\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"encoding\/json\"\n)\n\n\ntype domainsResult struct {\n\tResult []string `json:\"result\"`\n\tError error `json:\"error\"`\n}\n\n\nfunc request(s *Server, t *testing.T, method string, domain string, body string) *httptest.ResponseRecorder {\n\treqBody := strings.NewReader(body)\n\treq, err := http.NewRequest(method, \"http:\/\/counters.io\/\" + domain, reqBody)\n\tif err != nil {\n\t\tt.Fatalf(\"%s\", err)\n\t}\n\trespw := httptest.NewRecorder()\n\ts.ServeHTTP(respw, req)\n\treturn respw\n}\n\nfunc unmarschal(resp *httptest.ResponseRecorder) domainsResult {\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\tvar r domainsResult\n\tjson.Unmarshal(body, &r)\n\treturn r\n}\n\n\nfunc TestDomainsInitiallyEmpty(t *testing.T) {\n\ts := New()\n\tresp := request(s, t, \"GET\", \"\", \"{}\")\n\tif resp.Code != 200 {\n\t\tt.Fatalf(\"Invalid Response Code %d - %s\", resp.Code, resp.Body.String())\t\n\t\treturn\n\t}\n\tresult := unmarschal(resp)\n\tif len(result.Result) != 0 {\n\t\tt.Fatalf(\"Initial resultCount != 0. 
Got %s\", result)\t\n\t}\n}\n\nfunc TestBadRequest(t *testing.T) {\n\ts := New()\n\tvar resp *httptest.ResponseRecorder\n\tresp = request(s, t, \"GET\", \"\", `{\"invalid\": \"request\"}`)\n\tif resp.Code != 400 {\n\t\tt.Fatalf(\"Invalid Response Code %d - Expected 400\", resp.Code)\t\n\t\treturn\n\t}\n\tresp = request(s, t, \"POST\", \"marvel\", \"{}\")\n\tif resp.Code != 400 {\n\t\tt.Fatalf(\"Invalid Response Code %d - Expected 400\", resp.Code)\t\n\t\treturn\n\t}\n}\n\nfunc TestCreateDomain(t *testing.T) {\n\ts := New()\n\tresp := request(s, t, \"POST\", \"marvel\", `{\n\t\t\"domain\": \"marvel\",\n\t\t\"domainType\": \"mutable\",\n\t\t\"capacity\": 100000,\n\t\t\"values\": []\n\t}`)\n\n\tif resp.Code != 200 {\n\t\tt.Fatalf(\"Invalid Response Code %d - %s\", resp.Code, resp.Body.String())\n\t\treturn\n\t}\n\n\tresult := unmarschal(resp)\n\tif len(result.Result) != 1 {\n\t\tt.Fatalf(\"after add resultCount != 1. Got %s\", len(result.Result))\n\t}\n}\n<commit_msg>cleanup<commit_after>package server\n\nimport (\n\t\"io\/ioutil\"\n\t\"strings\"\n\t\"testing\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"encoding\/json\"\n)\n\n\ntype domainsResult struct {\n\tResult []string `json:\"result\"`\n\tError error `json:\"error\"`\n}\n\n\nfunc request(s *Server, t *testing.T, method string, domain string, body string) *httptest.ResponseRecorder {\n\treqBody := strings.NewReader(body)\n\treq, err := http.NewRequest(method, \"http:\/\/counters.io\/\" + domain, reqBody)\n\tif err != nil {\n\t\tt.Fatalf(\"%s\", err)\n\t}\n\trespw := httptest.NewRecorder()\n\ts.ServeHTTP(respw, req)\n\treturn respw\n}\n\nfunc unmarschal(resp *httptest.ResponseRecorder) domainsResult {\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\tvar r domainsResult\n\tjson.Unmarshal(body, &r)\n\treturn r\n}\n\n\nfunc TestDomainsInitiallyEmpty(t *testing.T) {\n\ts := New()\n\tresp := request(s, t, \"GET\", \"\", \"{}\")\n\tif resp.Code != 200 {\n\t\tt.Fatalf(\"Invalid Response Code %d - %s\", resp.Code, resp.Body.String())\t\n\t\treturn\n\t}\n\tresult := unmarschal(resp)\n\tif len(result.Result) != 0 {\n\t\tt.Fatalf(\"Initial resultCount != 0. Got %s\", result)\t\n\t}\n}\n\nfunc TestBadRequest(t *testing.T) {\n\ts := New()\n\tvar resp *httptest.ResponseRecorder\n\tresp = request(s, t, \"GET\", \"\", `{\"invalid\": \"request\"}`)\n\tif resp.Code != 400 {\n\t\tt.Fatalf(\"Invalid Response Code %d - Expected 400\", resp.Code)\t\n\t\treturn\n\t}\n\tresp = request(s, t, \"POST\", \"marvel\", \"{}\")\n\tif resp.Code != 400 {\n\t\tt.Fatalf(\"Invalid Response Code %d - Expected 400\", resp.Code)\t\n\t\treturn\n\t}\n}\n\nfunc TestCreateDomain(t *testing.T) {\n\ts := New()\n\tresp := request(s, t, \"POST\", \"marvel\", `{\n\t\t\"domain\": \"marvel\",\n\t\t\"domainType\": \"mutable\",\n\t\t\"capacity\": 100000,\n\t\t\"values\": []\n\t}`)\n\n\tif resp.Code != 200 {\n\t\tt.Fatalf(\"Invalid Response Code %d - %s\", resp.Code, resp.Body.String())\n\t\treturn\n\t}\n\n\tresult := unmarschal(resp)\n\tif len(result.Result) != 1 {\n\t\tt.Fatalf(\"after add resultCount != 1. 
Got %d\", len(result.Result))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package server_test\n\nimport (\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n\n\t\"github.com\/LeoCBS\/garden\/server\"\n)\n\ntype mock struct {\n\tlocation string\n}\n\nfunc (m *mock) Store(rawJson io.ReadCloser) string {\n\treturn m.location\n}\n\nfunc TestPostParameterHandler(t *testing.T) {\n\treq, err := http.NewRequest(\"POST\", \"\/garden\/v1\/parameter\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trr := httptest.NewRecorder()\n\texpectedLocation := \"stored\"\n\ts := server.NewServer(&mock{\n\t\tlocation: expectedLocation,\n\t})\n\ts.ServeMux.ServeHTTP(rr, req)\n\n\t\/\/ Check the status code is what we expect.\n\tif status := rr.Code; status != http.StatusCreated {\n\t\tt.Errorf(\"handler returned wrong status code: got %v want %v\",\n\t\t\tstatus, http.StatusCreated)\n\t}\n\tresp := rr.Result()\n\tlocation := resp.Header.Get(\"Location\")\n\tif location != expectedLocation {\n\t\tt.Error(\"server don't return expected location\")\n\t}\n}\n<commit_msg>rename Store param<commit_after>package server_test\n\nimport (\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n\n\t\"github.com\/LeoCBS\/garden\/server\"\n)\n\ntype mock struct {\n\tlocation string\n}\n\nfunc (m *mock) Store(body io.ReadCloser) string {\n\treturn m.location\n}\n\nfunc TestPostParameterHandler(t *testing.T) {\n\treq, err := http.NewRequest(\"POST\", \"\/garden\/v1\/parameter\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trr := httptest.NewRecorder()\n\texpectedLocation := \"stored\"\n\ts := server.NewServer(&mock{\n\t\tlocation: expectedLocation,\n\t})\n\ts.ServeMux.ServeHTTP(rr, req)\n\n\t\/\/ Check the status code is what we expect.\n\tif status := rr.Code; status != http.StatusCreated {\n\t\tt.Errorf(\"handler returned wrong status code: got %v want %v\",\n\t\t\tstatus, http.StatusCreated)\n\t}\n\tresp := rr.Result()\n\tlocation := resp.Header.Get(\"Location\")\n\tif location != expectedLocation {\n\t\tt.Error(\"server don't return expected location\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package server_test\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n\n\t\"github.com\/LeoCBS\/garden\/server\"\n)\n\ntype mock struct {\n\tlocation string\n\terr bool\n}\n\nfunc (m *mock) Save(body io.ReadCloser) (string, error) {\n\tif m.err {\n\t\treturn \"\", errors.New(\"Save returned error\")\n\t}\n\treturn m.location, nil\n}\n\nfunc (m *mock) List() ([]byte, error) {\n\treturn nil, nil\n}\n\nfunc TestSaveParameterSuccess(t *testing.T) {\n\treq, err := http.NewRequest(\"POST\", \"\/garden\/v1\/parameter\/save\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trr := httptest.NewRecorder()\n\texpectedLocation := \"stored\"\n\ts := server.NewServer(&mock{\n\t\tlocation: expectedLocation,\n\t})\n\ts.ServeMux.ServeHTTP(rr, req)\n\n\t\/\/ Check the status code is what we expect.\n\tif status := rr.Code; status != http.StatusCreated {\n\t\tt.Errorf(\"handler returned wrong status code: got %v want %v\",\n\t\t\tstatus, http.StatusCreated)\n\t}\n\tresp := rr.Result()\n\tlocation := resp.Header.Get(\"Location\")\n\tif location != expectedLocation {\n\t\tt.Error(\"server don't return expected location\")\n\t}\n}\n\nfunc TestSaveParameterError(t *testing.T) {\n\treq, err := http.NewRequest(\"POST\", \"\/garden\/v1\/parameter\/save\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trr := httptest.NewRecorder()\n\texpectedLocation 
:= \"stored\"\n\ts := server.NewServer(&mock{\n\t\tlocation: expectedLocation,\n\t\terr: true,\n\t})\n\ts.ServeMux.ServeHTTP(rr, req)\n\n\t\/\/ Check the status code is what we expect.\n\tif status := rr.Code; status != http.StatusInternalServerError {\n\t\tt.Errorf(\"handler returned wrong status code: got %v want %v\",\n\t\t\tstatus, http.StatusInternalServerError)\n\t}\n}\n\nfunc TestListParameterError(t *testing.T) {\n\treq, err := http.NewRequest(\"POST\", \"\/garden\/v1\/parameter\/list\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trr := httptest.NewRecorder()\n\texpectedLocation := \"stored\"\n\ts := server.NewServer(&mock{\n\t\tlocation: expectedLocation,\n\t\terr: true,\n\t})\n\ts.ServeMux.ServeHTTP(rr, req)\n\n\t\/\/ Check the status code is what we expect.\n\tif status := rr.Code; status != http.StatusInternalServerError {\n\t\tt.Errorf(\"handler returned wrong status code: got %v want %v\",\n\t\t\tstatus, http.StatusInternalServerError)\n\t}\n}\n<commit_msg>start server test fixture<commit_after>package server_test\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n\n\t\"github.com\/LeoCBS\/garden\/server\"\n)\n\ntype mock struct {\n\tlocation string\n\terr bool\n}\n\nfunc (m *mock) Save(body io.ReadCloser) (string, error) {\n\tif m.err {\n\t\treturn \"\", errors.New(\"Save returned error\")\n\t}\n\treturn m.location, nil\n}\n\nfunc (m *mock) List() ([]byte, error) {\n\tif m.err {\n\t\treturn nil, errors.New(\"List exploded!!\")\n\t}\n\treturn nil, nil\n}\n\nfunc TestSaveParameterSuccess(t *testing.T) {\n\treq, err := http.NewRequest(\"POST\", \"\/garden\/v1\/parameter\/save\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trr := httptest.NewRecorder()\n\texpectedLocation := \"stored\"\n\ts := server.NewServer(&mock{\n\t\tlocation: expectedLocation,\n\t})\n\ts.ServeMux.ServeHTTP(rr, req)\n\n\t\/\/ Check the status code is what we expect.\n\tif status := rr.Code; status != http.StatusCreated {\n\t\tt.Errorf(\"handler returned wrong status code: got %v want %v\",\n\t\t\tstatus, http.StatusCreated)\n\t}\n\tresp := rr.Result()\n\tlocation := resp.Header.Get(\"Location\")\n\tif location != expectedLocation {\n\t\tt.Error(\"server don't return expected location\")\n\t}\n}\n\nfunc TestSaveParameterError(t *testing.T) {\n\treq, err := http.NewRequest(\"POST\", \"\/garden\/v1\/parameter\/save\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trr := httptest.NewRecorder()\n\texpectedLocation := \"stored\"\n\ts := server.NewServer(&mock{\n\t\tlocation: expectedLocation,\n\t\terr: true,\n\t})\n\ts.ServeMux.ServeHTTP(rr, req)\n\n\t\/\/ Check the status code is what we expect.\n\tif status := rr.Code; status != http.StatusInternalServerError {\n\t\tt.Errorf(\"handler returned wrong status code: got %v want %v\",\n\t\t\tstatus, http.StatusInternalServerError)\n\t}\n}\n\ntype fixture struct {\n\treq *http.Request\n\trr *httptest.ResponseRecorder\n}\n\nfunc setUp() {\n}\n\nfunc TestListParameterError(t *testing.T) {\n\treq, err := http.NewRequest(\"POST\", \"\/garden\/v1\/parameter\/list\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trr := httptest.NewRecorder()\n\ts := server.NewServer(&mock{\n\t\tlocation: \"\",\n\t\terr: true,\n\t})\n\ts.ServeMux.ServeHTTP(rr, req)\n\n\t\/\/ Check the status code is what we expect.\n\tif status := rr.Code; status != http.StatusInternalServerError {\n\t\tt.Errorf(\"handler returned wrong status code: got %v want %v\",\n\t\t\tstatus, http.StatusInternalServerError)\n\t}\n}\n<|endoftext|>"} 
{"text":"<commit_before>package server\n\nimport (\n\t\"crypto\/rsa\"\n\t\"crypto\/x509\"\n\t\"encoding\/pem\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/http\/httputil\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/ericchiang\/oidc\"\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/oauth2\"\n\n\t\"github.com\/coreos\/dex\/connector\/mock\"\n\t\"github.com\/coreos\/dex\/storage\"\n\t\"github.com\/coreos\/dex\/storage\/memory\"\n)\n\nfunc mustLoad(s string) *rsa.PrivateKey {\n\tblock, _ := pem.Decode([]byte(s))\n\tif block == nil {\n\t\tpanic(\"no pem data found\")\n\t}\n\tkey, err := x509.ParsePKCS1PrivateKey(block.Bytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn key\n}\n\nvar testKey = mustLoad(`-----BEGIN RSA PRIVATE KEY-----\nMIIEogIBAAKCAQEArmoiX5G36MKPiVGS1sicruEaGRrbhPbIKOf97aGGQRjXVngo\nKnwd2L4T9CRyABgQm3tLHHcT5crODoy46wX2g9onTZWViWWuhJ5wxXNmUbCAPWHb\nj9SunW53WuLYZ\/IJLNZt5XYCAFPjAakWp8uMuuDwWo5EyFaw85X3FSMhVmmaYDd0\ncn+1H4+NS\/52wX7tWmyvGUNJ8lzjFAnnOtBJByvkyIC7HDphkLQV4j\/\/sMNY1mPX\nHbsYgFv2J\/LIJtkjdYO2UoDhZG3Gvj16fMy2JE2owA8IX4\/s+XAmA2PiTfd0J5b4\ndrAKEcdDl83G6L3depEkTkfvp0ZLsh9xupAvIwIDAQABAoIBABKGgWonPyKA7+AF\nAxS\/MC0\/CZebC6\/+ylnV8lm4K1tkuRKdJp8EmeL4pYPsDxPFepYZLWwzlbB1rxdK\niSWld36fwEb0WXLDkxrQ\/Wdrj3Wjyqs6ZqjLTVS5dAH6UEQSKDlT+U5DD4lbX6RA\ngoCGFUeQNtdXfyTMWHU2+4yKM7NKzUpczFky+0d10Mg0ANj3\/4IILdr3hqkmMSI9\n1TB9ksWBXJxt3nGxAjzSFihQFUlc231cey\/HhYbvAX5fN0xhLxOk88adDcdXE7br\n3Ser1q6XaaFQSMj4oi1+h3RAT9MUjJ6johEqjw0PbEZtOqXvA1x5vfFdei6SqgKn\nAm3BspkCgYEA2lIiKEkT\/Je6ZH4Omhv9atbGoBdETAstL3FnNQjkyVau9f6bxQkl\n4\/sz985JpaiasORQBiTGY8JDT\/hXjROkut91agi2Vafhr29L\/mto7KZglfDsT4b2\n9z\/EZH8wHw7eYhvdoBbMbqNDSI8RrGa4mpLpuN+E0wsFTzSZEL+QMQUCgYEAzIQh\nxnreQvDAhNradMqLmxRpayn1ORaPReD4\/off+mi7hZRLKtP0iNgEVEWHJ6HEqqi1\nr38XAc8ap\/lfOVMar2MLyCFOhYspdHZ+TGLZfr8gg\/Fzeq9IRGKYadmIKVwjMeyH\nREPqg1tyrvMOE0HI5oqkko8JTDJ0OyVC0Vc6+AcCgYAqCzkywugLc\/jcU35iZVOH\nWLdFq1Vmw5w\/D7rNdtoAgCYPj6nV5y4Z2o2mgl6ifXbU7BMRK9Hc8lNeOjg6HfdS\nWahV9DmRA1SuIWPkKjE5qczd81i+9AHpmakrpWbSBF4FTNKAewOBpwVVGuBPcDTK\n59IE3V7J+cxa9YkotYuCNQKBgCwGla7AbHBEm2z+H+DcaUktD7R+B8gOTzFfyLoi\nTdj+CsAquDO0BQQgXG43uWySql+CifoJhc5h4v8d853HggsXa0XdxaWB256yk2Wm\nMePTCRDePVm\/ufLetqiyp1kf+IOaw1Oyux0j5oA62mDS3Iikd+EE4Z+BjPvefY\/L\nE2qpAoGAZo5Wwwk7q8b1n9n\/ACh4LpE+QgbFdlJxlfFLJCKstl37atzS8UewOSZj\nFDWV28nTP9sqbtsmU8Tem2jzMvZ7C\/Q0AuDoKELFUpux8shm8wfIhyaPnXUGZoAZ\nNp4vUwMSYV5mopESLWOg3loBxKyLGFtgGKVCjGiQvy6zISQ4fQo=\n-----END RSA PRIVATE KEY-----`)\n\nfunc newTestServer() (*httptest.Server, *Server) {\n\tvar server *Server\n\ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tserver.ServeHTTP(w, r)\n\t}))\n\tconfig := Config{\n\t\tIssuer: s.URL,\n\t\tStorage: memory.New(),\n\t\tConnectors: []Connector{\n\t\t\t{\n\t\t\t\tID: \"mock\",\n\t\t\t\tDisplayName: \"Mock\",\n\t\t\t\tConnector: mock.New(),\n\t\t\t},\n\t\t},\n\t}\n\tvar err error\n\tif server, err = newServer(config, staticRotationStrategy(testKey)); err != nil {\n\t\tpanic(err)\n\t}\n\tserver.skipApproval = true \/\/ Don't prompt for approval, just immediately redirect with code.\n\treturn s, server\n}\n\nfunc TestNewTestServer(t *testing.T) {\n\tnewTestServer()\n}\n\nfunc TestDiscovery(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\thttpServer, _ := newTestServer()\n\tdefer httpServer.Close()\n\n\tp, err := oidc.NewProvider(ctx, httpServer.URL)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get provider: %v\", err)\n\t}\n\trequired := []struct {\n\t\tname, val 
string\n\t}{\n\t\t{\"issuer\", p.Issuer},\n\t\t{\"authorization_endpoint\", p.AuthURL},\n\t\t{\"token_endpoint\", p.TokenURL},\n\t\t{\"jwks_uri\", p.JWKSURL},\n\t}\n\tfor _, field := range required {\n\t\tif field.val == \"\" {\n\t\t\tt.Errorf(\"server discovery is missing required field %q\", field.name)\n\t\t}\n\t}\n}\n\nfunc TestOAuth2Flow(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\thttpServer, s := newTestServer()\n\tdefer httpServer.Close()\n\n\tp, err := oidc.NewProvider(ctx, httpServer.URL)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get provider: %v\", err)\n\t}\n\n\tvar (\n\t\treqDump, respDump []byte\n\t\tgotCode bool\n\t\tstate = \"a_state\"\n\t)\n\tdefer func() {\n\t\tif !gotCode {\n\t\t\tt.Errorf(\"never got a code in callback\\n%s\\n%s\", reqDump, respDump)\n\t\t}\n\t}()\n\n\tvar oauth2Config *oauth2.Config\n\toauth2Server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == \"\/callback\" {\n\t\t\tq := r.URL.Query()\n\t\t\tif errType := q.Get(\"error\"); errType != \"\" {\n\t\t\t\tif desc := q.Get(\"error_description\"); desc != \"\" {\n\t\t\t\t\tt.Errorf(\"got error from server %s: %s\", errType, desc)\n\t\t\t\t} else {\n\t\t\t\t\tt.Errorf(\"got error from server %s\", errType)\n\t\t\t\t}\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif code := q.Get(\"code\"); code != \"\" {\n\t\t\t\tgotCode = true\n\t\t\t\ttoken, err := oauth2Config.Exchange(ctx, code)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"failed to exchange code for token: %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tidToken, ok := token.Extra(\"id_token\").(string)\n\t\t\t\tif !ok {\n\t\t\t\t\tt.Errorf(\"no id token found: %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t\/\/ TODO(ericchiang): validate id token\n\t\t\t\t_ = idToken\n\n\t\t\t\ttoken.Expiry = time.Now().Add(time.Second * -10)\n\t\t\t\tif token.Valid() {\n\t\t\t\t\tt.Errorf(\"token shouldn't be valid\")\n\t\t\t\t}\n\n\t\t\t\tnewToken, err := oauth2Config.TokenSource(ctx, token).Token()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"failed to refresh token: %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif token.RefreshToken == newToken.RefreshToken {\n\t\t\t\t\tt.Errorf(\"old refresh token was the same as the new token %q\", token.RefreshToken)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif gotState := q.Get(\"state\"); gotState != state {\n\t\t\t\tt.Errorf(\"state did not match, want=%q got=%q\", state, gotState)\n\t\t\t}\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\treturn\n\t\t}\n\t\thttp.Redirect(w, r, oauth2Config.AuthCodeURL(state), http.StatusSeeOther)\n\t}))\n\n\tdefer oauth2Server.Close()\n\n\tredirectURL := oauth2Server.URL + \"\/callback\"\n\tclient := storage.Client{\n\t\tID: \"testclient\",\n\t\tSecret: \"testclientsecret\",\n\t\tRedirectURIs: []string{redirectURL},\n\t}\n\tif err := s.storage.CreateClient(client); err != nil {\n\t\tt.Fatalf(\"failed to create client: %v\", err)\n\t}\n\n\toauth2Config = &oauth2.Config{\n\t\tClientID: client.ID,\n\t\tClientSecret: client.Secret,\n\t\tEndpoint: p.Endpoint(),\n\t\tScopes: []string{oidc.ScopeOpenID, \"profile\", \"email\", \"offline_access\"},\n\t\tRedirectURL: redirectURL,\n\t}\n\n\tresp, err := http.Get(oauth2Server.URL + \"\/login\")\n\tif err != nil {\n\t\tt.Fatalf(\"get failed: %v\", err)\n\t}\n\tif reqDump, err = httputil.DumpRequest(resp.Request, false); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif respDump, err = httputil.DumpResponse(resp, true); 
err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\ntype storageWithKeysTrigger struct {\n\tstorage.Storage\n\tf func()\n}\n\nfunc (s storageWithKeysTrigger) GetKeys() (storage.Keys, error) {\n\ts.f()\n\treturn s.Storage.GetKeys()\n}\n\nfunc TestKeyCacher(t *testing.T) {\n\ttNow := time.Now()\n\tnow := func() time.Time { return tNow }\n\n\ts := memory.New()\n\n\ttests := []struct {\n\t\tbefore func()\n\t\twantCallToStorage bool\n\t}{\n\t\t{\n\t\t\tbefore: func() {},\n\t\t\twantCallToStorage: true,\n\t\t},\n\t\t{\n\t\t\tbefore: func() {\n\t\t\t\ts.UpdateKeys(func(old storage.Keys) (storage.Keys, error) {\n\t\t\t\t\told.NextRotation = tNow.Add(time.Minute)\n\t\t\t\t\treturn old, nil\n\t\t\t\t})\n\t\t\t},\n\t\t\twantCallToStorage: true,\n\t\t},\n\t\t{\n\t\t\tbefore: func() {},\n\t\t\twantCallToStorage: false,\n\t\t},\n\t\t{\n\t\t\tbefore: func() {\n\t\t\t\ttNow = tNow.Add(time.Hour)\n\t\t\t},\n\t\t\twantCallToStorage: true,\n\t\t},\n\t\t{\n\t\t\tbefore: func() {\n\t\t\t\ttNow = tNow.Add(time.Hour)\n\t\t\t\ts.UpdateKeys(func(old storage.Keys) (storage.Keys, error) {\n\t\t\t\t\told.NextRotation = tNow.Add(time.Minute)\n\t\t\t\t\treturn old, nil\n\t\t\t\t})\n\t\t\t},\n\t\t\twantCallToStorage: true,\n\t\t},\n\t\t{\n\t\t\tbefore: func() {},\n\t\t\twantCallToStorage: false,\n\t\t},\n\t}\n\n\tgotCall := false\n\ts = newKeyCacher(storageWithKeysTrigger{s, func() { gotCall = true }}, now)\n\tfor i, tc := range tests {\n\t\tgotCall = false\n\t\ttc.before()\n\t\ts.GetKeys()\n\t\tif gotCall != tc.wantCallToStorage {\n\t\t\tt.Errorf(\"case %d: expected call to storage=%t got call to storage=%t\", i, tc.wantCallToStorage, gotCall)\n\t\t}\n\t}\n}\n<commit_msg>server: run server tests at a non-root URL<commit_after>package server\n\nimport (\n\t\"crypto\/rsa\"\n\t\"crypto\/x509\"\n\t\"encoding\/pem\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/http\/httputil\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/ericchiang\/oidc\"\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/oauth2\"\n\n\t\"github.com\/coreos\/dex\/connector\/mock\"\n\t\"github.com\/coreos\/dex\/storage\"\n\t\"github.com\/coreos\/dex\/storage\/memory\"\n)\n\nfunc mustLoad(s string) *rsa.PrivateKey {\n\tblock, _ := pem.Decode([]byte(s))\n\tif block == nil {\n\t\tpanic(\"no pem data found\")\n\t}\n\tkey, err := x509.ParsePKCS1PrivateKey(block.Bytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn key\n}\n\nvar testKey = mustLoad(`-----BEGIN RSA PRIVATE 
KEY-----\nMIIEogIBAAKCAQEArmoiX5G36MKPiVGS1sicruEaGRrbhPbIKOf97aGGQRjXVngo\nKnwd2L4T9CRyABgQm3tLHHcT5crODoy46wX2g9onTZWViWWuhJ5wxXNmUbCAPWHb\nj9SunW53WuLYZ\/IJLNZt5XYCAFPjAakWp8uMuuDwWo5EyFaw85X3FSMhVmmaYDd0\ncn+1H4+NS\/52wX7tWmyvGUNJ8lzjFAnnOtBJByvkyIC7HDphkLQV4j\/\/sMNY1mPX\nHbsYgFv2J\/LIJtkjdYO2UoDhZG3Gvj16fMy2JE2owA8IX4\/s+XAmA2PiTfd0J5b4\ndrAKEcdDl83G6L3depEkTkfvp0ZLsh9xupAvIwIDAQABAoIBABKGgWonPyKA7+AF\nAxS\/MC0\/CZebC6\/+ylnV8lm4K1tkuRKdJp8EmeL4pYPsDxPFepYZLWwzlbB1rxdK\niSWld36fwEb0WXLDkxrQ\/Wdrj3Wjyqs6ZqjLTVS5dAH6UEQSKDlT+U5DD4lbX6RA\ngoCGFUeQNtdXfyTMWHU2+4yKM7NKzUpczFky+0d10Mg0ANj3\/4IILdr3hqkmMSI9\n1TB9ksWBXJxt3nGxAjzSFihQFUlc231cey\/HhYbvAX5fN0xhLxOk88adDcdXE7br\n3Ser1q6XaaFQSMj4oi1+h3RAT9MUjJ6johEqjw0PbEZtOqXvA1x5vfFdei6SqgKn\nAm3BspkCgYEA2lIiKEkT\/Je6ZH4Omhv9atbGoBdETAstL3FnNQjkyVau9f6bxQkl\n4\/sz985JpaiasORQBiTGY8JDT\/hXjROkut91agi2Vafhr29L\/mto7KZglfDsT4b2\n9z\/EZH8wHw7eYhvdoBbMbqNDSI8RrGa4mpLpuN+E0wsFTzSZEL+QMQUCgYEAzIQh\nxnreQvDAhNradMqLmxRpayn1ORaPReD4\/off+mi7hZRLKtP0iNgEVEWHJ6HEqqi1\nr38XAc8ap\/lfOVMar2MLyCFOhYspdHZ+TGLZfr8gg\/Fzeq9IRGKYadmIKVwjMeyH\nREPqg1tyrvMOE0HI5oqkko8JTDJ0OyVC0Vc6+AcCgYAqCzkywugLc\/jcU35iZVOH\nWLdFq1Vmw5w\/D7rNdtoAgCYPj6nV5y4Z2o2mgl6ifXbU7BMRK9Hc8lNeOjg6HfdS\nWahV9DmRA1SuIWPkKjE5qczd81i+9AHpmakrpWbSBF4FTNKAewOBpwVVGuBPcDTK\n59IE3V7J+cxa9YkotYuCNQKBgCwGla7AbHBEm2z+H+DcaUktD7R+B8gOTzFfyLoi\nTdj+CsAquDO0BQQgXG43uWySql+CifoJhc5h4v8d853HggsXa0XdxaWB256yk2Wm\nMePTCRDePVm\/ufLetqiyp1kf+IOaw1Oyux0j5oA62mDS3Iikd+EE4Z+BjPvefY\/L\nE2qpAoGAZo5Wwwk7q8b1n9n\/ACh4LpE+QgbFdlJxlfFLJCKstl37atzS8UewOSZj\nFDWV28nTP9sqbtsmU8Tem2jzMvZ7C\/Q0AuDoKELFUpux8shm8wfIhyaPnXUGZoAZ\nNp4vUwMSYV5mopESLWOg3loBxKyLGFtgGKVCjGiQvy6zISQ4fQo=\n-----END RSA PRIVATE KEY-----`)\n\nfunc newTestServer(path string) (*httptest.Server, *Server) {\n\tvar server *Server\n\ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tserver.ServeHTTP(w, r)\n\t}))\n\ts.URL = s.URL + path\n\tconfig := Config{\n\t\tIssuer: s.URL,\n\t\tStorage: memory.New(),\n\t\tConnectors: []Connector{\n\t\t\t{\n\t\t\t\tID: \"mock\",\n\t\t\t\tDisplayName: \"Mock\",\n\t\t\t\tConnector: mock.New(),\n\t\t\t},\n\t\t},\n\t}\n\tvar err error\n\tif server, err = newServer(config, staticRotationStrategy(testKey)); err != nil {\n\t\tpanic(err)\n\t}\n\tserver.skipApproval = true \/\/ Don't prompt for approval, just immediately redirect with code.\n\treturn s, server\n}\n\nfunc TestNewTestServer(t *testing.T) {\n\tnewTestServer(\"\")\n}\n\nfunc TestDiscovery(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\thttpServer, _ := newTestServer(\"\/nonrootpath\")\n\tdefer httpServer.Close()\n\n\tp, err := oidc.NewProvider(ctx, httpServer.URL)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get provider: %v\", err)\n\t}\n\trequired := []struct {\n\t\tname, val string\n\t}{\n\t\t{\"issuer\", p.Issuer},\n\t\t{\"authorization_endpoint\", p.AuthURL},\n\t\t{\"token_endpoint\", p.TokenURL},\n\t\t{\"jwks_uri\", p.JWKSURL},\n\t}\n\tfor _, field := range required {\n\t\tif field.val == \"\" {\n\t\t\tt.Errorf(\"server discovery is missing required field %q\", field.name)\n\t\t}\n\t}\n}\n\nfunc TestOAuth2Flow(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\thttpServer, s := newTestServer(\"\/nonrootpath\")\n\tdefer httpServer.Close()\n\n\tp, err := oidc.NewProvider(ctx, httpServer.URL)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get provider: %v\", err)\n\t}\n\n\tvar (\n\t\treqDump, respDump []byte\n\t\tgotCode 
bool\n\t\tstate = \"a_state\"\n\t)\n\tdefer func() {\n\t\tif !gotCode {\n\t\t\tt.Errorf(\"never got a code in callback\\n%s\\n%s\", reqDump, respDump)\n\t\t}\n\t}()\n\n\tvar oauth2Config *oauth2.Config\n\toauth2Server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == \"\/callback\" {\n\t\t\tq := r.URL.Query()\n\t\t\tif errType := q.Get(\"error\"); errType != \"\" {\n\t\t\t\tif desc := q.Get(\"error_description\"); desc != \"\" {\n\t\t\t\t\tt.Errorf(\"got error from server %s: %s\", errType, desc)\n\t\t\t\t} else {\n\t\t\t\t\tt.Errorf(\"got error from server %s\", errType)\n\t\t\t\t}\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif code := q.Get(\"code\"); code != \"\" {\n\t\t\t\tgotCode = true\n\t\t\t\ttoken, err := oauth2Config.Exchange(ctx, code)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"failed to exchange code for token: %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tidToken, ok := token.Extra(\"id_token\").(string)\n\t\t\t\tif !ok {\n\t\t\t\t\tt.Errorf(\"no id token found: %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t\/\/ TODO(ericchiang): validate id token\n\t\t\t\t_ = idToken\n\n\t\t\t\ttoken.Expiry = time.Now().Add(time.Second * -10)\n\t\t\t\tif token.Valid() {\n\t\t\t\t\tt.Errorf(\"token shouldn't be valid\")\n\t\t\t\t}\n\n\t\t\t\tnewToken, err := oauth2Config.TokenSource(ctx, token).Token()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"failed to refresh token: %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif token.RefreshToken == newToken.RefreshToken {\n\t\t\t\t\tt.Errorf(\"old refresh token was the same as the new token %q\", token.RefreshToken)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif gotState := q.Get(\"state\"); gotState != state {\n\t\t\t\tt.Errorf(\"state did not match, want=%q got=%q\", state, gotState)\n\t\t\t}\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\treturn\n\t\t}\n\t\thttp.Redirect(w, r, oauth2Config.AuthCodeURL(state), http.StatusSeeOther)\n\t}))\n\n\tdefer oauth2Server.Close()\n\n\tredirectURL := oauth2Server.URL + \"\/callback\"\n\tclient := storage.Client{\n\t\tID: \"testclient\",\n\t\tSecret: \"testclientsecret\",\n\t\tRedirectURIs: []string{redirectURL},\n\t}\n\tif err := s.storage.CreateClient(client); err != nil {\n\t\tt.Fatalf(\"failed to create client: %v\", err)\n\t}\n\n\toauth2Config = &oauth2.Config{\n\t\tClientID: client.ID,\n\t\tClientSecret: client.Secret,\n\t\tEndpoint: p.Endpoint(),\n\t\tScopes: []string{oidc.ScopeOpenID, \"profile\", \"email\", \"offline_access\"},\n\t\tRedirectURL: redirectURL,\n\t}\n\n\tresp, err := http.Get(oauth2Server.URL + \"\/login\")\n\tif err != nil {\n\t\tt.Fatalf(\"get failed: %v\", err)\n\t}\n\tif reqDump, err = httputil.DumpRequest(resp.Request, false); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif respDump, err = httputil.DumpResponse(resp, true); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\ntype storageWithKeysTrigger struct {\n\tstorage.Storage\n\tf func()\n}\n\nfunc (s storageWithKeysTrigger) GetKeys() (storage.Keys, error) {\n\ts.f()\n\treturn s.Storage.GetKeys()\n}\n\nfunc TestKeyCacher(t *testing.T) {\n\ttNow := time.Now()\n\tnow := func() time.Time { return tNow }\n\n\ts := memory.New()\n\n\ttests := []struct {\n\t\tbefore func()\n\t\twantCallToStorage bool\n\t}{\n\t\t{\n\t\t\tbefore: func() {},\n\t\t\twantCallToStorage: true,\n\t\t},\n\t\t{\n\t\t\tbefore: func() {\n\t\t\t\ts.UpdateKeys(func(old storage.Keys) (storage.Keys, error) {\n\t\t\t\t\told.NextRotation = tNow.Add(time.Minute)\n\t\t\t\t\treturn old, 
nil\n\t\t\t\t})\n\t\t\t},\n\t\t\twantCallToStorage: true,\n\t\t},\n\t\t{\n\t\t\tbefore: func() {},\n\t\t\twantCallToStorage: false,\n\t\t},\n\t\t{\n\t\t\tbefore: func() {\n\t\t\t\ttNow = tNow.Add(time.Hour)\n\t\t\t},\n\t\t\twantCallToStorage: true,\n\t\t},\n\t\t{\n\t\t\tbefore: func() {\n\t\t\t\ttNow = tNow.Add(time.Hour)\n\t\t\t\ts.UpdateKeys(func(old storage.Keys) (storage.Keys, error) {\n\t\t\t\t\told.NextRotation = tNow.Add(time.Minute)\n\t\t\t\t\treturn old, nil\n\t\t\t\t})\n\t\t\t},\n\t\t\twantCallToStorage: true,\n\t\t},\n\t\t{\n\t\t\tbefore: func() {},\n\t\t\twantCallToStorage: false,\n\t\t},\n\t}\n\n\tgotCall := false\n\ts = newKeyCacher(storageWithKeysTrigger{s, func() { gotCall = true }}, now)\n\tfor i, tc := range tests {\n\t\tgotCall = false\n\t\ttc.before()\n\t\ts.GetKeys()\n\t\tif gotCall != tc.wantCallToStorage {\n\t\t\tt.Errorf(\"case %d: expected call to storage=%t got call to storage=%t\", i, tc.wantCallToStorage, gotCall)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package servergroup\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\tconfig_util \"github.com\/prometheus\/common\/config\"\n\t\"github.com\/prometheus\/common\/model\"\n\t\"github.com\/prometheus\/prometheus\/config\"\n\tsd_config \"github.com\/prometheus\/prometheus\/discovery\/config\"\n)\n\nvar (\n\t\/\/ DefaultConfig is the Default base promxy configuration\n\tDefaultConfig = Config{\n\t\tAntiAffinity: time.Second * 10,\n\t\tScheme: \"http\",\n\t\tHTTPConfig: HTTPClientConfig{\n\t\t\tDialTimeout: time.Millisecond * 2000, \/\/ Default dial timeout of 2s\n\t\t},\n\t}\n)\n\n\/\/ Config is the configuration for a ServerGroup that promxy will talk to.\n\/\/ This is where the vast majority of options exist.\ntype Config struct {\n\t\/\/ RemoteRead directs promxy to load data (from the storage API) through the\n\t\/\/ remoteread API on prom.\n\t\/\/ Pros:\n\t\/\/ - StaleNaNs work\n\t\/\/ - ~2x faster (in my local testing, more so if you are using default JSON marshaler in prom)\n\t\/\/\n\t\/\/ Cons:\n\t\/\/ - proto marshaling prom side doesn't stream, so the data being sent\n\t\/\/ over the wire will be 2x its size in memory on the remote prom host.\n\t\/\/ - \"experimental\" API (according to docs) -- meaning this might break\n\t\/\/ without much (if any) warning\n\t\/\/\n\t\/\/ Upstream prom added a StaleNaN to determine if a given timeseries has gone\n\t\/\/ NaN -- the problem being that for range vectors they filter out all \"stale\" samples\n\t\/\/ meaning that it isn't possible to get a \"raw\" dump of data through the query\/query_range v1 API\n\t\/\/ The only option that exists in reality is the \"remote read\" API -- which suffers\n\t\/\/ from the same memory-ballooning problems that the HTTP+JSON API originally had.\n\t\/\/ It has **less** of a problem (it's 2x memory instead of 14x) so it is a viable option.\n\tRemoteRead bool `yaml:\"remote_read\"`\n\t\/\/ HTTP client config for promxy to use when connecting to the various server_groups\n\t\/\/ this is the same config as prometheus\n\tHTTPConfig HTTPClientConfig `yaml:\"http_client\"`\n\t\/\/ Scheme defines how promxy talks to this server group (http, https, etc.)\n\tScheme string `yaml:\"scheme\"`\n\t\/\/ Labels is a set of labels that will be added to all metrics retrieved\n\t\/\/ from this server group\n\tLabels model.LabelSet `json:\"labels\"`\n\t\/\/ RelabelConfigs are similar in function and identical in configuration to prometheus'\n\t\/\/ relabel config for scrape jobs. 
The difference here being that the source labels\n\t\/\/ you can pull from are from the downstream servergroup target and the labels you are\n\t\/\/ relabeling are that of the timeseries being returned. This allows you to mutate the\n\t\/\/ labelsets returned by that target at runtime.\n\t\/\/ To further illustrate the difference we'll look at an example:\n\t\/\/\n\t\/\/ relabel_configs:\n\t\/\/ - source_labels: [__meta_consul_tags]\n\t\/\/ regex: '.*,prod,.*'\n\t\/\/ action: keep\n\t\/\/ - source_labels: [__meta_consul_dc]\n\t\/\/ regex: '.+'\n\t\/\/ action: replace\n\t\/\/ target_label: datacenter\n\t\/\/\n\t\/\/ If we saw this in a scrape-config we would expect:\n\t\/\/ (1) the scrape would only target hosts with a prod consul label\n\t\/\/ (2) it would add a label to all returned series of datacenter with the value set to whatever the value of __meta_consul_dc was.\n\t\/\/\n\t\/\/ If we saw this same config in promxy (pointing at prometheus hosts instead of some exporter), we'd expect a similar behavior:\n\t\/\/ (1) only targets with the prod consul label would be included in the servergroup\n\t\/\/ (2) it would add a label to all returned series of this servergroup of datacenter with the value set to whatever the value of __meta_consul_dc was.\n\t\/\/\n\t\/\/ So in reality it's \"the same\", the difference is in prometheus these apply to the labels\/targets of a scrape job,\n\t\/\/ in promxy they apply to the prometheus hosts in the servergroup - but the behavior is the same.\n\tRelabelConfigs []*config.RelabelConfig `yaml:\"relabel_configs,omitempty\"`\n\t\/\/ Hosts is a set of ServiceDiscoveryConfig options that allow promxy to discover\n\t\/\/ all hosts in the server_group\n\tHosts sd_config.ServiceDiscoveryConfig `yaml:\",inline\"`\n\t\/\/ PathPrefix to prepend to all queries to hosts in this servergroup\n\tPathPrefix string `yaml:\"path_prefix\"`\n\t\/\/ TODO cache this as a model.Time after unmarshal\n\t\/\/ AntiAffinity defines how large of a gap in the timeseries will cause promxy\n\t\/\/ to merge series from 2 hosts in a server_group. This is required for a couple of reasons\n\t\/\/ (1) Promxy cannot make assumptions on downstream clock-drift and\n\t\/\/ (2) two prometheus hosts scraping the same target may have different times\n\t\/\/ #2 is caused by prometheus storing the time of the scrape as the time the scrape **starts**.\n\t\/\/ in practice this is actually quite frequent as there are a variety of situations that\n\t\/\/ cause variable scrape completion time (slow exporter, serial exporter, network latency, etc.)\n\t\/\/ any one of these can cause the resulting data in prometheus to have the same time but in reality\n\t\/\/ come from different points in time. 
Best practice for this value is to set it to your scrape interval\n\tAntiAffinity time.Duration `yaml:\"anti_affinity,omitempty\"`\n\n\t\/\/ IgnoreError will hide all errors from this given servergroup\n\tIgnoreError bool `yaml:\"ignore_error\"`\n\n\t\/\/ RelativeTimeRangeConfig defines a relative time range that this servergroup will respond to\n\t\/\/ An example use-case would be if a specific servergroup was long-term storage, it might only\n\t\/\/ have data 3d old and retain 90d of data.\n\t*RelativeTimeRangeConfig `yaml:\"relative_time_range\"`\n\n\t\/\/ AbsoluteTimeRangeConfig defines an absolute time range that this servergroup will respond to\n\t\/\/ An example use-case would be if a specific servergroup was \"deprecated\" and wasn't getting\n\t\/\/ any new data after a specific given point in time\n\t*AbsoluteTimeRangeConfig `yaml:\"absolute_time_range\"`\n}\n\n\/\/ GetScheme returns the scheme for this servergroup\nfunc (c *Config) GetScheme() string {\n\treturn c.Scheme\n}\n\n\/\/ GetAntiAffinity returns the AntiAffinity time for this servergroup\nfunc (c *Config) GetAntiAffinity() model.Time {\n\treturn model.TimeFromUnix(int64((c.AntiAffinity).Seconds()))\n}\n\n\/\/ UnmarshalYAML implements the yaml.Unmarshaler interface.\nfunc (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {\n\t*c = DefaultConfig\n\t\/\/ We want to set c to the defaults and then overwrite it with the input.\n\t\/\/ To make unmarshal fill the plain data struct rather than calling UnmarshalYAML\n\t\/\/ again, we have to hide it using a type indirection.\n\ttype plain Config\n\treturn unmarshal((*plain)(c))\n}\n\n\/\/ HTTPClientConfig extends prometheus' HTTPClientConfig\ntype HTTPClientConfig struct {\n\tDialTimeout time.Duration `yaml:\"dial_timeout\"`\n\tHTTPConfig config_util.HTTPClientConfig `yaml:\",inline\"`\n}\n\n\/\/ TODO: validate config\ntype RelativeTimeRangeConfig struct {\n\tStart *time.Duration `yaml:\"start\"`\n\tEnd *time.Duration `yaml:\"end\"`\n}\n\n\/\/ UnmarshalYAML implements the yaml.Unmarshaler interface.\nfunc (tr *RelativeTimeRangeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {\n\ttype plain RelativeTimeRangeConfig\n\tif err := unmarshal((*plain)(tr)); err != nil {\n\t\treturn err\n\t}\n\n\treturn tr.Validate()\n}\n\nfunc (tr *RelativeTimeRangeConfig) Validate() error {\n\tif tr.End != nil && tr.Start != nil && *tr.End < *tr.Start {\n\t\treturn fmt.Errorf(\"RelativeTimeRangeConfig: End must be after start\")\n\t}\n\treturn nil\n}\n\n\/\/ TODO: validate config\ntype AbsoluteTimeRangeConfig struct {\n\tStart time.Time `yaml:\"start\"`\n\tEnd time.Time `yaml:\"end\"`\n}\n\n\/\/ UnmarshalYAML implements the yaml.Unmarshaler interface.\nfunc (tr *AbsoluteTimeRangeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {\n\ttype plain AbsoluteTimeRangeConfig\n\tif err := unmarshal((*plain)(tr)); err != nil {\n\t\treturn err\n\t}\n\n\treturn tr.Validate()\n}\n\nfunc (tr *AbsoluteTimeRangeConfig) Validate() error {\n\tif !tr.Start.IsZero() && !tr.End.IsZero() && tr.End.Before(tr.Start) {\n\t\treturn fmt.Errorf(\"AbsoluteTimeRangeConfig: End must be after start\")\n\t}\n\treturn nil\n}\n<commit_msg>Lint cleanup<commit_after>package servergroup\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\tconfig_util \"github.com\/prometheus\/common\/config\"\n\t\"github.com\/prometheus\/common\/model\"\n\t\"github.com\/prometheus\/prometheus\/config\"\n\tsd_config \"github.com\/prometheus\/prometheus\/discovery\/config\"\n)\n\nvar (\n\t\/\/ DefaultConfig is the Default 
base promxy configuration\n\tDefaultConfig = Config{\n\t\tAntiAffinity: time.Second * 10,\n\t\tScheme: \"http\",\n\t\tHTTPConfig: HTTPClientConfig{\n\t\t\tDialTimeout: time.Millisecond * 2000, \/\/ Default dial timeout of 2s\n\t\t},\n\t}\n)\n\n\/\/ Config is the configuration for a ServerGroup that promxy will talk to.\n\/\/ This is where the vast majority of options exist.\ntype Config struct {\n\t\/\/ RemoteRead directs promxy to load data (from the storage API) through the\n\t\/\/ remoteread API on prom.\n\t\/\/ Pros:\n\t\/\/ - StaleNaNs work\n\t\/\/ - ~2x faster (in my local testing, more so if you are using default JSON marshaler in prom)\n\t\/\/\n\t\/\/ Cons:\n\t\/\/ - proto marshaling prom side doesn't stream, so the data being sent\n\t\/\/ over the wire will be 2x its size in memory on the remote prom host.\n\t\/\/ - \"experimental\" API (according to docs) -- meaning this might break\n\t\/\/ without much (if any) warning\n\t\/\/\n\t\/\/ Upstream prom added a StaleNaN to determine if a given timeseries has gone\n\t\/\/ NaN -- the problem being that for range vectors they filter out all \"stale\" samples\n\t\/\/ meaning that it isn't possible to get a \"raw\" dump of data through the query\/query_range v1 API\n\t\/\/ The only option that exists in reality is the \"remote read\" API -- which suffers\n\t\/\/ from the same memory-ballooning problems that the HTTP+JSON API originally had.\n\t\/\/ It has **less** of a problem (it's 2x memory instead of 14x) so it is a viable option.\n\tRemoteRead bool `yaml:\"remote_read\"`\n\t\/\/ HTTP client config for promxy to use when connecting to the various server_groups\n\t\/\/ this is the same config as prometheus\n\tHTTPConfig HTTPClientConfig `yaml:\"http_client\"`\n\t\/\/ Scheme defines how promxy talks to this server group (http, https, etc.)\n\tScheme string `yaml:\"scheme\"`\n\t\/\/ Labels is a set of labels that will be added to all metrics retrieved\n\t\/\/ from this server group\n\tLabels model.LabelSet `json:\"labels\"`\n\t\/\/ RelabelConfigs are similar in function and identical in configuration to prometheus'\n\t\/\/ relabel config for scrape jobs. The difference here being that the source labels\n\t\/\/ you can pull from are from the downstream servergroup target and the labels you are\n\t\/\/ relabeling are that of the timeseries being returned. 
This allows you to mutate the\n\t\/\/ labelsets returned by that target at runtime.\n\t\/\/ To further illustrate the difference we'll look at an example:\n\t\/\/\n\t\/\/ relabel_configs:\n\t\/\/ - source_labels: [__meta_consul_tags]\n\t\/\/ regex: '.*,prod,.*'\n\t\/\/ action: keep\n\t\/\/ - source_labels: [__meta_consul_dc]\n\t\/\/ regex: '.+'\n\t\/\/ action: replace\n\t\/\/ target_label: datacenter\n\t\/\/\n\t\/\/ If we saw this in a scrape-config we would expect:\n\t\/\/ (1) the scrape would only target hosts with a prod consul label\n\t\/\/ (2) it would add a label to all returned series of datacenter with the value set to whatever the value of __meta_consul_dc was.\n\t\/\/\n\t\/\/ If we saw this same config in promxy (pointing at prometheus hosts instead of some exporter), we'd expect a similar behavior:\n\t\/\/ (1) only targets with the prod consul label would be included in the servergroup\n\t\/\/ (2) it would add a label to all returned series of this servergroup of datacenter with the value set to whatever the value of __meta_consul_dc was.\n\t\/\/\n\t\/\/ So in reality it's \"the same\", the difference is in prometheus these apply to the labels\/targets of a scrape job,\n\t\/\/ in promxy they apply to the prometheus hosts in the servergroup - but the behavior is the same.\n\tRelabelConfigs []*config.RelabelConfig `yaml:\"relabel_configs,omitempty\"`\n\t\/\/ Hosts is a set of ServiceDiscoveryConfig options that allow promxy to discover\n\t\/\/ all hosts in the server_group\n\tHosts sd_config.ServiceDiscoveryConfig `yaml:\",inline\"`\n\t\/\/ PathPrefix to prepend to all queries to hosts in this servergroup\n\tPathPrefix string `yaml:\"path_prefix\"`\n\t\/\/ TODO cache this as a model.Time after unmarshal\n\t\/\/ AntiAffinity defines how large of a gap in the timeseries will cause promxy\n\t\/\/ to merge series from 2 hosts in a server_group. This is required for a couple of reasons\n\t\/\/ (1) Promxy cannot make assumptions on downstream clock-drift and\n\t\/\/ (2) two prometheus hosts scraping the same target may have different times\n\t\/\/ #2 is caused by prometheus storing the time of the scrape as the time the scrape **starts**.\n\t\/\/ in practice this is actually quite frequent as there are a variety of situations that\n\t\/\/ cause variable scrape completion time (slow exporter, serial exporter, network latency, etc.)\n\t\/\/ any one of these can cause the resulting data in prometheus to have the same time but in reality\n\t\/\/ come from different points in time. 
Best practice for this value is to set it to your scrape interval\n\tAntiAffinity time.Duration `yaml:\"anti_affinity,omitempty\"`\n\n\t\/\/ IgnoreError will hide all errors from this given servergroup\n\tIgnoreError bool `yaml:\"ignore_error\"`\n\n\t\/\/ RelativeTimeRangeConfig defines a relative time range that this servergroup will respond to\n\t\/\/ An example use-case would be if a specific servergroup was long-term storage, it might only\n\t\/\/ have data 3d old and retain 90d of data.\n\t*RelativeTimeRangeConfig `yaml:\"relative_time_range\"`\n\n\t\/\/ AbsoluteTimeRangeConfig defines an absolute time range that this servergroup will respond to\n\t\/\/ An example use-case would be if a specific servergroup was \"deprecated\" and wasn't getting\n\t\/\/ any new data after a specific given point in time\n\t*AbsoluteTimeRangeConfig `yaml:\"absolute_time_range\"`\n}\n\n\/\/ GetScheme returns the scheme for this servergroup\nfunc (c *Config) GetScheme() string {\n\treturn c.Scheme\n}\n\n\/\/ GetAntiAffinity returns the AntiAffinity time for this servergroup\nfunc (c *Config) GetAntiAffinity() model.Time {\n\treturn model.TimeFromUnix(int64((c.AntiAffinity).Seconds()))\n}\n\n\/\/ UnmarshalYAML implements the yaml.Unmarshaler interface.\nfunc (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {\n\t*c = DefaultConfig\n\t\/\/ We want to set c to the defaults and then overwrite it with the input.\n\t\/\/ To make unmarshal fill the plain data struct rather than calling UnmarshalYAML\n\t\/\/ again, we have to hide it using a type indirection.\n\ttype plain Config\n\treturn unmarshal((*plain)(c))\n}\n\n\/\/ HTTPClientConfig extends prometheus' HTTPClientConfig\ntype HTTPClientConfig struct {\n\tDialTimeout time.Duration `yaml:\"dial_timeout\"`\n\tHTTPConfig config_util.HTTPClientConfig `yaml:\",inline\"`\n}\n\n\/\/ RelativeTimeRangeConfig configures durations relative from \"now\" to define\n\/\/ a servergroup's time range\ntype RelativeTimeRangeConfig struct {\n\tStart *time.Duration `yaml:\"start\"`\n\tEnd *time.Duration `yaml:\"end\"`\n}\n\n\/\/ UnmarshalYAML implements the yaml.Unmarshaler interface.\nfunc (tr *RelativeTimeRangeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {\n\ttype plain RelativeTimeRangeConfig\n\tif err := unmarshal((*plain)(tr)); err != nil {\n\t\treturn err\n\t}\n\n\treturn tr.validate()\n}\n\nfunc (tr *RelativeTimeRangeConfig) validate() error {\n\tif tr.End != nil && tr.Start != nil && *tr.End < *tr.Start {\n\t\treturn fmt.Errorf(\"RelativeTimeRangeConfig: End must be after start\")\n\t}\n\treturn nil\n}\n\n\/\/ AbsoluteTimeRangeConfig contains absolute times to define a servergroup's time range\ntype AbsoluteTimeRangeConfig struct {\n\tStart time.Time `yaml:\"start\"`\n\tEnd time.Time `yaml:\"end\"`\n}\n\n\/\/ UnmarshalYAML implements the yaml.Unmarshaler interface.\nfunc (tr *AbsoluteTimeRangeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {\n\ttype plain AbsoluteTimeRangeConfig\n\tif err := unmarshal((*plain)(tr)); err != nil {\n\t\treturn err\n\t}\n\n\treturn tr.validate()\n}\n\nfunc (tr *AbsoluteTimeRangeConfig) validate() error {\n\tif !tr.Start.IsZero() && !tr.End.IsZero() && tr.End.Before(tr.Start) {\n\t\treturn fmt.Errorf(\"AbsoluteTimeRangeConfig: End must be after start\")\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package jobmonitor contains types and functions for aggregation job monitoring.\npackage jobmonitor\n\nimport 
(\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"cloud.google.com\/go\/firestore\"\n)\n\n\/\/ Paths should be used when writing to Firestore.\nconst (\n\tProdPath = \"jobs\"\n\tTestPath = \"jobs-test\"\n)\n\n\/\/ PipelineJob represent a Beam pipeline job on an aggregator for a certain level of a aggregation job.\ntype PipelineJob struct {\n\tCreated time.Time `firestore:\"created,omitempty\"`\n\tMessage string `firestore:\"message,omitempty\"`\n\tResult string `firestore:\"result,omitempty\"`\n\tStatus string `firestore:\"status,omitempty\"`\n\tUpdated time.Time `firestore:\"updated,omitempty\"`\n}\n\n\/\/ AggregatorJobs contains the pipeline jobs for different hierarchical levels.\ntype AggregatorJobs struct {\n\t\/\/ The sub jobs are keyed by the 0-based level.\n\tLevelJobs map[int]*PipelineJob\n}\n\n\/\/ AggregationJob represent an aggregation job.\ntype AggregationJob struct {\n\t\/\/ The aggregator is represented by its origin string.\n\tAggregators map[string]*AggregatorJobs\n\t\/\/ Overall status of a job.\n\tCreated time.Time `firestore:\"created,omitempty\"`\n}\n\n\/\/ WriteJobs writes a list of jobs to Firestore. The input jobs are keyed by the query IDs.\nfunc WriteJobs(ctx context.Context, client *firestore.Client, path string, jobs map[string]*AggregationJob) error {\n\tfor queryID, job := range jobs {\n\t\t_, err := client.Collection(path).Doc(queryID).Set(ctx, map[string]interface{}{\n\t\t\t\"created\": job.Created,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor origin, aggjobs := range job.Aggregators {\n\t\t\tfor level, subjob := range aggjobs.LevelJobs {\n\t\t\t\t_, err := client.Collection(path).Doc(queryID).Collection(\"aggregators\").Doc(origin).Collection(\"levels\").Doc(fmt.Sprintf(\"level-%d\", level)).Set(ctx, subjob)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Create aggregator docs in Firestore explicitly<commit_after>\/\/ Package jobmonitor contains types and functions for aggregation job monitoring.\npackage jobmonitor\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"cloud.google.com\/go\/firestore\"\n)\n\n\/\/ Paths should be used when writing to Firestore.\nconst (\n\tProdPath = \"jobs\"\n\tTestPath = \"jobs-test\"\n)\n\n\/\/ PipelineJob represent a Beam pipeline job on an aggregator for a certain level of a aggregation job.\ntype PipelineJob struct {\n\tCreated time.Time `firestore:\"created,omitempty\"`\n\tMessage string `firestore:\"message,omitempty\"`\n\tResult string `firestore:\"result,omitempty\"`\n\tStatus string `firestore:\"status,omitempty\"`\n\tUpdated time.Time `firestore:\"updated,omitempty\"`\n}\n\n\/\/ AggregatorJobs contains the pipeline jobs for different hierarchical levels.\ntype AggregatorJobs struct {\n\t\/\/ The sub jobs are keyed by the 0-based level.\n\tLevelJobs map[int]*PipelineJob\n}\n\n\/\/ AggregationJob represent an aggregation job.\ntype AggregationJob struct {\n\t\/\/ The aggregator is represented by its origin string.\n\tAggregators map[string]*AggregatorJobs\n\t\/\/ Overall status of a job.\n\tCreated time.Time `firestore:\"created,omitempty\"`\n}\n\n\/\/ WriteJobs writes a list of jobs to Firestore. 
The input jobs are keyed by the query IDs.\nfunc WriteJobs(ctx context.Context, client *firestore.Client, path string, jobs map[string]*AggregationJob) error {\n\tfor queryID, job := range jobs {\n\t\t_, err := client.Collection(path).Doc(queryID).Set(ctx, map[string]interface{}{\n\t\t\t\"created\": job.Created,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor origin, aggjobs := range job.Aggregators {\n\t\t\t_, err := client.Collection(path).Doc(queryID).Collection(\"aggregators\").Doc(origin).Create(ctx, map[string]interface{}{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor level, subjob := range aggjobs.LevelJobs {\n\t\t\t\t_, err := client.Collection(path).Doc(queryID).Collection(\"aggregators\").Doc(origin).Collection(\"levels\").Doc(fmt.Sprintf(\"level-%d\", level)).Set(ctx, subjob)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package mapreduce\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"testing\/iotest\"\n\n\t\"github.com\/pachyderm-io\/pfs\/lib\/btrfs\"\n\t\"github.com\/pachyderm-io\/pfs\/lib\/route\"\n\t\"github.com\/samalba\/dockerclient\"\n)\n\nvar retries int = 5\n\n\/\/ spinupContainer pulls image and starts a container from it with command. It\n\/\/ returns the container id or an error.\nfunc spinupContainer(image string, command []string) (string, error) {\n\tlog.Print(\"spinupContainer\", \" \", image, \" \", command)\n\tdocker, err := dockerclient.NewDockerClient(\"unix:\/\/\/var\/run\/docker.sock\", nil)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn \"\", err\n\t}\n\tif err := docker.PullImage(image, nil); err != nil {\n\t\tlog.Print(err)\n\t\treturn \"\", err\n\t}\n\n\tcontainerConfig := &dockerclient.ContainerConfig{Image: image, Cmd: command}\n\n\tcontainerId, err := docker.CreateContainer(containerConfig, \"\")\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn \"\", err\n\t}\n\n\tif err := docker.StartContainer(containerId, &dockerclient.HostConfig{}); err != nil {\n\t\tlog.Print(err)\n\t\treturn \"\", err\n\t}\n\n\treturn containerId, nil\n}\n\nfunc ipAddr(containerId string) (string, error) {\n\tdocker, err := dockerclient.NewDockerClient(\"unix:\/\/\/var\/run\/docker.sock\", nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tcontainerInfo, err := docker.InspectContainer(containerId)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn containerInfo.NetworkSettings.IpAddress, nil\n}\n\nfunc retry(f func() error, retries int, pause time.Duration) error {\n\tvar err error\n\tfor i := 0; i < retries; i++ {\n\t\terr = f()\n\t\tif err == nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\ttime.Sleep(pause)\n\t\t}\n\t}\n\treturn err\n}\n\n\/\/ contains checks if set contains val. 
It assumes that set has already been\n\/\/ sorted.\nfunc contains(set []string, val string) bool {\n\tindex := sort.SearchStrings(set, val)\n\treturn index < len(set) && set[index] == val\n}\n\ntype Job struct {\n\tType string `json:\"type\"`\n\tInput string `json:\"input\"`\n\tImage string `json:\"image\"`\n\tCommand []string `json:\"command\"`\n}\n\ntype materializeInfo struct {\n\tIn, Out, Branch, Commit string\n}\n\nfunc PrepJob(job Job, jobPath string, m materializeInfo) error {\n\tif err := btrfs.MkdirAll(path.Join(m.Out, m.Branch, jobPath)); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc Map(job Job, jobPath string, m materializeInfo, host string) error {\n\terr := PrepJob(job, path.Base(jobPath), m)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif job.Type != \"map\" {\n\t\treturn fmt.Errorf(\"Map called on a job of type \\\"%s\\\". Should be \\\"map\\\".\", job.Type)\n\t}\n\n\tinFiles, err := btrfs.ReadDir(path.Join(m.In, m.Commit, job.Input))\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Print(\"In Map for \", jobPath, \" len(inFiles) = \", len(inFiles))\n\n\tfiles := make(chan os.FileInfo, 20000)\n\n\t\/\/ spawn 100 worker goroutines\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\tfor i := 0; i < 100; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tfor inF := range files {\n\t\t\t\tinFile, err := btrfs.Open(path.Join(m.In, m.Commit, job.Input, inF.Name()))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer inFile.Close()\n\n\t\t\t\tvar resp *http.Response\n\t\t\t\terr = retry(func() error {\n\t\t\t\t\tlog.Print(\"Posting: \", inF.Name())\n\t\t\t\t\tresp, err = http.Post(\"http:\/\/\"+path.Join(host, inF.Name()), \"application\/text\", inFile)\n\t\t\t\t\treturn err\n\t\t\t\t}, 5, 200*time.Millisecond)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer resp.Body.Close()\n\n\t\t\t\toutFile, err := btrfs.Create(path.Join(m.Out, m.Branch, jobPath, inF.Name()))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer outFile.Close()\n\t\t\t\tif _, err := io.Copy(outFile, resp.Body); err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\tfor _, inF := range inFiles {\n\t\tfiles <- inF\n\t}\n\n\tclose(files)\n\n\treturn nil\n}\n\nfunc Reduce(job Job, jobPath string, m materializeInfo, host string, shard, modulos uint64) error {\n\tif (route.HashResource(path.Join(\"\/job\", jobPath)) % modulos) != shard {\n\t\t\/\/ This resource isn't supposed to be located on this machine.\n\t\treturn nil\n\t}\n\tlog.Print(\"Reduce: \", job, \" \", jobPath, \" \")\n\tif job.Type != \"reduce\" {\n\t\treturn fmt.Errorf(\"Reduce called on a job of type \\\"%s\\\". Should be \\\"reduce\\\".\", job.Type)\n\t}\n\n\t\/\/ Notice we're just passing \"host\" here. 
Multicast will fill in the host\n\t\/\/ field so we don't actually need to specify it.\n\treq, err := http.NewRequest(\"GET\", \"http:\/\/host\/\"+path.Join(job.Input, \"file\", \"*\")+\"?commit=\"+m.Commit, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_reader, err := route.Multicast(req, \"\/pfs\/master\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer _reader.Close()\n\treader := iotest.NewReadLogger(\"Reduce\", _reader)\n\n\tvar resp *http.Response\n\terr = retry(func() error {\n\t\tresp, err = http.Post(\"http:\/\/\"+path.Join(host, job.Input), \"application\/text\", reader)\n\t\treturn err\n\t}, 5, 200*time.Millisecond)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"Received error %s\", resp.Status)\n\t}\n\n\toutFile, err := btrfs.Create(path.Join(m.Out, m.Branch, jobPath))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer outFile.Close()\n\tif _, err := io.Copy(outFile, resp.Body); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\ntype jobCond struct {\n\tsync.Cond\n\tDone bool\n}\n\nvar jobs map[string]*jobCond = make(map[string]*jobCond)\nvar jobsAccess sync.Mutex\n\n\/\/ condKey returns the name of the condition variable for job.\nfunc condKey(in_repo, commit, job string) string {\n\treturn path.Join(in_repo, commit, job)\n}\n\nfunc ensureCond(name string) {\n\tjobsAccess.Lock()\n\tdefer jobsAccess.Unlock()\n\tif _, ok := jobs[name]; !ok {\n\t\tjobs[name] = &jobCond{sync.Cond{L: &sync.Mutex{}}, false}\n\t}\n}\n\nfunc broadcast(in_repo, commit, job string) {\n\tname := condKey(in_repo, commit, job)\n\tensureCond(name)\n\tjobs[name].L.Lock()\n\tjobs[name].Done = true\n\tjobs[name].Broadcast()\n\tjobs[name].L.Unlock()\n}\n\nfunc WaitJob(in_repo, commit, job string) {\n\tname := condKey(in_repo, commit, job)\n\tensureCond(name)\n\tjobs[name].L.Lock()\n\tfor !jobs[name].Done {\n\t\tjobs[name].Wait()\n\t}\n\tjobs[name].L.Unlock()\n}\n\n\/\/ Materialize parses the jobs found in `in_repo`\/`commit`\/`jobDir` and runs them\n\/\/ with `in_repo\/commit` as input, outputs the results to `out_repo`\/`branch`\n\/\/ and commits them as `out_repo`\/`commit`\nfunc Materialize(in_repo, branch, commit, out_repo, jobDir string, shard, modulos uint64) error {\n\tlog.Printf(\"Materialize: %s %s %s %s %s.\", in_repo, branch, commit, out_repo, jobDir)\n\t\/\/ We make sure that this function always commits so that we know the comp\n\t\/\/ repo stays in sync with the data repo.\n\tdefer func() {\n\t\tif err := btrfs.Commit(out_repo, commit, branch); err != nil {\n\t\t\tlog.Print(\"btrfs.Commit error in Materialize: \", err)\n\t\t}\n\t}()\n\t\/\/ First check if the jobs dir actually exists.\n\texists, err := btrfs.FileExists(path.Join(in_repo, commit, jobDir))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !exists {\n\t\t\/\/ Perfectly valid to have no jobs dir, it just means we have no work\n\t\t\/\/ to do.\n\t\treturn nil\n\t}\n\n\tdocker, err := dockerclient.NewDockerClient(\"unix:\/\/\/var\/run\/docker.sock\", nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tnewFiles, err := btrfs.NewFiles(in_repo, commit)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsort.Strings(newFiles)\n\n\tjobsPath := path.Join(in_repo, commit, jobDir)\n\tjobs, err := btrfs.ReadDir(jobsPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\tfor _, jobInfo := range jobs {\n\t\twg.Add(1)\n\t\tgo func(jobInfo os.FileInfo) {\n\t\t\tdefer wg.Done()\n\t\t\tdefer broadcast(in_repo, commit, jobInfo.Name())\n\t\t\tjobFile, err := 
btrfs.Open(path.Join(jobsPath, jobInfo.Name()))\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer jobFile.Close()\n\t\t\tdecoder := json.NewDecoder(jobFile)\n\t\t\tjob := Job{}\n\t\t\tif err = decoder.Decode(&job); err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Print(\"Job: \", job)\n\t\t\tm := materializeInfo{in_repo, out_repo, branch, commit}\n\n\t\t\tcontainerId, err := spinupContainer(job.Image, job.Command)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer docker.StopContainer(containerId, 5)\n\n\t\t\tcontainerAddr, err := ipAddr(containerId)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif job.Type == \"map\" {\n\t\t\t\terr := Map(job, jobInfo.Name(), m, containerAddr)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else if job.Type == \"reduce\" {\n\t\t\t\terr := Reduce(job, jobInfo.Name(), m, containerAddr, shard, modulos)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Job %s has unrecognized type: %s.\", jobInfo.Name(), job.Type)\n\t\t\t\treturn\n\t\t\t}\n\t\t}(jobInfo)\n\t}\n\treturn nil\n}\n<commit_msg>Adds more logging.<commit_after>package mapreduce\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"testing\/iotest\"\n\n\t\"github.com\/pachyderm-io\/pfs\/lib\/btrfs\"\n\t\"github.com\/pachyderm-io\/pfs\/lib\/route\"\n\t\"github.com\/samalba\/dockerclient\"\n)\n\nvar retries int = 5\n\n\/\/ spinupContainer pulls image and starts a container from it with command. It\n\/\/ returns the container id or an error.\nfunc spinupContainer(image string, command []string) (string, error) {\n\tlog.Print(\"spinupContainer\", \" \", image, \" \", command)\n\tdocker, err := dockerclient.NewDockerClient(\"unix:\/\/\/var\/run\/docker.sock\", nil)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn \"\", err\n\t}\n\tif err := docker.PullImage(image, nil); err != nil {\n\t\tlog.Print(\"Image: \", image, \" Err: \", err)\n\t\treturn \"\", err\n\t}\n\n\tcontainerConfig := &dockerclient.ContainerConfig{Image: image, Cmd: command}\n\n\tcontainerId, err := docker.CreateContainer(containerConfig, \"\")\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn \"\", err\n\t}\n\n\tif err := docker.StartContainer(containerId, &dockerclient.HostConfig{}); err != nil {\n\t\tlog.Print(err)\n\t\treturn \"\", err\n\t}\n\n\treturn containerId, nil\n}\n\nfunc ipAddr(containerId string) (string, error) {\n\tdocker, err := dockerclient.NewDockerClient(\"unix:\/\/\/var\/run\/docker.sock\", nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tcontainerInfo, err := docker.InspectContainer(containerId)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn containerInfo.NetworkSettings.IpAddress, nil\n}\n\nfunc retry(f func() error, retries int, pause time.Duration) error {\n\tvar err error\n\tfor i := 0; i < retries; i++ {\n\t\terr = f()\n\t\tif err == nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\ttime.Sleep(pause)\n\t\t}\n\t}\n\treturn err\n}\n\n\/\/ contains checks if set contains val. 
It assumes that set has already been\n\/\/ sorted.\nfunc contains(set []string, val string) bool {\n\tindex := sort.SearchStrings(set, val)\n\treturn index < len(set) && set[index] == val\n}\n\ntype Job struct {\n\tType string `json:\"type\"`\n\tInput string `json:\"input\"`\n\tImage string `json:\"image\"`\n\tCommand []string `json:\"command\"`\n}\n\ntype materializeInfo struct {\n\tIn, Out, Branch, Commit string\n}\n\nfunc PrepJob(job Job, jobPath string, m materializeInfo) error {\n\tif err := btrfs.MkdirAll(path.Join(m.Out, m.Branch, jobPath)); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc Map(job Job, jobPath string, m materializeInfo, host string) error {\n\terr := PrepJob(job, path.Base(jobPath), m)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif job.Type != \"map\" {\n\t\treturn fmt.Errorf(\"Map called on a job of type \\\"%s\\\". Should be \\\"map\\\".\", job.Type)\n\t}\n\n\tinFiles, err := btrfs.ReadDir(path.Join(m.In, m.Commit, job.Input))\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Print(\"In Map for \", jobPath, \" len(inFiles) = \", len(inFiles))\n\n\tfiles := make(chan os.FileInfo, 20000)\n\n\t\/\/ spawn 100 worker goroutines\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\tfor i := 0; i < 100; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tfor inF := range files {\n\t\t\t\tinFile, err := btrfs.Open(path.Join(m.In, m.Commit, job.Input, inF.Name()))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer inFile.Close()\n\n\t\t\t\tvar resp *http.Response\n\t\t\t\terr = retry(func() error {\n\t\t\t\t\tlog.Print(\"Posting: \", inF.Name())\n\t\t\t\t\tresp, err = http.Post(\"http:\/\/\"+path.Join(host, inF.Name()), \"application\/text\", inFile)\n\t\t\t\t\treturn err\n\t\t\t\t}, 5, 200*time.Millisecond)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer resp.Body.Close()\n\n\t\t\t\toutFile, err := btrfs.Create(path.Join(m.Out, m.Branch, jobPath, inF.Name()))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer outFile.Close()\n\t\t\t\tif _, err := io.Copy(outFile, resp.Body); err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\tfor _, inF := range inFiles {\n\t\tfiles <- inF\n\t}\n\n\tclose(files)\n\n\treturn nil\n}\n\nfunc Reduce(job Job, jobPath string, m materializeInfo, host string, shard, modulos uint64) error {\n\tif (route.HashResource(path.Join(\"\/job\", jobPath)) % modulos) != shard {\n\t\t\/\/ This resource isn't supposed to be located on this machine.\n\t\treturn nil\n\t}\n\tlog.Print(\"Reduce: \", job, \" \", jobPath, \" \")\n\tif job.Type != \"reduce\" {\n\t\treturn fmt.Errorf(\"Reduce called on a job of type \\\"%s\\\". Should be \\\"reduce\\\".\", job.Type)\n\t}\n\n\t\/\/ Notice we're just passing \"host\" here. 
\nfunc Reduce(job Job, jobPath string, m materializeInfo, host string, shard, modulos uint64) error {\n\tif (route.HashResource(path.Join(\"\/job\", jobPath)) % modulos) != shard {\n\t\t\/\/ This resource isn't supposed to be located on this machine.\n\t\treturn nil\n\t}\n\tlog.Print(\"Reduce: \", job, \" \", jobPath, \" \")\n\tif job.Type != \"reduce\" {\n\t\treturn fmt.Errorf(\"Reduce called on a job of type \\\"%s\\\". Should be \\\"reduce\\\".\", job.Type)\n\t}\n\n\t\/\/ Notice we're just passing \"host\" here. Multicast will fill in the host\n\t\/\/ field so we don't actually need to specify it.\n\treq, err := http.NewRequest(\"GET\", \"http:\/\/host\/\"+path.Join(job.Input, \"file\", \"*\")+\"?commit=\"+m.Commit, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_reader, err := route.Multicast(req, \"\/pfs\/master\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer _reader.Close()\n\treader := iotest.NewReadLogger(\"Reduce\", _reader)\n\n\tvar resp *http.Response\n\terr = retry(func() error {\n\t\tresp, err = http.Post(\"http:\/\/\"+path.Join(host, job.Input), \"application\/text\", reader)\n\t\treturn err\n\t}, 5, 200*time.Millisecond)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"Received error %s\", resp.Status)\n\t}\n\n\toutFile, err := btrfs.Create(path.Join(m.Out, m.Branch, jobPath))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer outFile.Close()\n\tif _, err := io.Copy(outFile, resp.Body); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\ntype jobCond struct {\n\tsync.Cond\n\tDone bool\n}\n\nvar jobs map[string]*jobCond = make(map[string]*jobCond)\nvar jobsAccess sync.Mutex\n\n\/\/ condKey returns the name of the condition variable for a job.\nfunc condKey(in_repo, commit, job string) string {\n\treturn path.Join(in_repo, commit, job)\n}\n\nfunc ensureCond(name string) {\n\tjobsAccess.Lock()\n\tdefer jobsAccess.Unlock()\n\tif _, ok := jobs[name]; !ok {\n\t\tjobs[name] = &jobCond{sync.Cond{L: &sync.Mutex{}}, false}\n\t}\n}\n\nfunc broadcast(in_repo, commit, job string) {\n\tname := condKey(in_repo, commit, job)\n\tensureCond(name)\n\tjobs[name].L.Lock()\n\tjobs[name].Done = true\n\tjobs[name].Broadcast()\n\tjobs[name].L.Unlock()\n}\n\nfunc WaitJob(in_repo, commit, job string) {\n\tname := condKey(in_repo, commit, job)\n\tensureCond(name)\n\tjobs[name].L.Lock()\n\tfor !jobs[name].Done {\n\t\tjobs[name].Wait()\n\t}\n\tjobs[name].L.Unlock()\n}\n\n\/\/ Materialize parses the jobs found in `in_repo`\/`commit`\/`jobDir`, runs them\n\/\/ with `in_repo\/commit` as input, outputs the results to `out_repo`\/`branch`\n\/\/ and commits them as `out_repo`\/`commit`\nfunc Materialize(in_repo, branch, commit, out_repo, jobDir string, shard, modulos uint64) error {\n\tlog.Printf(\"Materialize: %s %s %s %s %s.\", in_repo, branch, commit, out_repo, jobDir)\n\t\/\/ We make sure that this function always commits so that we know the comp\n\t\/\/ repo stays in sync with the data repo.\n\tdefer func() {\n\t\tif err := btrfs.Commit(out_repo, commit, branch); err != nil {\n\t\t\tlog.Print(\"btrfs.Commit error in Materialize: \", err)\n\t\t}\n\t}()\n\t\/\/ First check if the jobs dir actually exists.\n\texists, err := btrfs.FileExists(path.Join(in_repo, commit, jobDir))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !exists {\n\t\t\/\/ Perfectly valid to have no jobs dir, it just means we have no work\n\t\t\/\/ to do.\n\t\treturn nil\n\t}\n\n\tdocker, err := dockerclient.NewDockerClient(\"unix:\/\/\/var\/run\/docker.sock\", nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tnewFiles, err := btrfs.NewFiles(in_repo, commit)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsort.Strings(newFiles)\n\n\tjobsPath := path.Join(in_repo, commit, jobDir)\n\tjobs, err := btrfs.ReadDir(jobsPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\tfor _, jobInfo := range jobs {\n\t\twg.Add(1)\n\t\tgo func(jobInfo os.FileInfo) {\n\t\t\tdefer wg.Done()\n\t\t\tdefer broadcast(in_repo, commit, jobInfo.Name())\n\t\t\tjobFile, err := 
btrfs.Open(path.Join(jobsPath, jobInfo.Name()))\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer jobFile.Close()\n\t\t\tdecoder := json.NewDecoder(jobFile)\n\t\t\tjob := Job{}\n\t\t\tif err = decoder.Decode(&job); err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Print(\"Job: \", job)\n\t\t\tm := materializeInfo{in_repo, out_repo, branch, commit}\n\n\t\t\tcontainerId, err := spinupContainer(job.Image, job.Command)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer docker.StopContainer(containerId, 5)\n\n\t\t\tcontainerAddr, err := ipAddr(containerId)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif job.Type == \"map\" {\n\t\t\t\terr := Map(job, jobInfo.Name(), m, containerAddr)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else if job.Type == \"reduce\" {\n\t\t\t\terr := Reduce(job, jobInfo.Name(), m, containerAddr, shard, modulos)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Job %s has unrecognized type: %s.\", jobInfo.Name(), job.Type)\n\t\t\t\treturn\n\t\t\t}\n\t\t}(jobInfo)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/**********************************************************\\\n| |\n| hprose |\n| |\n| Official WebSite: http:\/\/www.hprose.com\/ |\n| http:\/\/www.hprose.org\/ |\n| |\n\\**********************************************************\/\n\/**********************************************************\\\n * *\n * rpc\/service_filter.go *\n * *\n * hprose service filter for Go. *\n * *\n * LastModified: Oct 27, 2016 *\n * Author: Ma Bingyao <andot@hprose.com> *\n * *\n\\**********************************************************\/\n\npackage jsonrpc\n\nimport (\n\t\"encoding\/json\"\n\n\t\"github.com\/hprose\/hprose-golang\/io\"\n\t\"github.com\/hprose\/hprose-golang\/rpc\"\n)\n\n\/\/ ServiceFilter is a JSONRPC Service Filter\ntype ServiceFilter struct{}\n\n\/\/ InputFilter for JSONRPC Service\nfunc (filter ServiceFilter) InputFilter(data []byte, context rpc.Context) []byte {\n\tif (len(data) > 0) && (data[0] == '[' || data[0] == '{') {\n\t\tvar requests []map[string]interface{}\n\t\tif data[0] == '[' {\n\t\t\tif err := json.Unmarshal(data, &requests); err != nil {\n\t\t\t\treturn data\n\t\t\t}\n\t\t} else {\n\t\t\trequests = make([]map[string]interface{}, 1)\n\t\t\tif err := json.Unmarshal(data, &requests[0]); err != nil {\n\t\t\t\treturn data\n\t\t\t}\n\t\t}\n\t\twriter := io.NewWriter(true)\n\t\tn := len(requests)\n\t\tresponses := make([]map[string]interface{}, n)\n\t\tfor i, request := range requests {\n\t\t\tresponse := make(map[string]interface{})\n\t\t\tif id, ok := request[\"id\"]; ok {\n\t\t\t\tresponse[\"id\"] = id\n\t\t\t} else {\n\t\t\t\tresponse[\"id\"] = nil\n\t\t\t}\n\t\t\tif version, ok := request[\"jsonrpc\"]; ok {\n\t\t\t\tresponse[\"jsonrpc\"] = version\n\t\t\t} else {\n\t\t\t\tif version, ok := request[\"version\"]; ok {\n\t\t\t\t\tresponse[\"version\"] = version\n\t\t\t\t}\n\t\t\t\tresponse[\"result\"] = nil\n\t\t\t\tresponse[\"error\"] = nil\n\t\t\t}\n\t\t\tresponses[i] = response\n\t\t\tif method, ok := request[\"method\"].(string); ok && method != \"\" {\n\t\t\t\twriter.WriteByte(io.TagCall)\n\t\t\t\twriter.WriteString(method)\n\t\t\t\tif params, ok := request[\"params\"].([]interface{}); ok && params != nil && len(params) > 0 
{\n\t\t\t\t\twriter.Serialize(params)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\twriter.WriteByte(io.TagEnd)\n\t\tdata = writer.Bytes()\n\t\tcontext.SetInterface(\"jsonrpc\", responses)\n\t}\n\treturn data\n}\n\n\/\/ OutputFilter for JSONRPC Service\nfunc (filter ServiceFilter) OutputFilter(data []byte, context rpc.Context) []byte {\n\tresponses, ok := context.GetInterface(\"jsonrpc\").([]map[string]interface{})\n\tif ok && responses != nil {\n\t\treader := io.NewReader(data, false)\n\t\treader.JSONCompatible = true\n\t\ttag, _ := reader.ReadByte()\n\t\tfor _, response := range responses {\n\t\t\tif tag == io.TagResult {\n\t\t\t\treader.Reset()\n\t\t\t\tvar result interface{}\n\t\t\t\treader.Unserialize(&result)\n\t\t\t\tresponse[\"result\"] = result\n\t\t\t\ttag, _ = reader.ReadByte()\n\t\t\t} else if tag == io.TagError {\n\t\t\t\treader.Reset()\n\t\t\t\terr := make(map[string]interface{})\n\t\t\t\terr[\"code\"] = -1\n\t\t\t\tmessage := reader.ReadString()\n\t\t\t\terr[\"message\"] = message\n\t\t\t\ttag, _ = reader.ReadByte()\n\t\t\t\tresponse[\"error\"] = err\n\t\t\t}\n\t\t\tif tag == io.TagEnd {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif len(responses) == 1 {\n\t\t\tdata, _ = json.Marshal(responses[0])\n\t\t} else {\n\t\t\tdata, _ = json.Marshal(responses)\n\t\t}\n\t}\n\treturn data\n}\n<commit_msg>Improved jsonrpc service filter<commit_after>\/**********************************************************\\\n| |\n| hprose |\n| |\n| Official WebSite: http:\/\/www.hprose.com\/ |\n| http:\/\/www.hprose.org\/ |\n| |\n\\**********************************************************\/\n\/**********************************************************\\\n * *\n * rpc\/service_filter.go *\n * *\n * hprose service filter for Go. *\n * *\n * LastModified: Oct 29, 2016 *\n * Author: Ma Bingyao <andot@hprose.com> *\n * *\n\\**********************************************************\/\n\npackage jsonrpc\n\nimport (\n\t\"encoding\/json\"\n\n\t\"github.com\/hprose\/hprose-golang\/io\"\n\t\"github.com\/hprose\/hprose-golang\/rpc\"\n)\n\n\/\/ ServiceFilter is a JSONRPC Service Filter\ntype ServiceFilter struct{}\n\nfunc createResponse(request map[string]interface{}) (response map[string]interface{}) {\n\tresponse = make(map[string]interface{})\n\tif id, ok := request[\"id\"]; ok {\n\t\tresponse[\"id\"] = id\n\t} else {\n\t\tresponse[\"id\"] = nil\n\t}\n\tif version, ok := request[\"jsonrpc\"]; ok {\n\t\tresponse[\"jsonrpc\"] = version\n\t} else {\n\t\tif version, ok := request[\"version\"]; ok {\n\t\t\tresponse[\"version\"] = version\n\t\t}\n\t\tresponse[\"result\"] = nil\n\t\tresponse[\"error\"] = nil\n\t}\n\treturn\n}\n\n\/\/ InputFilter for JSONRPC Service\nfunc (filter ServiceFilter) InputFilter(data []byte, context rpc.Context) []byte {\n\tif (len(data) > 0) && (data[0] == '[' || data[0] == '{') {\n\t\tvar requests []map[string]interface{}\n\t\tif data[0] == '[' {\n\t\t\tif err := json.Unmarshal(data, &requests); err != nil {\n\t\t\t\treturn data\n\t\t\t}\n\t\t} else {\n\t\t\trequests = make([]map[string]interface{}, 1)\n\t\t\tif err := json.Unmarshal(data, &requests[0]); err != nil {\n\t\t\t\treturn data\n\t\t\t}\n\t\t}\n\t\twriter := io.NewWriter(true)\n\t\tn := len(requests)\n\t\tresponses := make([]map[string]interface{}, n)\n\t\tfor i, request := range requests {\n\t\t\tresponses[i] = createResponse(request)\n\t\t\tif method, ok := request[\"method\"].(string); ok && method != \"\" {\n\t\t\t\twriter.WriteByte(io.TagCall)\n\t\t\t\twriter.WriteString(method)\n\t\t\t\tif params, ok := request[\"params\"].([]interface{}); 
ok && params != nil && len(params) > 0 {\n\t\t\t\t\twriter.Serialize(params)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\twriter.WriteByte(io.TagEnd)\n\t\tdata = writer.Bytes()\n\t\tcontext.SetInterface(\"jsonrpc\", responses)\n\t}\n\treturn data\n}\n\n\/\/ OutputFilter for JSONRPC Service\nfunc (filter ServiceFilter) OutputFilter(data []byte, context rpc.Context) []byte {\n\tresponses, ok := context.GetInterface(\"jsonrpc\").([]map[string]interface{})\n\tif ok && responses != nil {\n\t\treader := io.NewReader(data, false)\n\t\treader.JSONCompatible = true\n\t\ttag, _ := reader.ReadByte()\n\t\tfor _, response := range responses {\n\t\t\tif tag == io.TagResult {\n\t\t\t\treader.Reset()\n\t\t\t\tvar result interface{}\n\t\t\t\treader.Unserialize(&result)\n\t\t\t\tresponse[\"result\"] = result\n\t\t\t\ttag, _ = reader.ReadByte()\n\t\t\t} else if tag == io.TagError {\n\t\t\t\treader.Reset()\n\t\t\t\terr := make(map[string]interface{})\n\t\t\t\terr[\"code\"] = -1\n\t\t\t\tmessage := reader.ReadString()\n\t\t\t\terr[\"message\"] = message\n\t\t\t\ttag, _ = reader.ReadByte()\n\t\t\t\tresponse[\"error\"] = err\n\t\t\t}\n\t\t\tif tag == io.TagEnd {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif len(responses) == 1 {\n\t\t\tdata, _ = json.Marshal(responses[0])\n\t\t} else {\n\t\t\tdata, _ = json.Marshal(responses)\n\t\t}\n\t}\n\treturn data\n}\n<|endoftext|>"} {"text":"<commit_before>package rpc\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"xd\/lib\/bittorrent\/swarm\"\n\t\"xd\/lib\/config\"\n\t\"xd\/lib\/log\"\n\t\"xd\/lib\/rpc\"\n\t\"xd\/lib\/util\"\n)\n\nvar formatRate = util.FormatRate\n\nfunc Run() {\n\tvar args []string\n\tcmd := \"list\"\n\tfname := \"torrents.ini\"\n\tif len(os.Args) > 1 {\n\t\tcmd = os.Args[1]\n\t\targs = os.Args[2:]\n\t}\n\tcfg := new(config.Config)\n\terr := cfg.Load(fname)\n\tif err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t\treturn\n\t}\n\tlog.SetLevel(cfg.Log.Level)\n\tu := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: cfg.RPC.Bind,\n\t\tPath: rpc.RPCPath,\n\t}\n\tc := rpc.NewClient(u.String())\n\n\tswitch strings.ToLower(cmd) {\n\tcase \"list\":\n\t\tlistTorrents(c)\n\tcase \"add\":\n\t\taddTorrents(c, args...)\n\tcase \"set-piece-window\":\n\t\tsetPieceWindow(c, args[0])\n\t}\n}\n\nfunc setPieceWindow(c *rpc.Client, str string) {\n\tn, err := strconv.Atoi(str)\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %s\", err.Error())\n\t}\n\tc.SetPieceWindow(n)\n}\n\nfunc addTorrents(c *rpc.Client, urls ...string) {\n\tfor idx := range urls {\n\t\tfmt.Printf(\"fetch %s\", urls[idx])\n\t\tc.AddTorrent(urls[idx])\n\t}\n}\n\nfunc listTorrents(c *rpc.Client) {\n\tvar err error\n\tvar list swarm.TorrentsList\n\tlist, err = c.ListTorrents()\n\tif err != nil {\n\t\tlog.Errorf(\"rpc error: %s\", err)\n\t\treturn\n\t}\n\tvar globalTx, globalRx float64\n\n\tvar torrents swarm.TorrentStatusList\n\tsort.Stable(&list.Infohashes)\n\n\tfor _, ih := range list.Infohashes {\n\t\tvar status swarm.TorrentStatus\n\t\tstatus, err = c.SwarmStatus(ih)\n\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"rpc error: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\ttorrents = append(torrents, status)\n\n\t}\n\tsort.Stable(&torrents)\n\tfor _, status := range torrents {\n\t\tvar tx, rx float64\n\t\tfmt.Printf(\"%s [%s] %s\\n\", status.Name, status.Infohash, status.Bitfield.Percent())\n\t\tfmt.Println(\"peers:\")\n\t\tsort.Stable(&status.Peers)\n\t\tfor _, peer := range status.Peers {\n\t\t\tpad := peer.ID\n\n\t\t\tfor len(pad) < 65 {\n\t\t\t\tpad += \" 
\"\n\t\t\t}\n\t\t\tfmt.Printf(\"\\t%stx=%s rx=%s\\n\", pad, formatRate(peer.TX), formatRate(peer.RX))\n\t\t\ttx += peer.TX\n\t\t\trx += peer.RX\n\t\t}\n\t\tfmt.Printf(\"%s tx=%s rx=%s\\n\", status.State, formatRate(tx), formatRate(rx))\n\t\tfmt.Println(\"files:\")\n\t\tfor idx, f := range status.Files {\n\t\t\tfmt.Printf(\"\\t[%d] %s (%s)\\n\", idx, f.FileInfo.Path.FilePath(), f.Progress.Percent())\n\t\t}\n\t\tfmt.Println()\n\t\tglobalRx += rx\n\t\tglobalTx += tx\n\t}\n\tfmt.Println()\n\tfmt.Printf(\"%d torrents: tx=%s rx=%s\\n\", list.Infohashes.Len(), formatRate(globalTx), formatRate(globalRx))\n}\n<commit_msg>leftpad rate in rpc tool<commit_after>package rpc\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"xd\/lib\/bittorrent\/swarm\"\n\t\"xd\/lib\/config\"\n\t\"xd\/lib\/log\"\n\t\"xd\/lib\/rpc\"\n\t\"xd\/lib\/util\"\n)\n\nfunc formatRate(r float64) string {\n\tstr := util.FormatRate(r)\n\tfor len(str) < 12 {\n\t\tstr += \" \"\n\t}\n\treturn str\n}\n\nfunc Run() {\n\tvar args []string\n\tcmd := \"list\"\n\tfname := \"torrents.ini\"\n\tif len(os.Args) > 1 {\n\t\tcmd = os.Args[1]\n\t\targs = os.Args[2:]\n\t}\n\tcfg := new(config.Config)\n\terr := cfg.Load(fname)\n\tif err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t\treturn\n\t}\n\tlog.SetLevel(cfg.Log.Level)\n\tu := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: cfg.RPC.Bind,\n\t\tPath: rpc.RPCPath,\n\t}\n\tc := rpc.NewClient(u.String())\n\n\tswitch strings.ToLower(cmd) {\n\tcase \"list\":\n\t\tlistTorrents(c)\n\tcase \"add\":\n\t\taddTorrents(c, args...)\n\tcase \"set-piece-window\":\n\t\tsetPieceWindow(c, args[0])\n\t}\n}\n\nfunc setPieceWindow(c *rpc.Client, str string) {\n\tn, err := strconv.Atoi(str)\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %s\", err.Error())\n\t}\n\tc.SetPieceWindow(n)\n}\n\nfunc addTorrents(c *rpc.Client, urls ...string) {\n\tfor idx := range urls {\n\t\tfmt.Printf(\"fetch %s\", urls[idx])\n\t\tc.AddTorrent(urls[idx])\n\t}\n}\n\nfunc listTorrents(c *rpc.Client) {\n\tvar err error\n\tvar list swarm.TorrentsList\n\tlist, err = c.ListTorrents()\n\tif err != nil {\n\t\tlog.Errorf(\"rpc error: %s\", err)\n\t\treturn\n\t}\n\tvar globalTx, globalRx float64\n\n\tvar torrents swarm.TorrentStatusList\n\tsort.Stable(&list.Infohashes)\n\n\tfor _, ih := range list.Infohashes {\n\t\tvar status swarm.TorrentStatus\n\t\tstatus, err = c.SwarmStatus(ih)\n\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"rpc error: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\ttorrents = append(torrents, status)\n\n\t}\n\tsort.Stable(&torrents)\n\tfor _, status := range torrents {\n\t\tvar tx, rx float64\n\t\tfmt.Printf(\"%s [%s] %s\\n\", status.Name, status.Infohash, status.Bitfield.Percent())\n\t\tfmt.Println(\"peers:\")\n\t\tsort.Stable(&status.Peers)\n\t\tfor _, peer := range status.Peers {\n\t\t\tpad := peer.ID\n\n\t\t\tfor len(pad) < 65 {\n\t\t\t\tpad += \" \"\n\t\t\t}\n\t\t\tfmt.Printf(\"\\t%stx=%s rx=%s\\n\", pad, formatRate(peer.TX), formatRate(peer.RX))\n\t\t\ttx += peer.TX\n\t\t\trx += peer.RX\n\t\t}\n\t\tfmt.Printf(\"%s tx=%s rx=%s\\n\", status.State, formatRate(tx), formatRate(rx))\n\t\tfmt.Println(\"files:\")\n\t\tfor idx, f := range status.Files {\n\t\t\tfmt.Printf(\"\\t[%d] %s (%s)\\n\", idx, f.FileInfo.Path.FilePath(), f.Progress.Percent())\n\t\t}\n\t\tfmt.Println()\n\t\tglobalRx += rx\n\t\tglobalTx += tx\n\t}\n\tfmt.Println()\n\tfmt.Printf(\"%d torrents: tx=%s rx=%s\\n\", list.Infohashes.Len(), formatRate(globalTx), formatRate(globalRx))\n}\n<|endoftext|>"} {"text":"<commit_before>package 
main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/Xe\/middleware\"\n\t\"github.com\/codegangsta\/negroni\"\n\t\"github.com\/drone\/routes\"\n\t\"github.com\/goincremental\/negroni-sessions\"\n\t\"stevenbooru.cf\/csrf\"\n\t\"stevenbooru.cf\/eye\"\n\t. \"stevenbooru.cf\/globals\"\n\t\"stevenbooru.cf\/models\"\n)\n\nfunc main() {\n\tmux := routes.New()\n\n\tmux.Get(\"\/\", func(rw http.ResponseWriter, r *http.Request) {\n\t\teye.DoTemplate(\"index\", rw, r, nil)\n\t})\n\n\tmux.Get(\"\/login\", func(rw http.ResponseWriter, r *http.Request) {\n\t\ttok := csrf.SetToken(r)\n\t\teye.DoTemplate(\"users\/login\", rw, r, tok)\n\t})\n\n\tmux.Get(\"\/register\", func(rw http.ResponseWriter, r *http.Request) {\n\t\ttok := csrf.SetToken(r)\n\t\teye.DoTemplate(\"users\/register\", rw, r, tok)\n\t})\n\n\tmux.Post(\"\/register\", func(rw http.ResponseWriter, r *http.Request) {\n\t\terr := r.ParseForm()\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\ttok := r.PostForm.Get(\"token\")\n\t\tif !csrf.CheckToken(tok, r) {\n\t\t\teye.HandleError(rw, r, errors.New(\"Invalid CSRF token\"))\n\t\t\treturn\n\t\t}\n\n\t\t_, err = models.NewUser(r.PostForm)\n\t\tif err != nil {\n\t\t\teye.HandleError(rw, r, err)\n\t\t}\n\t})\n\n\tn := negroni.Classic()\n\n\tn.Use(sessions.Sessions(\"stevenbooru\", CookieStore))\n\tmiddleware.Inject(n)\n\tn.UseHandler(mux)\n\n\tn.Run(fmt.Sprintf(\"%s:%s\", Config.HTTP.Bindhost, Config.HTTP.Port))\n}\n<commit_msg>stevenbooru: set the active user UID on register<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/Xe\/middleware\"\n\t\"github.com\/codegangsta\/negroni\"\n\t\"github.com\/drone\/routes\"\n\t\"github.com\/goincremental\/negroni-sessions\"\n\t\"stevenbooru.cf\/csrf\"\n\t\"stevenbooru.cf\/eye\"\n\t. 
\"stevenbooru.cf\/globals\"\n\t\"stevenbooru.cf\/models\"\n)\n\nfunc main() {\n\tmux := routes.New()\n\n\tmux.Get(\"\/\", func(rw http.ResponseWriter, r *http.Request) {\n\t\teye.DoTemplate(\"index\", rw, r, nil)\n\t})\n\n\tmux.Get(\"\/login\", func(rw http.ResponseWriter, r *http.Request) {\n\t\ttok := csrf.SetToken(r)\n\t\teye.DoTemplate(\"users\/login\", rw, r, tok)\n\t})\n\n\tmux.Get(\"\/register\", func(rw http.ResponseWriter, r *http.Request) {\n\t\ttok := csrf.SetToken(r)\n\t\teye.DoTemplate(\"users\/register\", rw, r, tok)\n\t})\n\n\tmux.Post(\"\/register\", func(rw http.ResponseWriter, r *http.Request) {\n\t\terr := r.ParseForm()\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tsess := sessions.GetSession(r)\n\n\t\ttok := r.PostForm.Get(\"token\")\n\t\tif !csrf.CheckToken(tok, r) {\n\t\t\teye.HandleError(rw, r, errors.New(\"Invalid CSRF token\"))\n\t\t\treturn\n\t\t}\n\n\t\tu, err := models.NewUser(r.PostForm)\n\t\tif err != nil {\n\t\t\teye.HandleError(rw, r, err)\n\t\t}\n\n\t\tsess.Set(\"uid\", u.UUID)\n\t})\n\n\tn := negroni.Classic()\n\n\tn.Use(sessions.Sessions(\"stevenbooru\", CookieStore))\n\tmiddleware.Inject(n)\n\tn.UseHandler(mux)\n\n\tn.Run(fmt.Sprintf(\"%s:%s\", Config.HTTP.Bindhost, Config.HTTP.Port))\n}\n<|endoftext|>"} {"text":"<commit_before>package plugin\r\n\r\nimport (\r\n\t\"zvr\/server\"\r\n\t\"fmt\"\r\n\t\"zvr\/utils\"\r\n\t\"strings\"\r\n\tlog \"github.com\/Sirupsen\/logrus\"\r\n)\r\n\r\nconst (\r\n\tVR_CREATE_EIP = \"\/createeip\"\r\n\tVR_REMOVE_EIP = \"\/removeeip\"\r\n\tVR_SYNC_EIP = \"\/synceip\"\r\n)\r\n\r\ntype eipInfo struct {\r\n\tVipIp string `json:\"vipIp\"`\r\n\tPrivateMac string `json:\"privateMac\"`\r\n\tGuestIp string `json:\"guestIp\"`\r\n\tPublicMac string `json:\"publicMac\"`\r\n\tSnatInboundTraffic bool `json:\"snatInboundTraffic\"`\r\n}\r\n\r\ntype setEipCmd struct {\r\n\tEip eipInfo `json:\"eip\"`\r\n}\r\n\r\ntype removeEipCmd struct {\r\n\tEip eipInfo `json:\"eip\"`\r\n}\r\n\r\ntype syncEipCmd struct {\r\n\tEips []eipInfo `json:\"eips\"`\r\n}\r\n\r\nvar EIP_SNAT_START_RULE_NUM = 5000\r\n\r\nfunc makeEipDescription(info eipInfo) string {\r\n\treturn fmt.Sprintf(\"EIP-%v-%v-%v\", info.VipIp, info.GuestIp, info.PrivateMac)\r\n}\r\n\r\nfunc makeEipDescriptionReg(info eipInfo) string {\r\n\treturn fmt.Sprintf(\"^EIP-%v-\", info.VipIp)\r\n}\r\n\r\nfunc makeEipDescriptionForPrivateMac(info eipInfo) string {\r\n\treturn fmt.Sprintf(\"EIP-%v-%v-%v-private\", info.VipIp, info.GuestIp, info.PrivateMac)\r\n}\r\n\r\nfunc cleanupOldEip(tree *server.VyosConfigTree, eip eipInfo) {\r\n\tdesReg := makeEipDescriptionReg(eip)\r\n\tfor i := 0; i < 1; {\r\n\t\tif r := tree.FindSnatRuleDescriptionRegex(desReg, utils.StringRegCompareFn); r != nil {\r\n\t\t\tr.Delete()\r\n\t\t} else {\r\n\t\t\tbreak\r\n\t\t}\r\n\t}\r\n\tfor i := 0; i < 1; {\r\n\t\tif r := tree.FindDnatRuleDescriptionRegex(desReg, utils.StringRegCompareFn); r != nil {\r\n\t\t\tr.Delete()\r\n\t\t} else {\r\n\t\t\tbreak\r\n\t\t}\r\n\t}\r\n\tif nics, nicErr := utils.GetAllNics(); nicErr == nil {\r\n\t\tfor _, val := range nics {\r\n\t\t\tfor i := 0; i < 1; {\r\n\t\t\t\tif r := tree.FindFirewallRuleByDescriptionRegex(val.Name, \"in\", desReg, utils.StringRegCompareFn); r != nil {\r\n\t\t\t\t\tr.Delete()\r\n\t\t\t\t} else {\r\n\t\t\t\t\tbreak\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n}\r\n\r\n\r\nfunc setEip(tree *server.VyosConfigTree, eip eipInfo) {\r\n\tdes := makeEipDescription(eip)\r\n\tpriDes := makeEipDescriptionForPrivateMac(eip)\r\n\tnicname, err := 
utils.GetNicNameByIp(eip.VipIp)\r\n\tif err != nil && eip.PublicMac != \"\" {\r\n\t\tvar nicname string\r\n\t\terr = utils.Retry(func() error {\r\n\t\t\tvar e error\r\n\t\t\tnicname, e = utils.GetNicNameByMac(eip.PublicMac)\r\n\t\t\tif e != nil {\r\n\t\t\t\treturn e\r\n\t\t\t} else {\r\n\t\t\t\treturn nil\r\n\t\t\t}\r\n\t\t}, 5, 1)\r\n\t}\r\n\tutils.PanicOnError(err)\r\n\r\n\tprinicname, err := utils.GetNicNameByMac(eip.PrivateMac); utils.PanicOnError(err)\r\n\r\n\t\/* delete old rule in case deleted failed when delete EIP *\/\r\n\tcleanupOldEip(tree, eip)\r\n\r\n\tif r := tree.FindSnatRuleDescription(des); r == nil {\r\n\t\ttree.SetSnat(\r\n\t\t\tfmt.Sprintf(\"description %v\", des),\r\n\t\t\tfmt.Sprintf(\"outbound-interface %v\", nicname),\r\n\t\t\tfmt.Sprintf(\"source address %v\", eip.GuestIp),\r\n\t\t\tfmt.Sprintf(\"translation address %v\", eip.VipIp),\r\n\t\t)\r\n\t}\r\n\r\n\tif r := tree.FindSnatRuleDescription(priDes); r == nil {\r\n\t\ttree.SetSnat(\r\n\t\t\tfmt.Sprintf(\"description %v\", priDes),\r\n\t\t\tfmt.Sprintf(\"outbound-interface %v\", prinicname),\r\n\t\t\tfmt.Sprintf(\"source address %v\", eip.GuestIp),\r\n\t\t\tfmt.Sprintf(\"translation address %v\", eip.VipIp),\r\n\t\t)\r\n\t}\r\n\r\n\tif r := tree.FindDnatRuleDescription(des); r == nil {\r\n\t\ttree.SetDnat(\r\n\t\t\tfmt.Sprintf(\"description %v\", des),\r\n\t\t\tfmt.Sprintf(\"inbound-interface any\"),\r\n\t\t\tfmt.Sprintf(\"destination address %v\", eip.VipIp),\r\n\t\t\tfmt.Sprintf(\"translation address %v\", eip.GuestIp),\r\n\t\t)\r\n\t}\r\n\r\n\tif r := tree.FindFirewallRuleByDescription(nicname, \"in\", des); r == nil {\r\n\t\ttree.SetFirewallOnInterface(nicname, \"in\",\r\n\t\t\tfmt.Sprintf(\"description %v\", des),\r\n\t\t\tfmt.Sprintf(\"destination address %v\", eip.GuestIp),\r\n\t\t\t\"state new enable\",\r\n\t\t\t\"state established enable\",\r\n\t\t\t\"state related enable\",\r\n\t\t\t\"action accept\",\r\n\t\t)\r\n\r\n\t\ttree.AttachFirewallToInterface(nicname, \"in\")\r\n\t}\r\n\r\n\tif r := tree.FindFirewallRuleByDescription(prinicname, \"in\", des); r == nil {\r\n\t\ttree.SetFirewallOnInterface(prinicname, \"in\",\r\n\t\t\tfmt.Sprintf(\"description %v\", des),\r\n\t\t\tfmt.Sprintf(\"source address %v\", eip.GuestIp),\r\n\t\t\t\"state new enable\",\r\n\t\t\t\"state established enable\",\r\n\t\t\t\"state related enable\",\r\n\t\t\t\"action accept\",\r\n\t\t)\r\n\r\n\t\ttree.AttachFirewallToInterface(prinicname, \"in\")\r\n\t}\r\n}\r\n\r\nfunc checkEipExists(eip eipInfo) error {\r\n\ttree := server.NewParserFromShowConfiguration().Tree\r\n\tdes := makeEipDescription(eip)\r\n\tpriDes := makeEipDescriptionForPrivateMac(eip)\r\n\r\n\tif r := tree.FindSnatRuleDescription(des); r != nil {\r\n\t\treturn fmt.Errorf(\"%s snat deletion fail\", des)\r\n\t}\r\n\r\n\tif r := tree.FindSnatRuleDescription(priDes); r != nil {\r\n\t\treturn fmt.Errorf(\"%s snat deletion fail\", priDes)\r\n\t}\r\n\r\n\tif r := tree.FindDnatRuleDescription(des); r != nil {\r\n\t\treturn fmt.Errorf(\"%s dnat deletion fail\", des)\r\n\t}\r\n\r\n\tlog.Debugf(\"checkEipExists %v des %s priDes %s successfully deleted\", eip, des, priDes)\r\n\r\n\treturn nil\r\n}\r\n
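\r\n\/\/ For reference, the rule descriptions generated by the helpers above look\r\n\/\/ like this (the addresses and MAC are hypothetical examples, not values\r\n\/\/ taken from this code):\r\n\/\/\r\n\/\/\tEIP-172.16.0.10-10.0.0.8-fa:16:3e:00:00:01 (public SNAT\/DNAT\/firewall rules)\r\n\/\/\tEIP-172.16.0.10-10.0.0.8-fa:16:3e:00:00:01-private (private-interface SNAT rule)\r\n\/\/\r\n\/\/ deleteEip and checkEipExists below locate rules by exactly these strings.\r\n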
\r\nfunc deleteEip(tree *server.VyosConfigTree, eip eipInfo) {\r\n\tdes := makeEipDescription(eip)\r\n\tpriDes := makeEipDescriptionForPrivateMac(eip)\r\n\tnicname, err := utils.GetNicNameByIp(eip.VipIp)\r\n\tif err != nil && eip.PublicMac != \"\" {\r\n\t\tvar nicname string\r\n\t\terr = utils.Retry(func() error {\r\n\t\t\tvar e error\r\n\t\t\tnicname, e = utils.GetNicNameByMac(eip.PublicMac)\r\n\t\t\tif e != nil {\r\n\t\t\t\treturn e\r\n\t\t\t} else {\r\n\t\t\t\treturn nil\r\n\t\t\t}\r\n\t\t}, 5, 1)\r\n\t}\r\n\tutils.PanicOnError(err)\r\n\r\n\tif r := tree.FindSnatRuleDescription(des); r != nil {\r\n\t\tr.Delete()\r\n\t}\r\n\r\n\tif r := tree.FindSnatRuleDescription(priDes); r != nil {\r\n\t\tr.Delete()\r\n\t}\r\n\r\n\tif r := tree.FindDnatRuleDescription(des); r != nil {\r\n\t\tr.Delete()\r\n\t}\r\n\r\n\tif r := tree.FindFirewallRuleByDescription(nicname, \"in\", des); r != nil {\r\n\t\tr.Delete()\r\n\t}\r\n\r\n\tprinicname, err := utils.GetNicNameByMac(eip.PrivateMac); utils.PanicOnError(err)\r\n\tif r := tree.FindFirewallRuleByDescription(prinicname, \"in\", des); r != nil {\r\n\t\tr.Delete()\r\n\t}\r\n}\r\n\r\nfunc createEip(ctx *server.CommandContext) interface{} {\r\n\tcmd := &setEipCmd{}\r\n\tctx.GetCommand(cmd)\r\n\teip := cmd.Eip\r\n\r\n\ttree := server.NewParserFromShowConfiguration().Tree\r\n\tsetEip(tree, eip)\r\n\ttree.Apply(false)\r\n\r\n\treturn nil\r\n}\r\n\r\nfunc removeEip(ctx *server.CommandContext) interface{} {\r\n\tcmd := &removeEipCmd{}\r\n\tctx.GetCommand(cmd)\r\n\teip := cmd.Eip\r\n\r\n\terr := utils.Retry(func() error {\r\n\t\ttree := server.NewParserFromShowConfiguration().Tree\r\n\t\tdeleteEip(tree, eip)\r\n\t\ttree.Apply(false)\r\n\r\n\t\treturn checkEipExists(eip)\r\n\t}, 3, 1)\r\n\tutils.LogError(err)\r\n\treturn nil\r\n}\r\n\r\nfunc syncEip(ctx *server.CommandContext) interface{} {\r\n\tcmd := &syncEipCmd{}\r\n\tctx.GetCommand(cmd)\r\n\r\n\ttree := server.NewParserFromShowConfiguration().Tree\r\n\r\n\t\/\/ delete all EIP related rules\r\n\tif rs := tree.Get(\"nat destination rule\"); rs != nil {\r\n\t\tfor _, r := range rs.Children() {\r\n\t\t\tif d := r.Get(\"description\"); d != nil && strings.HasPrefix(d.Value(), \"EIP\") {\r\n\t\t\t\tr.Delete()\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\tif rs := tree.Getf(\"nat source rule\"); rs != nil {\r\n\t\tfor _, r := range rs.Children() {\r\n\t\t\tif d := r.Get(\"description\"); d != nil && strings.HasPrefix(d.Value(), \"EIP\") {\r\n\t\t\t\tr.Delete()\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\tif rs := tree.Getf(\"firewall name\"); rs != nil {\r\n\t\tfor _, r := range rs.Children() {\r\n\t\t\tif rss := r.Get(\"rule\"); rss != nil {\r\n\t\t\t\tfor _, rr := range rss.Children() {\r\n\t\t\t\t\tif d := rr.Get(\"description\"); d != nil && strings.HasPrefix(d.Value(), \"EIP\") {\r\n\t\t\t\t\t\trr.Delete()\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\tfor _, eip := range cmd.Eips {\r\n\t\tsetEip(tree, eip)\r\n\t}\r\n\r\n\ttree.Apply(false)\r\n\r\n\treturn nil\r\n}\r\n\r\nfunc EipEntryPoint() {\r\n\tserver.RegisterAsyncCommandHandler(VR_CREATE_EIP, server.VyosLock(createEip))\r\n\tserver.RegisterAsyncCommandHandler(VR_REMOVE_EIP, server.VyosLock(removeEip))\r\n\tserver.RegisterAsyncCommandHandler(VR_SYNC_EIP, server.VyosLock(syncEip))\r\n}\r\n<commit_msg>Avoid empty nic name in set eip<commit_after>package plugin\r\n\r\nimport (\r\n\t\"zvr\/server\"\r\n\t\"fmt\"\r\n\t\"zvr\/utils\"\r\n\t\"strings\"\r\n\tlog \"github.com\/Sirupsen\/logrus\"\r\n)\r\n\r\nconst (\r\n\tVR_CREATE_EIP = \"\/createeip\"\r\n\tVR_REMOVE_EIP = \"\/removeeip\"\r\n\tVR_SYNC_EIP = \"\/synceip\"\r\n)\r\n\r\ntype eipInfo struct {\r\n\tVipIp string `json:\"vipIp\"`\r\n\tPrivateMac string `json:\"privateMac\"`\r\n\tGuestIp string `json:\"guestIp\"`\r\n\tPublicMac string `json:\"publicMac\"`\r\n\tSnatInboundTraffic bool `json:\"snatInboundTraffic\"`\r\n}\r\n\r\ntype setEipCmd struct {\r\n\tEip eipInfo 
`json:\"eip\"`\r\n}\r\n\r\ntype removeEipCmd struct {\r\n\tEip eipInfo `json:\"eip\"`\r\n}\r\n\r\ntype syncEipCmd struct {\r\n\tEips []eipInfo `json:\"eips\"`\r\n}\r\n\r\nvar EIP_SNAT_START_RULE_NUM = 5000\r\n\r\nfunc makeEipDescription(info eipInfo) string {\r\n\treturn fmt.Sprintf(\"EIP-%v-%v-%v\", info.VipIp, info.GuestIp, info.PrivateMac)\r\n}\r\n\r\nfunc makeEipDescriptionReg(info eipInfo) string {\r\n\treturn fmt.Sprintf(\"^EIP-%v-\", info.VipIp)\r\n}\r\n\r\nfunc makeEipDescriptionForPrivateMac(info eipInfo) string {\r\n\treturn fmt.Sprintf(\"EIP-%v-%v-%v-private\", info.VipIp, info.GuestIp, info.PrivateMac)\r\n}\r\n\r\nfunc cleanupOldEip(tree *server.VyosConfigTree, eip eipInfo) {\r\n\tdesReg := makeEipDescriptionReg(eip)\r\n\tfor i := 0; i < 1; {\r\n\t\tif r := tree.FindSnatRuleDescriptionRegex(desReg, utils.StringRegCompareFn); r != nil {\r\n\t\t\tr.Delete()\r\n\t\t} else {\r\n\t\t\tbreak\r\n\t\t}\r\n\t}\r\n\tfor i := 0; i < 1; {\r\n\t\tif r := tree.FindDnatRuleDescriptionRegex(desReg, utils.StringRegCompareFn); r != nil {\r\n\t\t\tr.Delete()\r\n\t\t} else {\r\n\t\t\tbreak\r\n\t\t}\r\n\t}\r\n\tif nics, nicErr := utils.GetAllNics(); nicErr == nil {\r\n\t\tfor _, val := range nics {\r\n\t\t\tfor i := 0; i < 1; {\r\n\t\t\t\tif r := tree.FindFirewallRuleByDescriptionRegex(val.Name, \"in\", desReg, utils.StringRegCompareFn); r != nil {\r\n\t\t\t\t\tr.Delete()\r\n\t\t\t\t} else {\r\n\t\t\t\t\tbreak\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n}\r\n\r\n\r\nfunc setEip(tree *server.VyosConfigTree, eip eipInfo) {\r\n\tdes := makeEipDescription(eip)\r\n\tpriDes := makeEipDescriptionForPrivateMac(eip)\r\n\tnicname, err := utils.GetNicNameByIp(eip.VipIp)\r\n\tif (nicname == \"\" || err != nil) && eip.PublicMac != \"\" {\r\n\t\tvar nicname string\r\n\t\terr = utils.Retry(func() error {\r\n\t\t\tvar e error\r\n\t\t\tnicname, e = utils.GetNicNameByMac(eip.PublicMac)\r\n\t\t\tif e != nil {\r\n\t\t\t\treturn e\r\n\t\t\t} else if nicname == \"\" {\r\n\t\t\t\treturn fmt.Errorf(\"empty nic name found for mac[%s]\", eip.PublicMac)\r\n\t\t\t} else {\r\n\t\t\t\treturn nil\r\n\t\t\t}\r\n\t\t}, 5, 1)\r\n\t}\r\n\tutils.PanicOnError(err)\r\n\r\n\tprinicname, err := utils.GetNicNameByMac(eip.PrivateMac); utils.PanicOnError(err)\r\n\r\n\t\/* delete old rule in case deleted failed when delete EIP *\/\r\n\tcleanupOldEip(tree, eip)\r\n\r\n\tif r := tree.FindSnatRuleDescription(des); r == nil {\r\n\t\ttree.SetSnat(\r\n\t\t\tfmt.Sprintf(\"description %v\", des),\r\n\t\t\tfmt.Sprintf(\"outbound-interface %v\", nicname),\r\n\t\t\tfmt.Sprintf(\"source address %v\", eip.GuestIp),\r\n\t\t\tfmt.Sprintf(\"translation address %v\", eip.VipIp),\r\n\t\t)\r\n\t}\r\n\r\n\tif r := tree.FindSnatRuleDescription(priDes); r == nil {\r\n\t\ttree.SetSnat(\r\n\t\t\tfmt.Sprintf(\"description %v\", priDes),\r\n\t\t\tfmt.Sprintf(\"outbound-interface %v\", prinicname),\r\n\t\t\tfmt.Sprintf(\"source address %v\", eip.GuestIp),\r\n\t\t\tfmt.Sprintf(\"translation address %v\", eip.VipIp),\r\n\t\t)\r\n\t}\r\n\r\n\tif r := tree.FindDnatRuleDescription(des); r == nil {\r\n\t\ttree.SetDnat(\r\n\t\t\tfmt.Sprintf(\"description %v\", des),\r\n\t\t\tfmt.Sprintf(\"inbound-interface any\"),\r\n\t\t\tfmt.Sprintf(\"destination address %v\", eip.VipIp),\r\n\t\t\tfmt.Sprintf(\"translation address %v\", eip.GuestIp),\r\n\t\t)\r\n\t}\r\n\r\n\tif r := tree.FindFirewallRuleByDescription(nicname, \"in\", des); r == nil {\r\n\t\ttree.SetFirewallOnInterface(nicname, \"in\",\r\n\t\t\tfmt.Sprintf(\"description %v\", des),\r\n\t\t\tfmt.Sprintf(\"destination address 
%v\", eip.GuestIp),\r\n\t\t\t\"state new enable\",\r\n\t\t\t\"state established enable\",\r\n\t\t\t\"state related enable\",\r\n\t\t\t\"action accept\",\r\n\t\t)\r\n\r\n\t\ttree.AttachFirewallToInterface(nicname, \"in\")\r\n\t}\r\n\r\n\tif r := tree.FindFirewallRuleByDescription(prinicname, \"in\", des); r == nil {\r\n\t\ttree.SetFirewallOnInterface(prinicname, \"in\",\r\n\t\t\tfmt.Sprintf(\"description %v\", des),\r\n\t\t\tfmt.Sprintf(\"source address %v\", eip.GuestIp),\r\n\t\t\t\"state new enable\",\r\n\t\t\t\"state established enable\",\r\n\t\t\t\"state related enable\",\r\n\t\t\t\"action accept\",\r\n\t\t)\r\n\r\n\t\ttree.AttachFirewallToInterface(prinicname, \"in\")\r\n\t}\r\n}\r\n\r\nfunc checkEipExists(eip eipInfo) error {\r\n\ttree := server.NewParserFromShowConfiguration().Tree\r\n\tdes := makeEipDescription(eip)\r\n\tpriDes := makeEipDescriptionForPrivateMac(eip)\r\n\r\n\tif r := tree.FindSnatRuleDescription(des); r != nil {\r\n\t\treturn fmt.Errorf(\"%s snat deletion fail\", des)\r\n\t}\r\n\r\n\tif r := tree.FindSnatRuleDescription(priDes); r != nil {\r\n\t\treturn fmt.Errorf(\"%s snat deletion fail\", priDes)\r\n\t}\r\n\r\n\tif r := tree.FindDnatRuleDescription(des); r != nil {\r\n\t\treturn fmt.Errorf(\"%s dnat deletion fail\", des)\r\n\t}\r\n\r\n\tlog.Debugf(\"checkEipExists %v des %s priDes %s successfuuly deleted\", eip, des, priDes)\r\n\r\n\treturn nil\r\n}\r\n\r\nfunc deleteEip(tree *server.VyosConfigTree, eip eipInfo) {\r\n\tdes := makeEipDescription(eip)\r\n\tpriDes := makeEipDescriptionForPrivateMac(eip)\r\n\tnicname, err := utils.GetNicNameByIp(eip.VipIp)\r\n\tif err != nil && eip.PublicMac != \"\" {\r\n\t\tvar nicname string\r\n\t\terr = utils.Retry(func() error {\r\n\t\t\tvar e error\r\n\t\t\tnicname, e = utils.GetNicNameByMac(eip.PublicMac)\r\n\t\t\tif e != nil {\r\n\t\t\t\treturn e\r\n\t\t\t} else {\r\n\t\t\t\treturn nil\r\n\t\t\t}\r\n\t\t}, 5, 1)\r\n\t}\r\n\tutils.PanicOnError(err)\r\n\r\n\r\n\tif r := tree.FindSnatRuleDescription(des); r != nil {\r\n\t\tr.Delete()\r\n\t}\r\n\r\n\tif r := tree.FindSnatRuleDescription(priDes); r != nil {\r\n\t\tr.Delete()\r\n\t}\r\n\r\n\tif r := tree.FindDnatRuleDescription(des); r != nil {\r\n\t\tr.Delete()\r\n\t}\r\n\r\n\tif r := tree.FindFirewallRuleByDescription(nicname, \"in\", des); r != nil {\r\n\t\tr.Delete()\r\n\t}\r\n\r\n\tprinicname, err := utils.GetNicNameByMac(eip.PrivateMac); utils.PanicOnError(err)\r\n\tif r := tree.FindFirewallRuleByDescription(prinicname, \"in\", des); r != nil {\r\n\t\tr.Delete()\r\n\t}\r\n}\r\n\r\nfunc createEip(ctx *server.CommandContext) interface{} {\r\n\tcmd := &setEipCmd{}\r\n\tctx.GetCommand(cmd)\r\n\teip := cmd.Eip\r\n\r\n\ttree := server.NewParserFromShowConfiguration().Tree\r\n\tsetEip(tree, eip)\r\n\ttree.Apply(false)\r\n\r\n\treturn nil\r\n}\r\n\r\nfunc removeEip(ctx *server.CommandContext) interface{} {\r\n\tcmd := &removeEipCmd{}\r\n\tctx.GetCommand(cmd)\r\n\teip := cmd.Eip\r\n\r\n\terr := utils.Retry(func() error {\r\n tree := server.NewParserFromShowConfiguration().Tree\r\n deleteEip(tree, eip)\r\n tree.Apply(false)\r\n\r\n return checkEipExists(eip);\r\n }, 3, 1); utils.LogError(err)\r\n\treturn nil\r\n}\r\n\r\nfunc syncEip(ctx *server.CommandContext) interface{} {\r\n\tcmd := &syncEipCmd{}\r\n\tctx.GetCommand(cmd)\r\n\r\n\ttree := server.NewParserFromShowConfiguration().Tree\r\n\r\n\t\/\/ delete all EIP related rules\r\n\tif rs := tree.Get(\"nat destination rule\"); rs != nil {\r\n\t\tfor _, r := range rs.Children() {\r\n\t\t\tif d := r.Get(\"description\"); d != nil && 
strings.HasPrefix(d.Value(), \"EIP\") {\r\n\t\t\t\tr.Delete()\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\tif rs := tree.Getf(\"nat source rule\"); rs != nil {\r\n\t\tfor _, r := range rs.Children() {\r\n\t\t\tif d := r.Get(\"description\"); d != nil && strings.HasPrefix(d.Value(), \"EIP\") {\r\n\t\t\t\tr.Delete()\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\tif rs := tree.Getf(\"firewall name\"); rs != nil {\r\n\t\tfor _, r := range rs.Children() {\r\n\t\t\tif rss := r.Get(\"rule\"); rss != nil {\r\n\t\t\t\tfor _, rr := range rss.Children() {\r\n\t\t\t\t\tif d := rr.Get(\"description\"); d != nil && strings.HasPrefix(d.Value(), \"EIP\") {\r\n\t\t\t\t\t\trr.Delete()\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\tfor _, eip := range cmd.Eips {\r\n\t\tsetEip(tree, eip)\r\n\t}\r\n\r\n\ttree.Apply(false)\r\n\r\n\treturn nil\r\n}\r\n\r\nfunc EipEntryPoint() {\r\n\tserver.RegisterAsyncCommandHandler(VR_CREATE_EIP, server.VyosLock(createEip))\r\n\tserver.RegisterAsyncCommandHandler(VR_REMOVE_EIP, server.VyosLock(removeEip))\r\n\tserver.RegisterAsyncCommandHandler(VR_SYNC_EIP, server.VyosLock(syncEip))\r\n}\r\n<|endoftext|>"} {"text":"<commit_before>package starbound\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"io\"\n)\n\ntype IndexEntry struct {\n\tOffset, Size int64\n}\n\nfunc NewSBAsset6(r io.ReaderAt) (a *SBAsset6, err error) {\n\tbuf := make([]byte, 16)\n\t\/\/ Read the initial header which points at a metadata section.\n\t_, err = r.ReadAt(buf, 0)\n\tif err != nil {\n\t\treturn\n\t}\n\tif !bytes.Equal(buf[:8], []byte(\"SBAsset6\")) {\n\t\treturn nil, ErrInvalidData\n\t}\n\ta = &SBAsset6{r: r}\n\ta.metadata = int64(binary.BigEndian.Uint64(buf[8:]))\n\t\/\/ Read the metadata section of the asset file.\n\tbuf5 := buf[:5]\n\t_, err = r.ReadAt(buf5, a.metadata)\n\tif err != nil {\n\t\treturn\n\t}\n\tif !bytes.Equal(buf5, []byte(\"INDEX\")) {\n\t\treturn nil, ErrInvalidData\n\t}\n\trr := &readerAtReader{r: r, off: a.metadata + 5}\n\ta.Metadata, err = ReadMap(rr)\n\tif err != nil {\n\t\treturn\n\t}\n\tc, err := ReadVaruint(rr)\n\tif err != nil {\n\t\treturn\n\t}\n\ta.FileCount = int(c)\n\ta.index = rr.off\n\treturn\n}\n\ntype SBAsset6 struct {\n\tFileCount int\n\tIndex map[string]IndexEntry\n\tMetadata map[string]interface{}\n\n\tr io.ReaderAt\n\tmetadata int64\n\tindex int64\n}\n\nfunc (a *SBAsset6) GetReader(path string) (r io.Reader, err error) {\n\tentry, ok := a.Index[path]\n\tif !ok {\n\t\treturn nil, ErrKeyNotFound\n\t}\n\treturn io.NewSectionReader(a.r, entry.Offset, entry.Size), nil\n}\n\nfunc (a *SBAsset6) ReadIndex() error {\n\ta.Index = make(map[string]IndexEntry, a.FileCount)\n\tbuf := make([]byte, 255)\n\tr := &readerAtReader{r: a.r, off: a.index}\n\tfor i := 0; i < a.FileCount; i++ {\n\t\t\/\/ Read the path, which will be used as the index key.\n\t\tkey, err := ReadString(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ Read the offset and size of the file.\n\t\t_, err = r.Read(buf[:16])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar entry IndexEntry\n\t\tentry.Offset = int64(binary.BigEndian.Uint64(buf))\n\t\tentry.Size = int64(binary.BigEndian.Uint64(buf[8:]))\n\t\t\/\/ Update the map.\n\t\ta.Index[key] = entry\n\t}\n\treturn nil\n}\n<commit_msg>Only allocate 16 bytes in buffer for offset and size<commit_after>package starbound\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"io\"\n)\n\ntype IndexEntry struct {\n\tOffset, Size int64\n}\n\nfunc NewSBAsset6(r io.ReaderAt) (a *SBAsset6, err error) {\n\tbuf := make([]byte, 16)\n\t\/\/ Read the initial 
header which points at a metadata section.\n\t_, err = r.ReadAt(buf, 0)\n\tif err != nil {\n\t\treturn\n\t}\n\tif !bytes.Equal(buf[:8], []byte(\"SBAsset6\")) {\n\t\treturn nil, ErrInvalidData\n\t}\n\ta = &SBAsset6{r: r}\n\ta.metadata = int64(binary.BigEndian.Uint64(buf[8:]))\n\t\/\/ Read the metadata section of the asset file.\n\tbuf5 := buf[:5]\n\t_, err = r.ReadAt(buf5, a.metadata)\n\tif err != nil {\n\t\treturn\n\t}\n\tif !bytes.Equal(buf5, []byte(\"INDEX\")) {\n\t\treturn nil, ErrInvalidData\n\t}\n\trr := &readerAtReader{r: r, off: a.metadata + 5}\n\ta.Metadata, err = ReadMap(rr)\n\tif err != nil {\n\t\treturn\n\t}\n\tc, err := ReadVaruint(rr)\n\tif err != nil {\n\t\treturn\n\t}\n\ta.FileCount = int(c)\n\ta.index = rr.off\n\treturn\n}\n\ntype SBAsset6 struct {\n\tFileCount int\n\tIndex map[string]IndexEntry\n\tMetadata map[string]interface{}\n\n\tr io.ReaderAt\n\tmetadata int64\n\tindex int64\n}\n\nfunc (a *SBAsset6) GetReader(path string) (r io.Reader, err error) {\n\tentry, ok := a.Index[path]\n\tif !ok {\n\t\treturn nil, ErrKeyNotFound\n\t}\n\treturn io.NewSectionReader(a.r, entry.Offset, entry.Size), nil\n}\n\nfunc (a *SBAsset6) ReadIndex() error {\n\ta.Index = make(map[string]IndexEntry, a.FileCount)\n\tbuf := make([]byte, 16)\n\tr := &readerAtReader{r: a.r, off: a.index}\n\tfor i := 0; i < a.FileCount; i++ {\n\t\t\/\/ Read the path, which will be used as the index key.\n\t\tkey, err := ReadString(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ Read the offset and size of the file.\n\t\t_, err = r.Read(buf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar entry IndexEntry\n\t\tentry.Offset = int64(binary.BigEndian.Uint64(buf))\n\t\tentry.Size = int64(binary.BigEndian.Uint64(buf[8:]))\n\t\t\/\/ Update the map.\n\t\ta.Index[key] = entry\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/42wim\/tail\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/hashicorp\/golang-lru\"\n\t\"github.com\/oschwald\/geoip2-golang\"\n\t_ \"github.com\/pkg\/profile\"\n\t\"github.com\/pquerna\/ffjson\/ffjson\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n)\n\nvar flagPrimary, flagQueue, flagBackup, flagTailFile string\nvar geoipDB *geoip2.Reader\nvar geoipCache *lru.Cache\nvar cfg *Config\nvar nrCPU = runtime.GOMAXPROCS(-1)\nvar log = logrus.New()\n\nconst nfLayout = \"2006-01-02T15:04:05.999999999\"\n\nvar myLocation *time.Location\n\nfunc parseLine(line *[]byte) {\n\tvar f nf\n\tvar t time.Time\n\tvar realRegionName, regionName string\n\tvar record *geoip2.City\n\n\terr := ffjson.Unmarshal(*line, &f)\n\tif err != nil {\n\t\t*line = []byte(\"\")\n\t\tlog.Error(err, \"couldn't unmarshal \", *line)\n\t\treturn\n\t}\n\n\t\/\/ use LRU cache\n\tif val, ok := geoipCache.Get(*f.Srcip); ok {\n\t\trecord = val.(*geoip2.City)\n\t} else {\n\t\tip := net.ParseIP(*f.Srcip)\n\t\trecord, _ = geoipDB.City(ip)\n\t\tgeoipCache.Add(*f.Srcip, record)\n\t}\n\n\t\/\/ add @timestamp with zulu (ISO8601 time)\n\tt, _ = time.ParseInLocation(nfLayout, *f.Timestamp, myLocation)\n\tf.Ltimestamp = t.UTC().Format(time.RFC3339Nano)\n\n\tif record.Location.Longitude != 0 && record.Location.Latitude != 0 {\n\t\tmylen := len(record.Subdivisions)\n\t\tif mylen > 0 {\n\t\t\tmylen--\n\t\t\trealRegionName = record.Subdivisions[mylen].Names[\"en\"]\n\t\t\tregionName = record.Subdivisions[mylen].IsoCode\n\t\t}\n\t\tf.GeoIP.Longitude = &record.Location.Longitude\n\t\tf.GeoIP.Latitude = &record.Location.Latitude\n\t\tf.GeoIP.CountryName = 
record.Country.Names[\"en\"]\n\t\tf.GeoIP.Timezone = &record.Location.TimeZone\n\t\tf.GeoIP.ContinentCode = &record.Continent.Code\n\t\tf.GeoIP.CityName = record.City.Names[\"en\"]\n\t\tf.GeoIP.CountryCode2 = &record.Country.IsoCode\n\t\tf.GeoIP.RealRegionName = &realRegionName\n\t\tf.GeoIP.RegionName = ®ionName\n\t\tf.GeoIP.IP = f.Srcip\n\t\tf.GeoIP.Location = &esGeoIPLocation{f.GeoIP.Longitude, f.GeoIP.Latitude}\n\t\tf.GeoIP.Coordinates = f.GeoIP.Location\n\t}\n\n\t*line, _ = ffjson.Marshal(f)\n}\n\nfunc parseLineWorker(ctx *Context) {\n\tbt0 := time.Now()\n\tshow := true\n\tfor {\n\t\tselect {\n\t\tcase line := <-ctx.lines:\n\t\t\tmyline := []byte(*line)\n\t\t\tparseLine(&myline)\n\t\t\t\/\/ primary buffer full, send to backup\n\t\t\tif len(ctx.parsedLines) > ctx.cfg.General.Buffer*90\/100 {\n\t\t\t\tctx.buffering = true\n\t\t\t\tif show {\n\t\t\t\t\tlog.Info(\"primary isn't fast enough, buffer full, buffering to backup\")\n\t\t\t\t\tshow = false\n\t\t\t\t}\n\t\t\t\t\/\/ only log message every 5 seconds\n\t\t\t\tif time.Since(bt0).Seconds() > 5 {\n\t\t\t\t\tshow = true\n\t\t\t\t\tbt0 = time.Now()\n\t\t\t\t}\n\t\t\t\tctx.backupLines <- &myline\n\t\t\t} else {\n\t\t\t\tctx.buffering = false\n\t\t\t\tctx.parsedLines <- &myline\n\t\t\t}\n\t\tcase <-time.After(time.Second * 3):\n\t\t\tctx.buffering = false\n\t\t}\n\t}\n}\n\nfunc doTask(ctx *Context) {\n\tswitch flagPrimary {\n\tcase \"redis\", \"rabbit\":\n\t\tgo doPrimaryTask(ctx, flagPrimary)\n\tcase \"es\": \/\/ es is pretty slow, start multiple\n\t\tlog.Info(\"backend:starting \", ctx.cfg.Backend[\"es\"].Workers, \" ES backends\")\n\t\tfor i := 0; i < ctx.cfg.Backend[\"es\"].Workers; i++ {\n\t\t\tgo doPrimaryTask(ctx, \"es\")\n\t\t}\n\t}\n\tif flagBackup != \"\" {\n\t\tgo doBackupTask(ctx, flagBackup)\n\t}\n}\n\nfunc rateLogger(ctx *Context) {\n\tparsedRatecount := 0\n\tbackupRatecount := 0\n\tparsedTotalcount := 0\n\tbackupTotalcount := 0\n\tpt0 := time.Now()\n\tpt1 := time.Now()\n\n\tbt0 := time.Now()\n\tbt1 := time.Now()\n\tticker := time.NewTicker(5 * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.parsedRate:\n\t\t\tparsedRatecount++\n\t\t\tparsedTotalcount++\n\t\t\tif time.Since(pt0).Seconds() > 5 {\n\t\t\t\tlog.Info(\"primary: total: \", parsedTotalcount,\n\t\t\t\t\t\" rate: \", int(float64(parsedRatecount)\/float64(time.Since(pt0).Seconds())), \"\/s\",\n\t\t\t\t\t\" avg rate: \", int(float64(parsedTotalcount)\/float64(time.Since(pt1).Seconds())), \"\/s\",\n\t\t\t\t\t\" buffer: \", len(ctx.parsedLines))\n\t\t\t\tpt0 = time.Now()\n\t\t\t\tparsedRatecount = 0\n\t\t\t}\n\t\tcase <-ctx.backupRate:\n\t\t\tbackupRatecount++\n\t\t\tbackupTotalcount++\n\t\t\tif time.Since(bt0).Seconds() > 5 {\n\t\t\t\tlog.Info(\"backup: total: \", backupTotalcount,\n\t\t\t\t\t\" rate: \", int(float64(backupRatecount)\/float64(time.Since(bt0).Seconds())), \"\/s\",\n\t\t\t\t\t\" avg rate: \", int(float64(backupTotalcount)\/float64(time.Since(bt1).Seconds())), \"\/s\",\n\t\t\t\t\t\" buffer: \", len(ctx.parsedLines))\n\t\t\t\tbt0 = time.Now()\n\t\t\t\tbackupRatecount = 0\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\t\/\/log.Debug(backupRatecount, \":\", ctx.backupRateInt)\n\t\t\tif backupRatecount == ctx.backupRateInt {\n\t\t\t\t\/\/log.Debug(\"ratelogger: sending restoreStart\")\n\t\t\t\tctx.restoreStart <- \"ratelogger\"\n\t\t\t} else {\n\t\t\t\tctx.backupRateInt = backupRatecount\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc tailUlog(ctx *Context) {\n\tlogfile := flagTailFile\n\tt, err := tail.TailFile(logfile, tail.Config{Poll: true, Follow: true, ReOpen: true, Pipe: 
true})\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\n\t\/\/ create the workers. line goes in, parsed line goes out\n\tfor i := 0; i < nrCPU\/2; i++ {\n\t\tgo parseLineWorker(ctx)\n\t}\n\t\/\/ do primary and backup tasks\n\tgo doTask(ctx)\n\n\t\/\/show some stats\n\tgo rateLogger(ctx)\n\n\tfor line := range t.Lines {\n\t\tctx.lines <- &line.Text\n\t}\n}\n\nfunc failOnError(err error, msg string) {\n\tif err != nil {\n\t\tlog.Fatalf(\"%s: %s\", msg, err)\n\t\tpanic(fmt.Sprintf(\"%s: %s\", msg, err))\n\t}\n}\n\nfunc init() {\n\tvar flagDebug bool\n\tvar flagConfig string\n\tflag.StringVar(&flagPrimary, \"primary\", \"\", \"name of primary backend (es\/redis\/rabbit)\")\n\tflag.StringVar(&flagBackup, \"backup\", \"\", \"name of backup backend (disk\/redis\/rabbit)\")\n\tflag.StringVar(&flagConfig, \"conf\", \"ulog2queue.cfg\", \"config file\")\n\tflag.StringVar(&flagTailFile, \"tail\", \"\", \"file to tail\")\n\tflag.BoolVar(&flagDebug, \"debug\", false, \"enable debug\")\n\thostname, _ := os.Hostname()\n\tflag.StringVar(&flagQueue, \"queue\", hostname, \"name of queue\")\n\tlog.Level = logrus.InfoLevel\n\tflag.Parse()\n\tif flagDebug {\n\t\tlog.Println(\"enabling debug\")\n\t\tlog.Level = logrus.DebugLevel\n\t}\n\tcfg = NewConfig(flagConfig)\n\tif flagPrimary == \"\" {\n\t\tflagPrimary = cfg.General.Primary\n\t}\n\tif flagBackup == \"\" {\n\t\tflagBackup = cfg.General.Backup\n\t}\n\tif flagTailFile == \"\" {\n\t\tflagTailFile = cfg.General.TailFile\n\t}\n\tmyLocation, _ = time.LoadLocation(\"Local\")\n\tgeoipCache, _ = lru.New(10000)\n}\n\nfunc main() {\n\tvar err error\n\tif nrCPU == 1 { \/\/ no GOMAXPROCS set\n\t\tnrCPU = runtime.NumCPU() \/ 2\n\t\tif nrCPU > 10 {\n\t\t\tnrCPU = 10\n\t\t}\n\t\truntime.GOMAXPROCS(nrCPU)\n\t}\n\tcontext := &Context{make(chan *string, 10000),\n\t\tmake(chan *[]byte, cfg.General.Buffer),\n\t\tmake(chan *[]byte, 10000),\n\t\tmake(chan int),\n\t\tmake(chan int),\n\t\tmake(chan string),\n\t\tmake(chan bool),\n\t\tmake(chan string),\n\t\t0,\n\t\tfalse,\n\t\tcfg}\n\tgeoipDB, err = geoip2.Open(context.cfg.General.Geoip2db)\n\tfailOnError(err, \"can't open geoip db\")\n\tredisPool = NewPool(context)\n\ttailUlog(context)\n}\n<commit_msg>Fix nil pointer panic<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/42wim\/tail\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/hashicorp\/golang-lru\"\n\t\"github.com\/oschwald\/geoip2-golang\"\n\t_ \"github.com\/pkg\/profile\"\n\t\"github.com\/pquerna\/ffjson\/ffjson\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n)\n\nvar flagPrimary, flagQueue, flagBackup, flagTailFile string\nvar geoipDB *geoip2.Reader\nvar geoipCache *lru.Cache\nvar cfg *Config\nvar nrCPU = runtime.GOMAXPROCS(-1)\nvar log = logrus.New()\n\nconst nfLayout = \"2006-01-02T15:04:05.999999999\"\n\nvar myLocation *time.Location\n\nfunc parseLine(line *[]byte) {\n\tvar f nf\n\tvar t time.Time\n\tvar realRegionName, regionName string\n\tvar record *geoip2.City\n\n\terr := ffjson.Unmarshal(*line, &f)\n\tif err != nil {\n\t\t*line = []byte(\"\")\n\t\tlog.Error(err, \"couldn't unmarshal \", *line)\n\t\treturn\n\t}\n\n\tif f.Srcip == nil {\n\t\tlog.Error(\"Packet without source ip, shouldn't happen: \", *line)\n\t\treturn\n\t}\n\n\t\/\/ use LRU cache\n\tif val, ok := geoipCache.Get(*f.Srcip); ok {\n\t\trecord = val.(*geoip2.City)\n\t} else {\n\t\tip := net.ParseIP(*f.Srcip)\n\t\trecord, _ = geoipDB.City(ip)\n\t\tgeoipCache.Add(*f.Srcip, record)\n\t}\n\n\t\/\/ add @timestamp with zulu (ISO8601 time)\n\tt, _ = 
time.ParseInLocation(nfLayout, *f.Timestamp, myLocation)\n\tf.Ltimestamp = t.UTC().Format(time.RFC3339Nano)\n\n\tif record.Location.Longitude != 0 && record.Location.Latitude != 0 {\n\t\tmylen := len(record.Subdivisions)\n\t\tif mylen > 0 {\n\t\t\tmylen--\n\t\t\trealRegionName = record.Subdivisions[mylen].Names[\"en\"]\n\t\t\tregionName = record.Subdivisions[mylen].IsoCode\n\t\t}\n\t\tf.GeoIP.Longitude = &record.Location.Longitude\n\t\tf.GeoIP.Latitude = &record.Location.Latitude\n\t\tf.GeoIP.CountryName = record.Country.Names[\"en\"]\n\t\tf.GeoIP.Timezone = &record.Location.TimeZone\n\t\tf.GeoIP.ContinentCode = &record.Continent.Code\n\t\tf.GeoIP.CityName = record.City.Names[\"en\"]\n\t\tf.GeoIP.CountryCode2 = &record.Country.IsoCode\n\t\tf.GeoIP.RealRegionName = &realRegionName\n\t\tf.GeoIP.RegionName = ®ionName\n\t\tf.GeoIP.IP = f.Srcip\n\t\tf.GeoIP.Location = &esGeoIPLocation{f.GeoIP.Longitude, f.GeoIP.Latitude}\n\t\tf.GeoIP.Coordinates = f.GeoIP.Location\n\t}\n\n\t*line, _ = ffjson.Marshal(f)\n}\n\nfunc parseLineWorker(ctx *Context) {\n\tbt0 := time.Now()\n\tshow := true\n\tfor {\n\t\tselect {\n\t\tcase line := <-ctx.lines:\n\t\t\tmyline := []byte(*line)\n\t\t\tparseLine(&myline)\n\t\t\t\/\/ primary buffer full, send to backup\n\t\t\tif len(ctx.parsedLines) > ctx.cfg.General.Buffer*90\/100 {\n\t\t\t\tctx.buffering = true\n\t\t\t\tif show {\n\t\t\t\t\tlog.Info(\"primary isn't fast enough, buffer full, buffering to backup\")\n\t\t\t\t\tshow = false\n\t\t\t\t}\n\t\t\t\t\/\/ only log message every 5 seconds\n\t\t\t\tif time.Since(bt0).Seconds() > 5 {\n\t\t\t\t\tshow = true\n\t\t\t\t\tbt0 = time.Now()\n\t\t\t\t}\n\t\t\t\tctx.backupLines <- &myline\n\t\t\t} else {\n\t\t\t\tctx.buffering = false\n\t\t\t\tctx.parsedLines <- &myline\n\t\t\t}\n\t\tcase <-time.After(time.Second * 3):\n\t\t\tctx.buffering = false\n\t\t}\n\t}\n}\n\nfunc doTask(ctx *Context) {\n\tswitch flagPrimary {\n\tcase \"redis\", \"rabbit\":\n\t\tgo doPrimaryTask(ctx, flagPrimary)\n\tcase \"es\": \/\/ es is pretty slow, start multiple\n\t\tlog.Info(\"backend:starting \", ctx.cfg.Backend[\"es\"].Workers, \" ES backends\")\n\t\tfor i := 0; i < ctx.cfg.Backend[\"es\"].Workers; i++ {\n\t\t\tgo doPrimaryTask(ctx, \"es\")\n\t\t}\n\t}\n\tif flagBackup != \"\" {\n\t\tgo doBackupTask(ctx, flagBackup)\n\t}\n}\n\nfunc rateLogger(ctx *Context) {\n\tparsedRatecount := 0\n\tbackupRatecount := 0\n\tparsedTotalcount := 0\n\tbackupTotalcount := 0\n\tpt0 := time.Now()\n\tpt1 := time.Now()\n\n\tbt0 := time.Now()\n\tbt1 := time.Now()\n\tticker := time.NewTicker(5 * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.parsedRate:\n\t\t\tparsedRatecount++\n\t\t\tparsedTotalcount++\n\t\t\tif time.Since(pt0).Seconds() > 5 {\n\t\t\t\tlog.Info(\"primary: total: \", parsedTotalcount,\n\t\t\t\t\t\" rate: \", int(float64(parsedRatecount)\/float64(time.Since(pt0).Seconds())), \"\/s\",\n\t\t\t\t\t\" avg rate: \", int(float64(parsedTotalcount)\/float64(time.Since(pt1).Seconds())), \"\/s\",\n\t\t\t\t\t\" buffer: \", len(ctx.parsedLines))\n\t\t\t\tpt0 = time.Now()\n\t\t\t\tparsedRatecount = 0\n\t\t\t}\n\t\tcase <-ctx.backupRate:\n\t\t\tbackupRatecount++\n\t\t\tbackupTotalcount++\n\t\t\tif time.Since(bt0).Seconds() > 5 {\n\t\t\t\tlog.Info(\"backup: total: \", backupTotalcount,\n\t\t\t\t\t\" rate: \", int(float64(backupRatecount)\/float64(time.Since(bt0).Seconds())), \"\/s\",\n\t\t\t\t\t\" avg rate: \", int(float64(backupTotalcount)\/float64(time.Since(bt1).Seconds())), \"\/s\",\n\t\t\t\t\t\" buffer: \", len(ctx.parsedLines))\n\t\t\t\tbt0 = 
time.Now()\n\t\t\t\tbackupRatecount = 0\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\t\/\/log.Debug(backupRatecount, \":\", ctx.backupRateInt)\n\t\t\tif backupRatecount == ctx.backupRateInt {\n\t\t\t\t\/\/log.Debug(\"ratelogger: sending restoreStart\")\n\t\t\t\tctx.restoreStart <- \"ratelogger\"\n\t\t\t} else {\n\t\t\t\tctx.backupRateInt = backupRatecount\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc tailUlog(ctx *Context) {\n\tlogfile := flagTailFile\n\tt, err := tail.TailFile(logfile, tail.Config{Poll: true, Follow: true, ReOpen: true, Pipe: true})\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\n\t\/\/ create the workers. line goes in, parsed line goes out\n\tfor i := 0; i < nrCPU\/2; i++ {\n\t\tgo parseLineWorker(ctx)\n\t}\n\t\/\/ do primary and backup tasks\n\tgo doTask(ctx)\n\n\t\/\/show some stats\n\tgo rateLogger(ctx)\n\n\tfor line := range t.Lines {\n\t\tctx.lines <- &line.Text\n\t}\n}\n\nfunc failOnError(err error, msg string) {\n\tif err != nil {\n\t\tlog.Fatalf(\"%s: %s\", msg, err)\n\t\tpanic(fmt.Sprintf(\"%s: %s\", msg, err))\n\t}\n}\n\nfunc init() {\n\tvar flagDebug bool\n\tvar flagConfig string\n\tflag.StringVar(&flagPrimary, \"primary\", \"\", \"name of primary backend (es\/redis\/rabbit)\")\n\tflag.StringVar(&flagBackup, \"backup\", \"\", \"name of backup backend (disk\/redis\/rabbit)\")\n\tflag.StringVar(&flagConfig, \"conf\", \"ulog2queue.cfg\", \"config file\")\n\tflag.StringVar(&flagTailFile, \"tail\", \"\", \"file to tail\")\n\tflag.BoolVar(&flagDebug, \"debug\", false, \"enable debug\")\n\thostname, _ := os.Hostname()\n\tflag.StringVar(&flagQueue, \"queue\", hostname, \"name of queue\")\n\tlog.Level = logrus.InfoLevel\n\tflag.Parse()\n\tif flagDebug {\n\t\tlog.Println(\"enabling debug\")\n\t\tlog.Level = logrus.DebugLevel\n\t}\n\tcfg = NewConfig(flagConfig)\n\tif flagPrimary == \"\" {\n\t\tflagPrimary = cfg.General.Primary\n\t}\n\tif flagBackup == \"\" {\n\t\tflagBackup = cfg.General.Backup\n\t}\n\tif flagTailFile == \"\" {\n\t\tflagTailFile = cfg.General.TailFile\n\t}\n\tmyLocation, _ = time.LoadLocation(\"Local\")\n\tgeoipCache, _ = lru.New(10000)\n}\n\nfunc main() {\n\tvar err error\n\tif nrCPU == 1 { \/\/ no GOMAXPROCS set\n\t\tnrCPU = runtime.NumCPU() \/ 2\n\t\tif nrCPU > 10 {\n\t\t\tnrCPU = 10\n\t\t}\n\t\truntime.GOMAXPROCS(nrCPU)\n\t}\n\tcontext := &Context{make(chan *string, 10000),\n\t\tmake(chan *[]byte, cfg.General.Buffer),\n\t\tmake(chan *[]byte, 10000),\n\t\tmake(chan int),\n\t\tmake(chan int),\n\t\tmake(chan string),\n\t\tmake(chan bool),\n\t\tmake(chan string),\n\t\t0,\n\t\tfalse,\n\t\tcfg}\n\tgeoipDB, err = geoip2.Open(context.cfg.General.Geoip2db)\n\tfailOnError(err, \"can't open geoip db\")\n\tredisPool = NewPool(context)\n\ttailUlog(context)\n}\n<|endoftext|>"} {"text":"<commit_before>package class\n\nimport (\n \"strings\"\n)\n\ntype MemberDescriptorParser struct {\n descriptor string\n offset int\n md *MethodDescriptor\n}\n\nfunc newMemberDescriptorParser(descriptor string) (*MemberDescriptorParser) {\n return &MemberDescriptorParser{descriptor: descriptor}\n}\n\nfunc (self *MemberDescriptorParser) parse() (*MethodDescriptor) {\n self.md = &MethodDescriptor{}\n self.startParams()\n self.parseParameterTypes()\n self.endParams()\n self.parseReturnType()\n self.finish()\n return self.md\n}\n\nfunc (self *MemberDescriptorParser) startParams() {\n b := self.readUint8()\n if b != '(' {\n self.causePanic()\n }\n}\nfunc (self *MemberDescriptorParser) endParams() {\n b := self.readUint8()\n if b != ')' {\n self.causePanic()\n }\n}\nfunc (self *MemberDescriptorParser) finish() 
{\n if self.offset != len(self.descriptor) {\n self.causePanic()\n }\n}\n\nfunc (self *MemberDescriptorParser) parseParameterTypes() {\n for {\n t := self.readFieldType()\n if t != nil {\n self.md.addParameterType(t)\n } else {\n break\n }\n }\n}\nfunc (self *MemberDescriptorParser) parseReturnType() {\n t := self.readFieldType()\n if t != nil {\n self.md.returnType = t\n } else {\n self.causePanic()\n }\n}\n\nfunc (self *MemberDescriptorParser) readUint8() uint8 {\n b := self.descriptor[self.offset]\n self.offset++\n return b\n}\nfunc (self *MemberDescriptorParser) unreadUint8() {\n self.offset--\n}\n\nfunc (self *MemberDescriptorParser) readFieldType() (*FieldType) {\n switch self.readUint8() {\n case 'B': return baseTypeB\n case 'C': return baseTypeC\n case 'D': return baseTypeD\n case 'F': return baseTypeF\n case 'I': return baseTypeI\n case 'J': return baseTypeJ\n case 'S': return baseTypeS\n case 'Z': return baseTypeZ\n case 'V': return baseTypeV\n case 'L': return self.readObjectType()\n case '[': return self.readArrayType()\n default:\n self.unreadUint8()\n return nil\n }\n}\nfunc (self *MemberDescriptorParser) readObjectType() (*FieldType) {\n unread := self.descriptor[self.offset:]\n semicolonIndex := strings.IndexRune(unread, ';')\n if semicolonIndex == -1 {\n self.causePanic()\n return nil\n } else {\n objStart := self.offset - 1\n objEnd := self.offset + semicolonIndex + 1\n self.offset = objEnd\n descriptor := self.descriptor[objStart: objEnd]\n return &FieldType{descriptor}\n }\n}\nfunc (self *MemberDescriptorParser) readArrayType() (*FieldType) {\n arrStart := self.offset - 1\n self.readFieldType()\n arrEnd := self.offset\n descriptor := self.descriptor[arrStart: arrEnd]\n return &FieldType{descriptor}\n}\n\nfunc (self *MemberDescriptorParser) causePanic() {\n panic(\"BAD descriptor: \" + self.descriptor)\n}\n<commit_msg>code refactor<commit_after>package class\n\nimport (\n \"strings\"\n)\n\ntype MemberDescriptorParser struct {\n descriptor string\n offset int\n md *MethodDescriptor\n}\n\nfunc newMemberDescriptorParser(descriptor string) (*MemberDescriptorParser) {\n return &MemberDescriptorParser{descriptor: descriptor}\n}\n\nfunc (self *MemberDescriptorParser) parse() (*MethodDescriptor) {\n self.md = &MethodDescriptor{}\n self.startParams()\n self.parseParameterTypes()\n self.endParams()\n self.parseReturnType()\n self.finish()\n return self.md\n}\n\nfunc (self *MemberDescriptorParser) startParams() {\n if self.readUint8() != '(' {\n self.causePanic()\n }\n}\nfunc (self *MemberDescriptorParser) endParams() {\n if self.readUint8() != ')' {\n self.causePanic()\n }\n}\nfunc (self *MemberDescriptorParser) finish() {\n if self.offset != len(self.descriptor) {\n self.causePanic()\n }\n}\n\nfunc (self *MemberDescriptorParser) parseParameterTypes() {\n for {\n t := self.readFieldType()\n if t != nil {\n self.md.addParameterType(t)\n } else {\n break\n }\n }\n}\nfunc (self *MemberDescriptorParser) parseReturnType() {\n t := self.readFieldType()\n if t != nil {\n self.md.returnType = t\n } else {\n self.causePanic()\n }\n}\n\nfunc (self *MemberDescriptorParser) readUint8() uint8 {\n b := self.descriptor[self.offset]\n self.offset++\n return b\n}\nfunc (self *MemberDescriptorParser) unreadUint8() {\n self.offset--\n}\n\nfunc (self *MemberDescriptorParser) readFieldType() (*FieldType) {\n switch self.readUint8() {\n case 'B': return baseTypeB\n case 'C': return baseTypeC\n case 'D': return baseTypeD\n case 'F': return baseTypeF\n case 'I': return baseTypeI\n case 'J': return 
baseTypeJ\n case 'S': return baseTypeS\n case 'Z': return baseTypeZ\n case 'V': return baseTypeV\n case 'L': return self.readObjectType()\n case '[': return self.readArrayType()\n default:\n self.unreadUint8()\n return nil\n }\n}\nfunc (self *MemberDescriptorParser) readObjectType() (*FieldType) {\n unread := self.descriptor[self.offset:]\n semicolonIndex := strings.IndexRune(unread, ';')\n if semicolonIndex == -1 {\n self.causePanic()\n return nil\n } else {\n objStart := self.offset - 1\n objEnd := self.offset + semicolonIndex + 1\n self.offset = objEnd\n descriptor := self.descriptor[objStart: objEnd]\n return &FieldType{descriptor}\n }\n}\nfunc (self *MemberDescriptorParser) readArrayType() (*FieldType) {\n arrStart := self.offset - 1\n self.readFieldType()\n arrEnd := self.offset\n descriptor := self.descriptor[arrStart: arrEnd]\n return &FieldType{descriptor}\n}\n\nfunc (self *MemberDescriptorParser) causePanic() {\n panic(\"BAD descriptor: \" + self.descriptor)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/printer\"\n\t\"go\/token\"\n\t\"os\"\n\t\"strings\"\n)\n\n\/\/ godefs returns the output for -godefs mode.\nfunc (p *Package) godefs(f *File, srcfile string) string {\n\tvar buf bytes.Buffer\n\n\tfmt.Fprintf(&buf, \"\/\/ Created by cgo -godefs - DO NOT EDIT\\n\")\n\tfmt.Fprintf(&buf, \"\/\/ %s\\n\", strings.Join(os.Args, \" \"))\n\tfmt.Fprintf(&buf, \"\\n\")\n\n\toverride := make(map[string]string)\n\n\t\/\/ Allow source file to specify override mappings.\n\t\/\/ For example, the socket data structures refer\n\t\/\/ to in_addr and in_addr6 structs but we want to be\n\t\/\/ able to treat them as byte arrays, so the godefs\n\t\/\/ inputs in package syscall say\n\t\/\/\n\t\/\/\t\/\/ +godefs map struct_in_addr [4]byte\n\t\/\/\t\/\/ +godefs map struct_in_addr6 [16]byte\n\t\/\/\n\tfor _, g := range f.Comments {\n\t\tfor _, c := range g.List {\n\t\t\ti := strings.Index(c.Text, \"+godefs map\")\n\t\t\tif i < 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ts := strings.TrimSpace(c.Text[i+len(\"+godefs map\"):])\n\t\t\ti = strings.Index(s, \" \")\n\t\t\tif i < 0 {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"invalid +godefs map comment: %s\\n\", c.Text)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\toverride[\"_Ctype_\"+strings.TrimSpace(s[:i])] = strings.TrimSpace(s[i:])\n\t\t}\n\t}\n\tfor _, n := range f.Name {\n\t\tif s := override[n.Go]; s != \"\" {\n\t\t\toverride[n.Mangle] = s\n\t\t}\n\t}\n\n\t\/\/ Otherwise, if the source file says type T C.whatever,\n\t\/\/ use \"T\" as the mangling of C.whatever,\n\t\/\/ except in the definition (handled at end of function).\n\trefName := make(map[*ast.Expr]*Name)\n\tfor _, r := range f.Ref {\n\t\trefName[r.Expr] = r.Name\n\t}\n\tfor _, d := range f.AST.Decls {\n\t\td, ok := d.(*ast.GenDecl)\n\t\tif !ok || d.Tok != token.TYPE {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, s := range d.Specs {\n\t\t\ts := s.(*ast.TypeSpec)\n\t\t\tn := refName[&s.Type]\n\t\t\tif n != nil && n.Mangle != \"\" {\n\t\t\t\toverride[n.Mangle] = s.Name.Name\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Extend overrides using typedefs:\n\t\/\/ If we know that C.xxx should format as T\n\t\/\/ and xxx is a typedef for yyy, make C.yyy format as T.\n\tfor typ, def := range typedef {\n\t\tif new := override[typ]; new != \"\" {\n\t\t\tif id, ok := def.(*ast.Ident); ok {\n\t\t\t\toverride[id.Name] = 
new\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Apply overrides.\n\tfor old, new := range override {\n\t\tif id := goIdent[old]; id != nil {\n\t\t\tid.Name = new\n\t\t}\n\t}\n\n\t\/\/ Any names still using the _C syntax are not going to compile,\n\t\/\/ although in general we don't know whether they all made it\n\t\/\/ into the file, so we can't warn here.\n\t\/\/\n\t\/\/ The most common case is union types, which begin with\n\t\/\/ _Ctype_union and for which typedef[name] is a Go byte\n\t\/\/ array of the appropriate size (such as [4]byte).\n\t\/\/ Substitute those union types with byte arrays.\n\tfor name, id := range goIdent {\n\t\tif id.Name == name && strings.Contains(name, \"_Ctype_union\") {\n\t\t\tif def := typedef[name]; def != nil {\n\t\t\t\tid.Name = gofmt(def)\n\t\t\t}\n\t\t}\n\t}\n\n\tprinter.Fprint(&buf, fset, f.AST)\n\n\treturn buf.String()\n}\n\n\/\/ cdefs returns the output for -cdefs mode.\n\/\/ The easiest way to do this is to translate the godefs Go to C.\nfunc (p *Package) cdefs(f *File, srcfile string) string {\n\tgodefsOutput := p.godefs(f, srcfile)\n\n\tlines := strings.Split(godefsOutput, \"\\n\")\n\tlines[0] = \"\/\/ Created by cgo -cdefs - DO NOT EDIT\"\n\n\tfor i, line := range lines {\n\t\tlines[i] = strings.TrimSpace(line)\n\t}\n\n\tvar out bytes.Buffer\n\tprintf := func(format string, args ...interface{}) { fmt.Fprintf(&out, format, args...) }\n\n\tdidTypedef := false\n\tfor i := 0; i < len(lines); i++ {\n\t\tline := lines[i]\n\n\t\t\/\/ Delete\n\t\t\/\/\tpackage x\n\t\tif strings.HasPrefix(line, \"package \") {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Convert\n\t\t\/\/\tconst (\n\t\t\/\/\t\tA = 1\n\t\t\/\/\t\tB = 2\n\t\t\/\/\t)\n\t\t\/\/\n\t\t\/\/ to\n\t\t\/\/\n\t\t\/\/\tenum {\n\t\t\/\/\t\tA = 1,\n\t\t\/\/\t\tB = 2,\n\t\t\/\/\t};\n\t\tif line == \"const (\" {\n\t\t\tprintf(\"enum {\\n\")\n\t\t\tfor i++; i < len(lines) && lines[i] != \")\"; i++ {\n\t\t\t\tline = lines[i]\n\t\t\t\tif line != \"\" {\n\t\t\t\t\tprintf(\"\\t%s,\", line)\n\t\t\t\t}\n\t\t\t\tprintf(\"\\n\")\n\t\t\t}\n\t\t\tprintf(\"};\\n\")\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Convert\n\t\t\/\/\tconst A = 1\n\t\t\/\/ to\n\t\t\/\/\tenum { A = 1 };\n\t\tif strings.HasPrefix(line, \"const \") {\n\t\t\tprintf(\"enum { %s };\\n\", line[len(\"const \"):])\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ On first type definition, typedef all the structs\n\t\t\/\/ in case there are dependencies between them.\n\t\tif !didTypedef && strings.HasPrefix(line, \"type \") {\n\t\t\tdidTypedef = true\n\t\t\tfor _, line := range lines {\n\t\t\t\tline = strings.TrimSpace(line)\n\t\t\t\tif strings.HasPrefix(line, \"type \") && strings.HasSuffix(line, \" struct {\") {\n\t\t\t\t\ts := line[len(\"type \") : len(line)-len(\" struct {\")]\n\t\t\t\t\tprintf(\"typedef struct %s %s;\\n\", s, s)\n\t\t\t\t}\n\t\t\t}\n\t\t\tprintf(\"\\n\")\n\t\t\tprintf(\"#pragma pack on\\n\")\n\t\t\tprintf(\"\\n\")\n\t\t}\n\n\t\t\/\/ Convert\n\t\t\/\/\ttype T struct {\n\t\t\/\/\t\tX int64\n\t\t\/\/\t\tY *int32\n\t\t\/\/\t\tZ [4]byte\n\t\t\/\/\t}\n\t\t\/\/\n\t\t\/\/ to\n\t\t\/\/\n\t\t\/\/\tstruct T {\n\t\t\/\/\t\tint64 X;\n\t\t\/\/\t\tint32 *Y;\n\t\t\/\/\t\tbyte Z[4];\n\t\t\/\/\t}\n\t\tif strings.HasPrefix(line, \"type \") && strings.HasSuffix(line, \" struct {\") {\n\t\t\ts := line[len(\"type \") : len(line)-len(\" struct {\")]\n\t\t\tprintf(\"struct %s {\\n\", s)\n\t\t\tfor i++; i < len(lines) && lines[i] != \"}\"; i++ {\n\t\t\t\tline := lines[i]\n\t\t\t\tif line != \"\" {\n\t\t\t\t\tf := strings.Fields(line)\n\t\t\t\t\tif len(f) != 2 {\n\t\t\t\t\t\tfmt.Fprintf(os.Stderr, 
\"cgo: cannot parse struct field: %s\\n\", line)\n\t\t\t\t\t\tnerrors++\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tprintf(\"\\t%s;\", cdecl(f[0], f[1]))\n\t\t\t\t}\n\t\t\t\tprintf(\"\\n\")\n\t\t\t}\n\t\t\tprintf(\"};\\n\")\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Convert\n\t\t\/\/\ttype T int\n\t\t\/\/ to\n\t\t\/\/\ttypedef int T;\n\t\tif strings.HasPrefix(line, \"type \") {\n\t\t\tf := strings.Fields(line[len(\"type \"):])\n\t\t\tif len(f) != 2 {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"cgo: cannot parse type definition: %s\\n\", line)\n\t\t\t\tnerrors++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tprintf(\"typedef\\t%s;\\n\", cdecl(f[0], f[1]))\n\t\t\tcontinue\n\t\t}\n\n\t\tprintf(\"%s\\n\", line)\n\t}\n\n\tif didTypedef {\n\t\tprintf(\"\\n\")\n\t\tprintf(\"#pragma pack off\\n\")\n\t}\n\n\treturn out.String()\n}\n\n\/\/ cdecl returns the C declaration for the given Go name and type.\n\/\/ It only handles the specific cases necessary for converting godefs output.\nfunc cdecl(name, typ string) string {\n\t\/\/ X *[0]byte -> X *void\n\tif strings.HasPrefix(typ, \"*[0]\") {\n\t\ttyp = \"*void\"\n\t}\n\t\/\/ X *byte -> *X byte\n\tif strings.HasPrefix(typ, \"*\") {\n\t\tname = \"*\" + name\n\t\ttyp = typ[1:]\n\t}\n\t\/\/ X [4]byte -> X[4] byte\n\tif strings.HasPrefix(typ, \"[\") {\n\t\ti := strings.Index(typ, \"]\") + 1\n\t\tname = name + typ[:i]\n\t\ttyp = typ[i:]\n\t}\n\t\/\/ X T -> T X\n\treturn typ + \"\\t\" + name\n}\n\nvar gofmtBuf bytes.Buffer\n\n\/\/ gofmt returns the gofmt-formatted string for an AST node.\nfunc gofmt(n interface{}) string {\n\tgofmtBuf.Reset()\n\terr := printer.Fprint(&gofmtBuf, fset, n)\n\tif err != nil {\n\t\treturn \"<\" + err.Error() + \">\"\n\t}\n\treturn gofmtBuf.String()\n}\n<commit_msg>cgo: -cdefs should translate unsafe.Pointer to void * Fixes #2454.<commit_after>\/\/ Copyright 2011 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/printer\"\n\t\"go\/token\"\n\t\"os\"\n\t\"strings\"\n)\n\n\/\/ godefs returns the output for -godefs mode.\nfunc (p *Package) godefs(f *File, srcfile string) string {\n\tvar buf bytes.Buffer\n\n\tfmt.Fprintf(&buf, \"\/\/ Created by cgo -godefs - DO NOT EDIT\\n\")\n\tfmt.Fprintf(&buf, \"\/\/ %s\\n\", strings.Join(os.Args, \" \"))\n\tfmt.Fprintf(&buf, \"\\n\")\n\n\toverride := make(map[string]string)\n\n\t\/\/ Allow source file to specify override mappings.\n\t\/\/ For example, the socket data structures refer\n\t\/\/ to in_addr and in_addr6 structs but we want to be\n\t\/\/ able to treat them as byte arrays, so the godefs\n\t\/\/ inputs in package syscall say\n\t\/\/\n\t\/\/\t\/\/ +godefs map struct_in_addr [4]byte\n\t\/\/\t\/\/ +godefs map struct_in_addr6 [16]byte\n\t\/\/\n\tfor _, g := range f.Comments {\n\t\tfor _, c := range g.List {\n\t\t\ti := strings.Index(c.Text, \"+godefs map\")\n\t\t\tif i < 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ts := strings.TrimSpace(c.Text[i+len(\"+godefs map\"):])\n\t\t\ti = strings.Index(s, \" \")\n\t\t\tif i < 0 {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"invalid +godefs map comment: %s\\n\", c.Text)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\toverride[\"_Ctype_\"+strings.TrimSpace(s[:i])] = strings.TrimSpace(s[i:])\n\t\t}\n\t}\n\tfor _, n := range f.Name {\n\t\tif s := override[n.Go]; s != \"\" {\n\t\t\toverride[n.Mangle] = s\n\t\t}\n\t}\n\n\t\/\/ Otherwise, if the source file says type T C.whatever,\n\t\/\/ use \"T\" as the mangling of C.whatever,\n\t\/\/ except in the definition (handled at end of function).\n\trefName := make(map[*ast.Expr]*Name)\n\tfor _, r := range f.Ref {\n\t\trefName[r.Expr] = r.Name\n\t}\n\tfor _, d := range f.AST.Decls {\n\t\td, ok := d.(*ast.GenDecl)\n\t\tif !ok || d.Tok != token.TYPE {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, s := range d.Specs {\n\t\t\ts := s.(*ast.TypeSpec)\n\t\t\tn := refName[&s.Type]\n\t\t\tif n != nil && n.Mangle != \"\" {\n\t\t\t\toverride[n.Mangle] = s.Name.Name\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Extend overrides using typedefs:\n\t\/\/ If we know that C.xxx should format as T\n\t\/\/ and xxx is a typedef for yyy, make C.yyy format as T.\n\tfor typ, def := range typedef {\n\t\tif new := override[typ]; new != \"\" {\n\t\t\tif id, ok := def.(*ast.Ident); ok {\n\t\t\t\toverride[id.Name] = new\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Apply overrides.\n\tfor old, new := range override {\n\t\tif id := goIdent[old]; id != nil {\n\t\t\tid.Name = new\n\t\t}\n\t}\n\n\t\/\/ Any names still using the _C syntax are not going to compile,\n\t\/\/ although in general we don't know whether they all made it\n\t\/\/ into the file, so we can't warn here.\n\t\/\/\n\t\/\/ The most common case is union types, which begin with\n\t\/\/ _Ctype_union and for which typedef[name] is a Go byte\n\t\/\/ array of the appropriate size (such as [4]byte).\n\t\/\/ Substitute those union types with byte arrays.\n\tfor name, id := range goIdent {\n\t\tif id.Name == name && strings.Contains(name, \"_Ctype_union\") {\n\t\t\tif def := typedef[name]; def != nil {\n\t\t\t\tid.Name = gofmt(def)\n\t\t\t}\n\t\t}\n\t}\n\n\tprinter.Fprint(&buf, fset, f.AST)\n\n\treturn buf.String()\n}\n\n\/\/ cdefs returns the output for -cdefs mode.\n\/\/ The easiest way to do this is to translate the godefs Go to C.\nfunc (p *Package) cdefs(f *File, srcfile string) string {\n\tgodefsOutput := p.godefs(f, 
srcfile)\n\n\tlines := strings.Split(godefsOutput, \"\\n\")\n\tlines[0] = \"\/\/ Created by cgo -cdefs - DO NOT EDIT\"\n\n\tfor i, line := range lines {\n\t\tlines[i] = strings.TrimSpace(line)\n\t}\n\n\tvar out bytes.Buffer\n\tprintf := func(format string, args ...interface{}) { fmt.Fprintf(&out, format, args...) }\n\n\tdidTypedef := false\n\tfor i := 0; i < len(lines); i++ {\n\t\tline := lines[i]\n\n\t\t\/\/ Delete\n\t\t\/\/\tpackage x\n\t\tif strings.HasPrefix(line, \"package \") {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Convert\n\t\t\/\/\tconst (\n\t\t\/\/\t\tA = 1\n\t\t\/\/\t\tB = 2\n\t\t\/\/\t)\n\t\t\/\/\n\t\t\/\/ to\n\t\t\/\/\n\t\t\/\/\tenum {\n\t\t\/\/\t\tA = 1,\n\t\t\/\/\t\tB = 2,\n\t\t\/\/\t};\n\t\tif line == \"const (\" {\n\t\t\tprintf(\"enum {\\n\")\n\t\t\tfor i++; i < len(lines) && lines[i] != \")\"; i++ {\n\t\t\t\tline = lines[i]\n\t\t\t\tif line != \"\" {\n\t\t\t\t\tprintf(\"\\t%s,\", line)\n\t\t\t\t}\n\t\t\t\tprintf(\"\\n\")\n\t\t\t}\n\t\t\tprintf(\"};\\n\")\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Convert\n\t\t\/\/\tconst A = 1\n\t\t\/\/ to\n\t\t\/\/\tenum { A = 1 };\n\t\tif strings.HasPrefix(line, \"const \") {\n\t\t\tprintf(\"enum { %s };\\n\", line[len(\"const \"):])\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ On first type definition, typedef all the structs\n\t\t\/\/ in case there are dependencies between them.\n\t\tif !didTypedef && strings.HasPrefix(line, \"type \") {\n\t\t\tdidTypedef = true\n\t\t\tfor _, line := range lines {\n\t\t\t\tline = strings.TrimSpace(line)\n\t\t\t\tif strings.HasPrefix(line, \"type \") && strings.HasSuffix(line, \" struct {\") {\n\t\t\t\t\ts := line[len(\"type \") : len(line)-len(\" struct {\")]\n\t\t\t\t\tprintf(\"typedef struct %s %s;\\n\", s, s)\n\t\t\t\t}\n\t\t\t}\n\t\t\tprintf(\"\\n\")\n\t\t\tprintf(\"#pragma pack on\\n\")\n\t\t\tprintf(\"\\n\")\n\t\t}\n\n\t\t\/\/ Convert\n\t\t\/\/\ttype T struct {\n\t\t\/\/\t\tX int64\n\t\t\/\/\t\tY *int32\n\t\t\/\/\t\tZ [4]byte\n\t\t\/\/\t}\n\t\t\/\/\n\t\t\/\/ to\n\t\t\/\/\n\t\t\/\/\tstruct T {\n\t\t\/\/\t\tint64 X;\n\t\t\/\/\t\tint32 *Y;\n\t\t\/\/\t\tbyte Z[4];\n\t\t\/\/\t}\n\t\tif strings.HasPrefix(line, \"type \") && strings.HasSuffix(line, \" struct {\") {\n\t\t\ts := line[len(\"type \") : len(line)-len(\" struct {\")]\n\t\t\tprintf(\"struct %s {\\n\", s)\n\t\t\tfor i++; i < len(lines) && lines[i] != \"}\"; i++ {\n\t\t\t\tline := lines[i]\n\t\t\t\tif line != \"\" {\n\t\t\t\t\tf := strings.Fields(line)\n\t\t\t\t\tif len(f) != 2 {\n\t\t\t\t\t\tfmt.Fprintf(os.Stderr, \"cgo: cannot parse struct field: %s\\n\", line)\n\t\t\t\t\t\tnerrors++\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tprintf(\"\\t%s;\", cdecl(f[0], f[1]))\n\t\t\t\t}\n\t\t\t\tprintf(\"\\n\")\n\t\t\t}\n\t\t\tprintf(\"};\\n\")\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Convert\n\t\t\/\/\ttype T int\n\t\t\/\/ to\n\t\t\/\/\ttypedef int T;\n\t\tif strings.HasPrefix(line, \"type \") {\n\t\t\tf := strings.Fields(line[len(\"type \"):])\n\t\t\tif len(f) != 2 {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"cgo: cannot parse type definition: %s\\n\", line)\n\t\t\t\tnerrors++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tprintf(\"typedef\\t%s;\\n\", cdecl(f[0], f[1]))\n\t\t\tcontinue\n\t\t}\n\n\t\tprintf(\"%s\\n\", line)\n\t}\n\n\tif didTypedef {\n\t\tprintf(\"\\n\")\n\t\tprintf(\"#pragma pack off\\n\")\n\t}\n\n\treturn out.String()\n}\n\n\/\/ cdecl returns the C declaration for the given Go name and type.\n\/\/ It only handles the specific cases necessary for converting godefs output.\nfunc cdecl(name, typ string) string {\n\t\/\/ X *[0]byte -> X *void\n\tif strings.HasPrefix(typ, \"*[0]\") 
{\n\t\ttyp = \"*void\"\n\t}\n\t\/\/ X *byte -> *X byte\n\tif strings.HasPrefix(typ, \"*\") {\n\t\tname = \"*\" + name\n\t\ttyp = typ[1:]\n\t}\n\t\/\/ X [4]byte -> X[4] byte\n\tif strings.HasPrefix(typ, \"[\") {\n\t\ti := strings.Index(typ, \"]\") + 1\n\t\tname = name + typ[:i]\n\t\ttyp = typ[i:]\n\t}\n\t\/\/ X T -> T X\n\t\/\/ Handle the special case: 'unsafe.Pointer' is 'void *'\n\tif typ == \"unsafe.Pointer\" {\n\t\ttyp = \"void\"\n\t\tname = \"*\" + name\n\t}\n\treturn typ + \"\\t\" + name\n}\n\nvar gofmtBuf bytes.Buffer\n\n\/\/ gofmt returns the gofmt-formatted string for an AST node.\nfunc gofmt(n interface{}) string {\n\tgofmtBuf.Reset()\n\terr := printer.Fprint(&gofmtBuf, fset, n)\n\tif err != nil {\n\t\treturn \"<\" + err.Error() + \">\"\n\t}\n\treturn gofmtBuf.String()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\/\/ TODO \"github.com\/tianon\/dtodo\/src\/dnew\"\n\t\"dnew\"\n\n\t\"pault.ag\/go\/debian\/changelog\"\n\t\"pault.ag\/go\/debian\/control\"\n\t\"pault.ag\/go\/debian\/dependency\"\n\t\"pault.ag\/go\/resolver\"\n)\n\nfunc main() {\n\tlog.SetFlags(log.Lshortfile)\n\n\t\/\/ TODO configurable path? perhaps allow for an optional *.dsc instead?\n\tcon, err := control.ParseControlFile(\"debian\/control\")\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %v\\n\", err)\n\t}\n\n\tchg, err := changelog.ParseFileOne(\"debian\/changelog\")\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %v\\n\", err)\n\t}\n\n\t\/\/ TODO configurable or something to avoid guesswork\n\ttargetSuite := chg.Target\n\tif targetSuite == \"UNRELEASED\" {\n\t\t\/\/ check for \"Upload to XYZ.\" in changelog\n\t\tre := regexp.MustCompile(`^\\s*\\*?\\s*Upload\\s+to\\s+(\\S+?)\\.?(\\s+|$)`)\n\t\tmatches := re.FindStringSubmatch(chg.Changelog)\n\t\tif matches != nil {\n\t\t\ttargetSuite = matches[1]\n\t\t} else {\n\t\t\ttargetSuite = \"unstable\"\n\t\t}\n\t}\n\n\t\/\/ TODO configurable\n\tarch := \"amd64\"\n\n\tfmt.Printf(\"Target: %s\\n\", targetSuite)\n\tfmt.Printf(\"Architecture: %s\\n\", arch)\n\tfmt.Printf(\"Source: %s\\n\", con.Source.Source)\n\tfmt.Printf(\"Version: %s\\n\", chg.Version)\n\tfmt.Printf(\"\\n\")\n\n\tindex, err := resolver.GetBinaryIndex(\n\t\t\"http:\/\/httpredir.debian.org\/debian\",\n\t\ttargetSuite,\n\t\t\"main\",\n\t\tarch,\n\t)\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %v\\n\", err)\n\t}\n\t\/\/ TODO use target suite to include more suites if necessary (ie, \"experimental\" needs \"sid\" too)\n\n\tincoming, err := resolver.GetBinaryIndex(\n\t\t\"http:\/\/incoming.debian.org\/debian-buildd\",\n\t\t\"buildd-\"+targetSuite,\n\t\t\"main\",\n\t\tarch,\n\t)\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %v\\n\", err)\n\t}\n\n\tnewQueue, err := dnew.ParseNewUrl(dnew.New822)\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %v\\n\", err)\n\t}\n\tnewBinaries := map[string]dnew.NewEntry{}\n\tfor _, newPkg := range newQueue {\n\t\tfor _, newBin := range newPkg.Binary {\n\t\t\tnewBinaries[newBin] = newPkg\n\t\t}\n\t}\n\n\tallDeps := dependency.Dependency{}\n\n\tbinRelation := dependency.Relation{}\n\tfor _, bin := range con.Binaries {\n\t\tbinRelation.Possibilities = append(binRelation.Possibilities, dependency.Possibility{\n\t\t\tName: bin.Package,\n\t\t\tVersion: &dependency.VersionRelation{\n\t\t\t\tOperator: \"=\",\n\t\t\t\tNumber: chg.Version.String(),\n\t\t\t},\n\t\t})\n\t}\n\tallDeps.Relations = append(allDeps.Relations, binRelation)\n\n\tallDeps.Relations = append(allDeps.Relations, 
con.Source.BuildDepends.Relations...)\n\tallDeps.Relations = append(allDeps.Relations, con.Source.BuildDependsIndep.Relations...)\n\n\tfor _, bin := range con.Binaries {\n\t\tallDeps.Relations = append(allDeps.Relations, bin.Depends.Relations...)\n\t\tallDeps.Relations = append(allDeps.Relations, bin.Recommends.Relations...)\n\t\tallDeps.Relations = append(allDeps.Relations, bin.Suggests.Relations...)\n\t\tallDeps.Relations = append(allDeps.Relations, bin.Enhances.Relations...)\n\t\tallDeps.Relations = append(allDeps.Relations, bin.PreDepends.Relations...)\n\t}\n\n\tdepArch, err := dependency.ParseArch(\"any\")\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %v\\n\", err)\n\t}\n\n\tseenRelations := map[string]bool{}\n\tfor _, relation := range allDeps.Relations {\n\t\trelationString := relation.String()\n\t\tif seenRelations[relationString] {\n\t\t\tcontinue\n\t\t}\n\t\tseenRelations[relationString] = true\n\n\t\tnotes := []string{}\n\t\tfor _, possi := range relation.Possibilities {\n\t\t\tif possi.Substvar {\n\t\t\t\t\/\/fmt.Printf(\"ignoring substvar %s\\n\", possi)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcan, why, _ := index.ExplainSatisfies(*depArch, possi)\n\t\t\tif !can {\n\t\t\t\tinCan, _, _ := incoming.ExplainSatisfies(*depArch, possi)\n\t\t\t\tif !inCan {\n\t\t\t\t\tif newPkg, ok := newBinaries[possi.Name]; ok {\n\t\t\t\t\t\tnewUrl := fmt.Sprintf(\"https:\/\/ftp-master.debian.org\/new\/%s_%s.html\", newPkg.Source, newPkg.Version[0])\n\t\t\t\t\t\tnotes = append(notes, fmt.Sprintf(\"NEW (%s): %s\", possi.Name, newUrl))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnotes = append(notes, why)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tnotes = append(notes, fmt.Sprintf(\"%s is in incoming\", possi.Name))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(notes) > 0 {\n\t\t\tfmt.Printf(\"Relation: %s\\n\", relation)\n\t\t\tif len(notes) > 1 {\n\t\t\t\tfmt.Printf(\"Notes:\\n %s\\n\", strings.Join(notes, \"\\n \"))\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"Notes: %s\\n\", notes[0])\n\t\t\t}\n\t\t\tfmt.Printf(\"\\n\")\n\t\t}\n\t}\n}\n<commit_msg>Refactor so that we can have the URL to incoming packages<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\/\/ TODO \"github.com\/tianon\/dtodo\/src\/dnew\"\n\t\"dnew\"\n\n\t\"pault.ag\/go\/debian\/changelog\"\n\t\"pault.ag\/go\/debian\/control\"\n\t\"pault.ag\/go\/debian\/dependency\"\n\t\"pault.ag\/go\/resolver\"\n)\n\ntype Target struct {\n\tMirror string\n\tSuites []string\n\tComponents []string\n\tArches []string\n\n\tresolver.Candidates\n}\n\nfunc NewTarget(mirror string, suites, components, arches []string) (*Target, error) {\n\ttarget := Target{\n\t\tMirror: mirror,\n\t\tSuites: suites,\n\t\tComponents: components,\n\t\tArches: arches,\n\n\t\tCandidates: resolver.Candidates{},\n\t}\n\tfor _, suite := range suites {\n\t\tfor _, component := range components {\n\t\t\tfor _, arch := range arches {\n\t\t\t\terr := resolver.AppendBinaryIndex(&target.Candidates, mirror, suite, component, arch)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn &target, nil\n}\n\nfunc (target Target) UrlTo(bin control.BinaryIndex) string {\n\treturn target.Mirror + \"\/\" + bin.Filename\n}\n\nfunc main() {\n\tlog.SetFlags(log.Lshortfile)\n\n\t\/\/ TODO configurable path? 
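(see the hedged sketch just below)\n\t\/\/\n\t\/\/ A minimal sketch -- hypothetical, not part of this program -- of making the\n\t\/\/ control-file path a flag instead of hardcoding \"debian\/control\"; it assumes\n\t\/\/ the standard \"flag\" package would be added to the imports:\n\t\/\/\n\t\/\/\tcontrolPath := flag.String(\"control\", \"debian\/control\", \"path to control file\")\n\t\/\/\tflag.Parse()\n\t\/\/\tcon, err := control.ParseControlFile(*controlPath)\n\t\/\/\n\t\/\/ TODO (cont.): 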
perhaps allow for an optional *.dsc instead?\n\tcon, err := control.ParseControlFile(\"debian\/control\")\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %v\\n\", err)\n\t}\n\n\tchg, err := changelog.ParseFileOne(\"debian\/changelog\")\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %v\\n\", err)\n\t}\n\n\t\/\/ TODO configurable or something to avoid guesswork\n\ttargetSuite := chg.Target\n\tif targetSuite == \"UNRELEASED\" {\n\t\t\/\/ check for \"Upload to XYZ.\" in changelog\n\t\tre := regexp.MustCompile(`^\\s*\\*?\\s*Upload\\s+to\\s+(\\S+?)\\.?(\\s+|$)`)\n\t\tmatches := re.FindStringSubmatch(chg.Changelog)\n\t\tif matches != nil {\n\t\t\ttargetSuite = matches[1]\n\t\t} else {\n\t\t\ttargetSuite = \"unstable\"\n\t\t}\n\t}\n\n\t\/\/ TODO configurable\n\tarches := []string{\"amd64\", \"i386\"}\n\tcomponents := []string{\"main\", \"contrib\", \"non-free\"}\n\n\tfmt.Printf(\"Target: %s\\n\", targetSuite)\n\tfmt.Printf(\"Architectures: %s\\n\", arches)\n\tfmt.Printf(\"Components: %s\\n\", components)\n\tfmt.Printf(\"Source: %s\\n\", con.Source.Source)\n\tfmt.Printf(\"Version: %s\\n\", chg.Version)\n\tfmt.Printf(\"\\n\")\n\n\tindex, err := NewTarget(\n\t\t\"http:\/\/httpredir.debian.org\/debian\",\n\t\t[]string{targetSuite},\n\t\tcomponents,\n\t\tarches,\n\t)\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %v\\n\", err)\n\t}\n\t\/\/ TODO use target suite to include more suites if necessary (ie, \"experimental\" needs \"sid\" too)\n\n\tincoming, err := NewTarget(\n\t\t\"http:\/\/incoming.debian.org\/debian-buildd\",\n\t\t[]string{\"buildd-\" + targetSuite},\n\t\tcomponents,\n\t\tarches,\n\t)\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %v\\n\", err)\n\t}\n\n\tnewQueue, err := dnew.ParseNewUrl(dnew.New822)\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %v\\n\", err)\n\t}\n\tnewBinaries := map[string]dnew.NewEntry{}\n\tfor _, newPkg := range newQueue {\n\t\tfor _, newBin := range newPkg.Binary {\n\t\t\tnewBinaries[newBin] = newPkg\n\t\t}\n\t}\n\n\tallDeps := dependency.Dependency{}\n\n\tbinRelation := dependency.Relation{}\n\tfor _, bin := range con.Binaries {\n\t\tbinRelation.Possibilities = append(binRelation.Possibilities, dependency.Possibility{\n\t\t\tName: bin.Package,\n\t\t\tVersion: &dependency.VersionRelation{\n\t\t\t\tOperator: \"=\",\n\t\t\t\tNumber: chg.Version.String(),\n\t\t\t},\n\t\t})\n\t}\n\tallDeps.Relations = append(allDeps.Relations, binRelation)\n\n\tallDeps.Relations = append(allDeps.Relations, con.Source.BuildDepends.Relations...)\n\tallDeps.Relations = append(allDeps.Relations, con.Source.BuildDependsIndep.Relations...)\n\n\tfor _, bin := range con.Binaries {\n\t\tallDeps.Relations = append(allDeps.Relations, bin.Depends.Relations...)\n\t\tallDeps.Relations = append(allDeps.Relations, bin.Recommends.Relations...)\n\t\tallDeps.Relations = append(allDeps.Relations, bin.Suggests.Relations...)\n\t\tallDeps.Relations = append(allDeps.Relations, bin.Enhances.Relations...)\n\t\tallDeps.Relations = append(allDeps.Relations, bin.PreDepends.Relations...)\n\t}\n\n\tdepArch, err := dependency.ParseArch(\"any\")\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %v\\n\", err)\n\t}\n\n\tseenRelations := map[string]bool{}\n\tfor _, relation := range allDeps.Relations {\n\t\trelationString := relation.String()\n\t\tif seenRelations[relationString] {\n\t\t\tcontinue\n\t\t}\n\t\tseenRelations[relationString] = true\n\n\t\tnotes := []string{}\n\t\tfor _, possi := range relation.Possibilities {\n\t\t\tif possi.Substvar {\n\t\t\t\t\/\/fmt.Printf(\"ignoring substvar %s\\n\", 
possi)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcan, why, _ := index.ExplainSatisfies(*depArch, possi)\n\t\t\tif !can {\n\t\t\t\tinCan, _, incomingBins := incoming.ExplainSatisfies(*depArch, possi)\n\t\t\t\tif !inCan {\n\t\t\t\t\tif newPkg, ok := newBinaries[possi.Name]; ok {\n\t\t\t\t\t\tnewUrl := fmt.Sprintf(\"https:\/\/ftp-master.debian.org\/new\/%s_%s.html\", newPkg.Source, newPkg.Version[0])\n\t\t\t\t\t\tnotes = append(notes, fmt.Sprintf(\"NEW (%s): %s\", possi.Name, newUrl))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnotes = append(notes, why)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tnotes = append(notes, fmt.Sprintf(\"incoming (%s): %s\", possi.Name, incoming.UrlTo(incomingBins[0])))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(notes) > 0 {\n\t\t\tfmt.Printf(\"Relation: %s\\n\", relation)\n\t\t\tif len(notes) > 1 {\n\t\t\t\tfmt.Printf(\"Notes:\\n %s\\n\", strings.Join(notes, \"\\n \"))\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"Notes: %s\\n\", notes[0])\n\t\t\t}\n\t\t\tfmt.Printf(\"\\n\")\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ NOTE: If you change this file you must run \".\/mkbuiltin\"\n\/\/ to update builtin.c.boot. This is not done automatically\n\/\/ to avoid depending on having a working compiler binary.\n\n\/\/ +build ignore\n\npackage PACKAGE\n\n\/\/ emitted by compiler, not referred to by go programs\n\nfunc new(typ *byte) *any\nfunc panicindex()\nfunc panicslice()\nfunc throwreturn()\nfunc throwinit()\nfunc panicwrap(string, string, string)\n\nfunc panic(interface{})\nfunc recover(*int32) interface{}\n\nfunc printbool(bool)\nfunc printfloat(float64)\nfunc printint(int64)\nfunc printuint(uint64)\nfunc printcomplex(complex128)\nfunc printstring(string)\nfunc printpointer(any)\nfunc printiface(any)\nfunc printeface(any)\nfunc printslice(any)\nfunc printnl()\nfunc printsp()\nfunc goprintf()\n\n\/\/ filled in by compiler: int n, string, string, ...\nfunc concatstring()\n\n\/\/ filled in by compiler: Type*, int n, Slice, ...\nfunc append()\nfunc appendslice(typ *byte, x any, y []any) any\nfunc appendstr(typ *byte, x []byte, y string) []byte\n\nfunc cmpstring(string, string) int\nfunc slicestring(string, int, int) string\nfunc slicestring1(string, int) string\nfunc intstring(int64) string\nfunc slicebytetostring([]byte) string\nfunc slicerunetostring([]rune) string\nfunc stringtoslicebyte(string) []byte\nfunc stringtoslicerune(string) []rune\nfunc stringiter(string, int) int\nfunc stringiter2(string, int) (retk int, retv rune)\nfunc copy(to any, fr any, wid uint32) int\nfunc slicestringcopy(to any, fr any) int\n\n\/\/ interface conversions\nfunc convI2E(elem any) (ret any)\nfunc convI2I(typ *byte, elem any) (ret any)\nfunc convT2E(typ *byte, elem any) (ret any)\nfunc convT2I(typ *byte, typ2 *byte, elem any) (ret any)\n\n\/\/ interface type assertions x.(T)\nfunc assertE2E(typ *byte, iface any) (ret any)\nfunc assertE2E2(typ *byte, iface any) (ret any, ok bool)\nfunc assertE2I(typ *byte, iface any) (ret any)\nfunc assertE2I2(typ *byte, iface any) (ret any, ok bool)\nfunc assertE2T(typ *byte, iface any) (ret any)\nfunc assertE2T2(typ *byte, iface any) (ret any, ok bool)\nfunc assertI2E(typ *byte, iface any) (ret any)\nfunc assertI2E2(typ *byte, iface any) (ret any, ok bool)\nfunc assertI2I(typ *byte, iface any) (ret any)\nfunc assertI2I2(typ *byte, iface any) (ret any, ok bool)\nfunc assertI2T(typ *byte, iface any) (ret 
any)\nfunc assertI2T2(typ *byte, iface any) (ret any, ok bool)\n\nfunc ifaceeq(i1 any, i2 any) (ret bool)\nfunc efaceeq(i1 any, i2 any) (ret bool)\nfunc ifacethash(i1 any) (ret uint32)\nfunc efacethash(i1 any) (ret uint32)\n\nfunc equal(typ *byte, x1, x2 any) (ret bool)\n\n\/\/ *byte is really *runtime.Type\nfunc makemap(mapType *byte, hint int64) (hmap map[any]any)\nfunc mapaccess1(mapType *byte, hmap map[any]any, key any) (val any)\nfunc mapaccess2(mapType *byte, hmap map[any]any, key any) (val any, pres bool)\nfunc mapassign1(mapType *byte, hmap map[any]any, key any, val any)\nfunc mapassign2(mapType *byte, hmap map[any]any, key any, val any, pres bool)\nfunc mapiterinit(mapType *byte, hmap map[any]any, hiter *any)\nfunc mapdelete(mapType *byte, hmap map[any]any, key any)\nfunc mapiternext(hiter *any)\nfunc mapiter1(hiter *any) (key any)\nfunc mapiter2(hiter *any) (key any, val any)\n\n\/\/ *byte is really *runtime.Type\nfunc makechan(chanType *byte, hint int64) (hchan chan any)\nfunc chanrecv1(chanType *byte, hchan <-chan any) (elem any)\nfunc chanrecv2(chanType *byte, hchan <-chan any) (elem any, received bool)\nfunc chansend1(chanType *byte, hchan chan<- any, elem any)\nfunc closechan(hchan any)\n\nfunc selectnbsend(chanType *byte, hchan chan<- any, elem any) bool\nfunc selectnbrecv(chanType *byte, elem *any, hchan <-chan any) bool\nfunc selectnbrecv2(chanType *byte, elem *any, received *bool, hchan <-chan any) bool\n\nfunc newselect(size int) (sel *byte)\nfunc selectsend(sel *byte, hchan chan<- any, elem *any) (selected bool)\nfunc selectrecv(sel *byte, hchan <-chan any, elem *any) (selected bool)\nfunc selectrecv2(sel *byte, hchan <-chan any, elem *any, received *bool) (selected bool)\nfunc selectdefault(sel *byte) (selected bool)\nfunc selectgo(sel *byte)\nfunc block()\n\nfunc makeslice(typ *byte, nel int64, cap int64) (ary []any)\nfunc growslice(typ *byte, old []any, n int64) (ary []any)\nfunc sliceslice1(old []any, lb uint64, width uint64) (ary []any)\nfunc sliceslice(old []any, lb uint64, hb uint64, width uint64) (ary []any)\nfunc slicearray(old *any, nel uint64, lb uint64, hb uint64, width uint64) (ary []any)\n\nfunc closure() \/\/ has args, but compiler fills in\n\nfunc memequal(eq *bool, size uintptr, x, y *any)\nfunc memequal8(eq *bool, size uintptr, x, y *any)\nfunc memequal16(eq *bool, size uintptr, x, y *any)\nfunc memequal32(eq *bool, size uintptr, x, y *any)\nfunc memequal64(eq *bool, size uintptr, x, y *any)\nfunc memequal128(eq *bool, size uintptr, x, y *any)\n\n\/\/ only used on 32-bit\nfunc int64div(int64, int64) int64\nfunc uint64div(uint64, uint64) uint64\nfunc int64mod(int64, int64) int64\nfunc uint64mod(uint64, uint64) uint64\nfunc float64toint64(float64) int64\nfunc float64touint64(float64) uint64\nfunc int64tofloat64(int64) float64\nfunc uint64tofloat64(uint64) float64\n\nfunc complex128div(num complex128, den complex128) (quo complex128)\n<commit_msg>gc: correct comment in runtime.go<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ NOTE: If you change this file you must run \".\/mkbuiltin\"\n\/\/ to update builtin.c. 
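(workflow note below.)\n\/\/\n\/\/ Illustrative workflow -- an assumption based on the note above, not taken\n\/\/ from this file -- for regenerating the compiler's builtin table after an\n\/\/ edit here:\n\/\/\n\/\/\tcd $GOROOT\/src\/cmd\/gc && .\/mkbuiltin\n\/\/\n\/\/ NOTE (cont.): 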
This is not done automatically\n\/\/ to avoid depending on having a working compiler binary.\n\n\/\/ +build ignore\n\npackage PACKAGE\n\n\/\/ emitted by compiler, not referred to by go programs\n\nfunc new(typ *byte) *any\nfunc panicindex()\nfunc panicslice()\nfunc throwreturn()\nfunc throwinit()\nfunc panicwrap(string, string, string)\n\nfunc panic(interface{})\nfunc recover(*int32) interface{}\n\nfunc printbool(bool)\nfunc printfloat(float64)\nfunc printint(int64)\nfunc printuint(uint64)\nfunc printcomplex(complex128)\nfunc printstring(string)\nfunc printpointer(any)\nfunc printiface(any)\nfunc printeface(any)\nfunc printslice(any)\nfunc printnl()\nfunc printsp()\nfunc goprintf()\n\n\/\/ filled in by compiler: int n, string, string, ...\nfunc concatstring()\n\n\/\/ filled in by compiler: Type*, int n, Slice, ...\nfunc append()\nfunc appendslice(typ *byte, x any, y []any) any\nfunc appendstr(typ *byte, x []byte, y string) []byte\n\nfunc cmpstring(string, string) int\nfunc slicestring(string, int, int) string\nfunc slicestring1(string, int) string\nfunc intstring(int64) string\nfunc slicebytetostring([]byte) string\nfunc slicerunetostring([]rune) string\nfunc stringtoslicebyte(string) []byte\nfunc stringtoslicerune(string) []rune\nfunc stringiter(string, int) int\nfunc stringiter2(string, int) (retk int, retv rune)\nfunc copy(to any, fr any, wid uint32) int\nfunc slicestringcopy(to any, fr any) int\n\n\/\/ interface conversions\nfunc convI2E(elem any) (ret any)\nfunc convI2I(typ *byte, elem any) (ret any)\nfunc convT2E(typ *byte, elem any) (ret any)\nfunc convT2I(typ *byte, typ2 *byte, elem any) (ret any)\n\n\/\/ interface type assertions x.(T)\nfunc assertE2E(typ *byte, iface any) (ret any)\nfunc assertE2E2(typ *byte, iface any) (ret any, ok bool)\nfunc assertE2I(typ *byte, iface any) (ret any)\nfunc assertE2I2(typ *byte, iface any) (ret any, ok bool)\nfunc assertE2T(typ *byte, iface any) (ret any)\nfunc assertE2T2(typ *byte, iface any) (ret any, ok bool)\nfunc assertI2E(typ *byte, iface any) (ret any)\nfunc assertI2E2(typ *byte, iface any) (ret any, ok bool)\nfunc assertI2I(typ *byte, iface any) (ret any)\nfunc assertI2I2(typ *byte, iface any) (ret any, ok bool)\nfunc assertI2T(typ *byte, iface any) (ret any)\nfunc assertI2T2(typ *byte, iface any) (ret any, ok bool)\n\nfunc ifaceeq(i1 any, i2 any) (ret bool)\nfunc efaceeq(i1 any, i2 any) (ret bool)\nfunc ifacethash(i1 any) (ret uint32)\nfunc efacethash(i1 any) (ret uint32)\n\nfunc equal(typ *byte, x1, x2 any) (ret bool)\n\n\/\/ *byte is really *runtime.Type\nfunc makemap(mapType *byte, hint int64) (hmap map[any]any)\nfunc mapaccess1(mapType *byte, hmap map[any]any, key any) (val any)\nfunc mapaccess2(mapType *byte, hmap map[any]any, key any) (val any, pres bool)\nfunc mapassign1(mapType *byte, hmap map[any]any, key any, val any)\nfunc mapassign2(mapType *byte, hmap map[any]any, key any, val any, pres bool)\nfunc mapiterinit(mapType *byte, hmap map[any]any, hiter *any)\nfunc mapdelete(mapType *byte, hmap map[any]any, key any)\nfunc mapiternext(hiter *any)\nfunc mapiter1(hiter *any) (key any)\nfunc mapiter2(hiter *any) (key any, val any)\n\n\/\/ *byte is really *runtime.Type\nfunc makechan(chanType *byte, hint int64) (hchan chan any)\nfunc chanrecv1(chanType *byte, hchan <-chan any) (elem any)\nfunc chanrecv2(chanType *byte, hchan <-chan any) (elem any, received bool)\nfunc chansend1(chanType *byte, hchan chan<- any, elem any)\nfunc closechan(hchan any)\n\nfunc selectnbsend(chanType *byte, hchan chan<- any, elem any) bool\nfunc 
selectnbrecv(chanType *byte, elem *any, hchan <-chan any) bool\nfunc selectnbrecv2(chanType *byte, elem *any, received *bool, hchan <-chan any) bool\n\nfunc newselect(size int) (sel *byte)\nfunc selectsend(sel *byte, hchan chan<- any, elem *any) (selected bool)\nfunc selectrecv(sel *byte, hchan <-chan any, elem *any) (selected bool)\nfunc selectrecv2(sel *byte, hchan <-chan any, elem *any, received *bool) (selected bool)\nfunc selectdefault(sel *byte) (selected bool)\nfunc selectgo(sel *byte)\nfunc block()\n\nfunc makeslice(typ *byte, nel int64, cap int64) (ary []any)\nfunc growslice(typ *byte, old []any, n int64) (ary []any)\nfunc sliceslice1(old []any, lb uint64, width uint64) (ary []any)\nfunc sliceslice(old []any, lb uint64, hb uint64, width uint64) (ary []any)\nfunc slicearray(old *any, nel uint64, lb uint64, hb uint64, width uint64) (ary []any)\n\nfunc closure() \/\/ has args, but compiler fills in\n\nfunc memequal(eq *bool, size uintptr, x, y *any)\nfunc memequal8(eq *bool, size uintptr, x, y *any)\nfunc memequal16(eq *bool, size uintptr, x, y *any)\nfunc memequal32(eq *bool, size uintptr, x, y *any)\nfunc memequal64(eq *bool, size uintptr, x, y *any)\nfunc memequal128(eq *bool, size uintptr, x, y *any)\n\n\/\/ only used on 32-bit\nfunc int64div(int64, int64) int64\nfunc uint64div(uint64, uint64) uint64\nfunc int64mod(int64, int64) int64\nfunc uint64mod(uint64, uint64) uint64\nfunc float64toint64(float64) int64\nfunc float64touint64(float64) uint64\nfunc int64tofloat64(int64) float64\nfunc uint64tofloat64(uint64) float64\n\nfunc complex128div(num complex128, den complex128) (quo complex128)\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ godoc: Go Documentation Server\n\n\/\/ Web server tree:\n\/\/\n\/\/\thttp:\/\/godoc\/\t\tmain landing page\n\/\/\thttp:\/\/godoc\/doc\/\tserve from $GOROOT\/doc - spec, mem, tutorial, etc.\n\/\/\thttp:\/\/godoc\/src\/\tserve files from $GOROOT\/src; .go gets pretty-printed\n\/\/\thttp:\/\/godoc\/cmd\/\tserve documentation about commands (TODO)\n\/\/\thttp:\/\/godoc\/pkg\/\tserve documentation about packages\n\/\/\t\t\t\t(idea is if you say import \"compress\/zlib\", you go to\n\/\/\t\t\t\thttp:\/\/godoc\/pkg\/compress\/zlib)\n\/\/\n\/\/ Command-line interface:\n\/\/\n\/\/\tgodoc packagepath [name ...]\n\/\/\n\/\/\tgodoc compress\/zlib\n\/\/\t\t- prints doc for package compress\/zlib\n\/\/\tgodoc crypto\/block Cipher NewCMAC\n\/\/\t\t- prints doc for Cipher and NewCMAC in package crypto\/block\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"http\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\tpathutil \"path\"\n\t\"runtime\"\n\t\"time\"\n)\n\nconst defaultAddr = \":6060\" \/\/ default webserver address\n\nvar (\n\t\/\/ periodic sync\n\tsyncCmd = flag.String(\"sync\", \"\", \"sync command; disabled if empty\")\n\tsyncMin = flag.Int(\"sync_minutes\", 0, \"sync interval in minutes; disabled if <= 0\")\n\tsyncDelay delayTime \/\/ actual sync delay in minutes; usually syncDelay == syncMin, but delay may back off exponentially\n\n\t\/\/ network\n\thttpAddr = flag.String(\"http\", \"\", \"HTTP service address (e.g., '\"+defaultAddr+\"')\")\n\tserverAddr = flag.String(\"server\", \"\", \"webserver address for command line searches\")\n\n\t\/\/ layout control\n\thtml = flag.Bool(\"html\", false, \"print HTML in command-line mode\")\n\tgenAST = flag.Bool(\"src\", false, \"print exported 
source in command-line mode\")\n\n\t\/\/ command-line searches\n\tquery = flag.Bool(\"q\", false, \"arguments are considered search queries\")\n)\n\n\nfunc serveError(c *http.Conn, r *http.Request, relpath string, err os.Error) {\n\tcontents := applyTemplate(errorHTML, \"errorHTML\", err) \/\/ err may contain an absolute path!\n\tservePage(c, \"File \"+relpath, \"\", contents)\n}\n\n\nfunc exec(c *http.Conn, args []string) (status int) {\n\tr, w, err := os.Pipe()\n\tif err != nil {\n\t\tlog.Stderrf(\"os.Pipe(): %v\\n\", err)\n\t\treturn 2\n\t}\n\n\tbin := args[0]\n\tfds := []*os.File{nil, w, w}\n\tif *verbose {\n\t\tlog.Stderrf(\"executing %v\", args)\n\t}\n\tpid, err := os.ForkExec(bin, args, os.Environ(), *goroot, fds)\n\tdefer r.Close()\n\tw.Close()\n\tif err != nil {\n\t\tlog.Stderrf(\"os.ForkExec(%q): %v\\n\", bin, err)\n\t\treturn 2\n\t}\n\n\tvar buf bytes.Buffer\n\tio.Copy(&buf, r)\n\twait, err := os.Wait(pid, 0)\n\tif err != nil {\n\t\tos.Stderr.Write(buf.Bytes())\n\t\tlog.Stderrf(\"os.Wait(%d, 0): %v\\n\", pid, err)\n\t\treturn 2\n\t}\n\tstatus = wait.ExitStatus()\n\tif !wait.Exited() || status > 1 {\n\t\tos.Stderr.Write(buf.Bytes())\n\t\tlog.Stderrf(\"executing %v failed (exit status = %d)\", args, status)\n\t\treturn\n\t}\n\n\tif *verbose {\n\t\tos.Stderr.Write(buf.Bytes())\n\t}\n\tif c != nil {\n\t\tc.SetHeader(\"content-type\", \"text\/plain; charset=utf-8\")\n\t\tc.Write(buf.Bytes())\n\t}\n\n\treturn\n}\n\n\n\/\/ Maximum directory depth, adjust as needed.\nconst maxDirDepth = 24\n\nfunc dosync(c *http.Conn, r *http.Request) {\n\targs := []string{\"\/bin\/sh\", \"-c\", *syncCmd}\n\tswitch exec(c, args) {\n\tcase 0:\n\t\t\/\/ sync succeeded and some files have changed;\n\t\t\/\/ update package tree.\n\t\t\/\/ TODO(gri): The directory tree may be temporarily out-of-sync.\n\t\t\/\/ Consider keeping separate time stamps so the web-\n\t\t\/\/ page can indicate this discrepancy.\n\t\tfsTree.set(newDirectory(*goroot, maxDirDepth))\n\t\tfallthrough\n\tcase 1:\n\t\t\/\/ sync failed because no files changed;\n\t\t\/\/ don't change the package tree\n\t\tsyncDelay.set(*syncMin) \/\/ revert to regular sync schedule\n\tdefault:\n\t\t\/\/ sync failed because of an error - back off exponentially, but try at least once a day\n\t\tsyncDelay.backoff(24 * 60)\n\t}\n}\n\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr,\n\t\t\"usage: godoc package [name ...]\\n\"+\n\t\t\t\"\tgodoc -http=\"+defaultAddr+\"\\n\")\n\tflag.PrintDefaults()\n\tos.Exit(2)\n}\n\n\nfunc loggingHandler(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(c *http.Conn, req *http.Request) {\n\t\tlog.Stderrf(\"%s\\t%s\", c.RemoteAddr, req.URL)\n\t\th.ServeHTTP(c, req)\n\t})\n}\n\n\nfunc remoteSearch(query string) (res *http.Response, err os.Error) {\n\tsearch := \"\/search?f=text&q=\" + http.URLEscape(query)\n\n\t\/\/ list of addresses to try\n\tvar addrs []string\n\tif *serverAddr != \"\" {\n\t\t\/\/ explicit server address - only try this one\n\t\taddrs = []string{*serverAddr}\n\t} else {\n\t\taddrs = []string{\n\t\t\tdefaultAddr,\n\t\t\t\"golang.org\",\n\t\t}\n\t}\n\n\t\/\/ remote search\n\tfor _, addr := range addrs {\n\t\turl := \"http:\/\/\" + addr + search\n\t\tres, _, err = http.Get(url)\n\t\tif err == nil && res.StatusCode == http.StatusOK {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err == nil && res.StatusCode != http.StatusOK {\n\t\terr = os.NewError(res.Status)\n\t}\n\n\treturn\n}\n\n\nfunc main() {\n\tflag.Usage = usage\n\tflag.Parse()\n\n\t\/\/ Check usage: either server and no args, or command line and args\n\tif 
(*httpAddr != \"\") != (flag.NArg() == 0) {\n\t\tusage()\n\t}\n\n\tif *tabwidth < 0 {\n\t\tlog.Exitf(\"negative tabwidth %d\", *tabwidth)\n\t}\n\n\tinitHandlers()\n\treadTemplates()\n\n\tif *httpAddr != \"\" {\n\t\t\/\/ HTTP server mode.\n\t\tvar handler http.Handler = http.DefaultServeMux\n\t\tif *verbose {\n\t\t\tlog.Stderrf(\"Go Documentation Server\\n\")\n\t\t\tlog.Stderrf(\"version = %s\\n\", runtime.Version())\n\t\t\tlog.Stderrf(\"address = %s\\n\", *httpAddr)\n\t\t\tlog.Stderrf(\"goroot = %s\\n\", *goroot)\n\t\t\tlog.Stderrf(\"tabwidth = %d\\n\", *tabwidth)\n\t\t\tif !fsMap.IsEmpty() {\n\t\t\t\tlog.Stderr(\"user-defined mapping:\")\n\t\t\t\tfsMap.Fprint(os.Stderr)\n\t\t\t}\n\t\t\thandler = loggingHandler(handler)\n\t\t}\n\n\t\tregisterPublicHandlers(http.DefaultServeMux)\n\t\tif *syncCmd != \"\" {\n\t\t\thttp.Handle(\"\/debug\/sync\", http.HandlerFunc(dosync))\n\t\t}\n\n\t\t\/\/ Initialize directory tree with corresponding timestamp.\n\t\t\/\/ Do it in two steps:\n\t\t\/\/ 1) set timestamp right away so that the indexer is kicked on\n\t\tfsTree.set(nil)\n\t\t\/\/ 2) compute initial directory tree in a goroutine so that launch is quick\n\t\tgo func() { fsTree.set(newDirectory(*goroot, maxDirDepth)) }()\n\n\t\t\/\/ Start sync goroutine, if enabled.\n\t\tif *syncCmd != \"\" && *syncMin > 0 {\n\t\t\tsyncDelay.set(*syncMin) \/\/ initial sync delay\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tdosync(nil, nil)\n\t\t\t\t\tdelay, _ := syncDelay.get()\n\t\t\t\t\tif *verbose {\n\t\t\t\t\t\tlog.Stderrf(\"next sync in %dmin\", delay.(int))\n\t\t\t\t\t}\n\t\t\t\t\ttime.Sleep(int64(delay.(int)) * 60e9)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\n\t\t\/\/ Start indexing goroutine.\n\t\tgo indexer()\n\n\t\t\/\/ The server may have been restarted; always wait 1sec to\n\t\t\/\/ give the forking server a chance to shut down and release\n\t\t\/\/ the http port.\n\t\t\/\/ TODO(gri): Do we still need this?\n\t\ttime.Sleep(1e9)\n\n\t\t\/\/ Start http server.\n\t\tif err := http.ListenAndServe(*httpAddr, handler); err != nil {\n\t\t\tlog.Exitf(\"ListenAndServe %s: %v\", *httpAddr, err)\n\t\t}\n\n\t\treturn\n\t}\n\n\t\/\/ Command line mode.\n\tif *html {\n\t\tpackageText = packageHTML\n\t\tsearchText = packageHTML\n\t}\n\n\tif *query {\n\t\t\/\/ Command-line queries.\n\t\tfor i := 0; i < flag.NArg(); i++ {\n\t\t\tres, err := remoteSearch(flag.Arg(i))\n\t\t\tif err != nil {\n\t\t\t\tlog.Exitf(\"remoteSearch: %s\", err)\n\t\t\t}\n\t\t\tio.Copy(os.Stdout, res.Body)\n\t\t}\n\t\treturn\n\t}\n\n\t\/\/ determine paths\n\tpath := flag.Arg(0)\n\tif len(path) > 0 && path[0] == '.' {\n\t\t\/\/ assume cwd; don't assume -goroot\n\t\tcwd, _ := os.Getwd() \/\/ ignore errors\n\t\tpath = pathutil.Join(cwd, path)\n\t}\n\trelpath := path\n\tabspath := path\n\tif len(path) > 0 && path[0] != '\/' {\n\t\tabspath = absolutePath(path, pkgHandler.fsRoot)\n\t} else {\n\t\trelpath = relativePath(path)\n\t}\n\n\t\/\/ TODO(gri): Provide a mechanism (flag?) 
to select a package\n\t\/\/ if there are multiple packages in a directory.\n\tinfo := pkgHandler.getPageInfo(abspath, relpath, \"\", *genAST, true)\n\n\tif info.PAst == nil && info.PDoc == nil && info.Dirs == nil {\n\t\t\/\/ try again, this time assume it's a command\n\t\tif len(path) > 0 && path[0] != '\/' {\n\t\t\tabspath = absolutePath(path, cmdHandler.fsRoot)\n\t\t}\n\t\tinfo = cmdHandler.getPageInfo(abspath, relpath, \"\", false, false)\n\t}\n\n\tif info.PDoc != nil && flag.NArg() > 1 {\n\t\targs := flag.Args()\n\t\tinfo.PDoc.Filter(args[1:])\n\t}\n\n\tif err := packageText.Execute(info, os.Stdout); err != nil {\n\t\tlog.Stderrf(\"packageText.Execute: %s\", err)\n\t}\n}\n<commit_msg>godoc: export pprof debug information<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ godoc: Go Documentation Server\n\n\/\/ Web server tree:\n\/\/\n\/\/\thttp:\/\/godoc\/\t\tmain landing page\n\/\/\thttp:\/\/godoc\/doc\/\tserve from $GOROOT\/doc - spec, mem, tutorial, etc.\n\/\/\thttp:\/\/godoc\/src\/\tserve files from $GOROOT\/src; .go gets pretty-printed\n\/\/\thttp:\/\/godoc\/cmd\/\tserve documentation about commands (TODO)\n\/\/\thttp:\/\/godoc\/pkg\/\tserve documentation about packages\n\/\/\t\t\t\t(idea is if you say import \"compress\/zlib\", you go to\n\/\/\t\t\t\thttp:\/\/godoc\/pkg\/compress\/zlib)\n\/\/\n\/\/ Command-line interface:\n\/\/\n\/\/\tgodoc packagepath [name ...]\n\/\/\n\/\/\tgodoc compress\/zlib\n\/\/\t\t- prints doc for package compress\/zlib\n\/\/\tgodoc crypto\/block Cipher NewCMAC\n\/\/\t\t- prints doc for Cipher and NewCMAC in package crypto\/block\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"http\"\n\t_ \"http\/pprof\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\tpathutil \"path\"\n\t\"runtime\"\n\t\"time\"\n)\n\nconst defaultAddr = \":6060\" \/\/ default webserver address\n\nvar (\n\t\/\/ periodic sync\n\tsyncCmd = flag.String(\"sync\", \"\", \"sync command; disabled if empty\")\n\tsyncMin = flag.Int(\"sync_minutes\", 0, \"sync interval in minutes; disabled if <= 0\")\n\tsyncDelay delayTime \/\/ actual sync delay in minutes; usually syncDelay == syncMin, but delay may back off exponentially\n\n\t\/\/ network\n\thttpAddr = flag.String(\"http\", \"\", \"HTTP service address (e.g., '\"+defaultAddr+\"')\")\n\tserverAddr = flag.String(\"server\", \"\", \"webserver address for command line searches\")\n\n\t\/\/ layout control\n\thtml = flag.Bool(\"html\", false, \"print HTML in command-line mode\")\n\tgenAST = flag.Bool(\"src\", false, \"print exported source in command-line mode\")\n\n\t\/\/ command-line searches\n\tquery = flag.Bool(\"q\", false, \"arguments are considered search queries\")\n)\n\n\nfunc serveError(c *http.Conn, r *http.Request, relpath string, err os.Error) {\n\tcontents := applyTemplate(errorHTML, \"errorHTML\", err) \/\/ err may contain an absolute path!\n\tservePage(c, \"File \"+relpath, \"\", contents)\n}\n\n\nfunc exec(c *http.Conn, args []string) (status int) {\n\tr, w, err := os.Pipe()\n\tif err != nil {\n\t\tlog.Stderrf(\"os.Pipe(): %v\\n\", err)\n\t\treturn 2\n\t}\n\n\tbin := args[0]\n\tfds := []*os.File{nil, w, w}\n\tif *verbose {\n\t\tlog.Stderrf(\"executing %v\", args)\n\t}\n\tpid, err := os.ForkExec(bin, args, os.Environ(), *goroot, fds)\n\tdefer r.Close()\n\tw.Close()\n\tif err != nil {\n\t\tlog.Stderrf(\"os.ForkExec(%q): %v\\n\", bin, err)\n\t\treturn 2\n\t}\n\n\tvar buf 
bytes.Buffer\n\tio.Copy(&buf, r)\n\twait, err := os.Wait(pid, 0)\n\tif err != nil {\n\t\tos.Stderr.Write(buf.Bytes())\n\t\tlog.Stderrf(\"os.Wait(%d, 0): %v\\n\", pid, err)\n\t\treturn 2\n\t}\n\tstatus = wait.ExitStatus()\n\tif !wait.Exited() || status > 1 {\n\t\tos.Stderr.Write(buf.Bytes())\n\t\tlog.Stderrf(\"executing %v failed (exit status = %d)\", args, status)\n\t\treturn\n\t}\n\n\tif *verbose {\n\t\tos.Stderr.Write(buf.Bytes())\n\t}\n\tif c != nil {\n\t\tc.SetHeader(\"content-type\", \"text\/plain; charset=utf-8\")\n\t\tc.Write(buf.Bytes())\n\t}\n\n\treturn\n}\n\n\n\/\/ Maximum directory depth, adjust as needed.\nconst maxDirDepth = 24\n\nfunc dosync(c *http.Conn, r *http.Request) {\n\targs := []string{\"\/bin\/sh\", \"-c\", *syncCmd}\n\tswitch exec(c, args) {\n\tcase 0:\n\t\t\/\/ sync succeeded and some files have changed;\n\t\t\/\/ update package tree.\n\t\t\/\/ TODO(gri): The directory tree may be temporarily out-of-sync.\n\t\t\/\/ Consider keeping separate time stamps so the web-\n\t\t\/\/ page can indicate this discrepancy.\n\t\tfsTree.set(newDirectory(*goroot, maxDirDepth))\n\t\tfallthrough\n\tcase 1:\n\t\t\/\/ sync failed because no files changed;\n\t\t\/\/ don't change the package tree\n\t\tsyncDelay.set(*syncMin) \/\/ revert to regular sync schedule\n\tdefault:\n\t\t\/\/ sync failed because of an error - back off exponentially, but try at least once a day\n\t\tsyncDelay.backoff(24 * 60)\n\t}\n}\n\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr,\n\t\t\"usage: godoc package [name ...]\\n\"+\n\t\t\t\"\tgodoc -http=\"+defaultAddr+\"\\n\")\n\tflag.PrintDefaults()\n\tos.Exit(2)\n}\n\n\nfunc loggingHandler(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(c *http.Conn, req *http.Request) {\n\t\tlog.Stderrf(\"%s\\t%s\", c.RemoteAddr, req.URL)\n\t\th.ServeHTTP(c, req)\n\t})\n}\n\n\nfunc remoteSearch(query string) (res *http.Response, err os.Error) {\n\tsearch := \"\/search?f=text&q=\" + http.URLEscape(query)\n\n\t\/\/ list of addresses to try\n\tvar addrs []string\n\tif *serverAddr != \"\" {\n\t\t\/\/ explicit server address - only try this one\n\t\taddrs = []string{*serverAddr}\n\t} else {\n\t\taddrs = []string{\n\t\t\tdefaultAddr,\n\t\t\t\"golang.org\",\n\t\t}\n\t}\n\n\t\/\/ remote search\n\tfor _, addr := range addrs {\n\t\turl := \"http:\/\/\" + addr + search\n\t\tres, _, err = http.Get(url)\n\t\tif err == nil && res.StatusCode == http.StatusOK {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err == nil && res.StatusCode != http.StatusOK {\n\t\terr = os.NewError(res.Status)\n\t}\n\n\treturn\n}\n\n\nfunc main() {\n\tflag.Usage = usage\n\tflag.Parse()\n\n\t\/\/ Check usage: either server and no args, or command line and args\n\tif (*httpAddr != \"\") != (flag.NArg() == 0) {\n\t\tusage()\n\t}\n\n\tif *tabwidth < 0 {\n\t\tlog.Exitf(\"negative tabwidth %d\", *tabwidth)\n\t}\n\n\tinitHandlers()\n\treadTemplates()\n\n\tif *httpAddr != \"\" {\n\t\t\/\/ HTTP server mode.\n\t\tvar handler http.Handler = http.DefaultServeMux\n\t\tif *verbose {\n\t\t\tlog.Stderrf(\"Go Documentation Server\\n\")\n\t\t\tlog.Stderrf(\"version = %s\\n\", runtime.Version())\n\t\t\tlog.Stderrf(\"address = %s\\n\", *httpAddr)\n\t\t\tlog.Stderrf(\"goroot = %s\\n\", *goroot)\n\t\t\tlog.Stderrf(\"tabwidth = %d\\n\", *tabwidth)\n\t\t\tif !fsMap.IsEmpty() {\n\t\t\t\tlog.Stderr(\"user-defined mapping:\")\n\t\t\t\tfsMap.Fprint(os.Stderr)\n\t\t\t}\n\t\t\thandler = loggingHandler(handler)\n\t\t}\n\n\t\tregisterPublicHandlers(http.DefaultServeMux)\n\t\tif *syncCmd != \"\" {\n\t\t\thttp.Handle(\"\/debug\/sync\", 
http.HandlerFunc(dosync))\n\t\t}\n\n\t\t\/\/ Initialize directory tree with corresponding timestamp.\n\t\t\/\/ Do it in two steps:\n\t\t\/\/ 1) set timestamp right away so that the indexer is kicked on\n\t\tfsTree.set(nil)\n\t\t\/\/ 2) compute initial directory tree in a goroutine so that launch is quick\n\t\tgo func() { fsTree.set(newDirectory(*goroot, maxDirDepth)) }()\n\n\t\t\/\/ Start sync goroutine, if enabled.\n\t\tif *syncCmd != \"\" && *syncMin > 0 {\n\t\t\tsyncDelay.set(*syncMin) \/\/ initial sync delay\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tdosync(nil, nil)\n\t\t\t\t\tdelay, _ := syncDelay.get()\n\t\t\t\t\tif *verbose {\n\t\t\t\t\t\tlog.Stderrf(\"next sync in %dmin\", delay.(int))\n\t\t\t\t\t}\n\t\t\t\t\ttime.Sleep(int64(delay.(int)) * 60e9)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\n\t\t\/\/ Start indexing goroutine.\n\t\tgo indexer()\n\n\t\t\/\/ The server may have been restarted; always wait 1sec to\n\t\t\/\/ give the forking server a chance to shut down and release\n\t\t\/\/ the http port.\n\t\t\/\/ TODO(gri): Do we still need this?\n\t\ttime.Sleep(1e9)\n\n\t\t\/\/ Start http server.\n\t\tif err := http.ListenAndServe(*httpAddr, handler); err != nil {\n\t\t\tlog.Exitf(\"ListenAndServe %s: %v\", *httpAddr, err)\n\t\t}\n\n\t\treturn\n\t}\n\n\t\/\/ Command line mode.\n\tif *html {\n\t\tpackageText = packageHTML\n\t\tsearchText = packageHTML\n\t}\n\n\tif *query {\n\t\t\/\/ Command-line queries.\n\t\tfor i := 0; i < flag.NArg(); i++ {\n\t\t\tres, err := remoteSearch(flag.Arg(i))\n\t\t\tif err != nil {\n\t\t\t\tlog.Exitf(\"remoteSearch: %s\", err)\n\t\t\t}\n\t\t\tio.Copy(os.Stdout, res.Body)\n\t\t}\n\t\treturn\n\t}\n\n\t\/\/ determine paths\n\tpath := flag.Arg(0)\n\tif len(path) > 0 && path[0] == '.' {\n\t\t\/\/ assume cwd; don't assume -goroot\n\t\tcwd, _ := os.Getwd() \/\/ ignore errors\n\t\tpath = pathutil.Join(cwd, path)\n\t}\n\trelpath := path\n\tabspath := path\n\tif len(path) > 0 && path[0] != '\/' {\n\t\tabspath = absolutePath(path, pkgHandler.fsRoot)\n\t} else {\n\t\trelpath = relativePath(path)\n\t}\n\n\t\/\/ TODO(gri): Provide a mechanism (flag?) to select a package\n\t\/\/ if there are multiple packages in a directory.\n\tinfo := pkgHandler.getPageInfo(abspath, relpath, \"\", *genAST, true)\n\n\tif info.PAst == nil && info.PDoc == nil && info.Dirs == nil {\n\t\t\/\/ try again, this time assume it's a command\n\t\tif len(path) > 0 && path[0] != '\/' {\n\t\t\tabspath = absolutePath(path, cmdHandler.fsRoot)\n\t\t}\n\t\tinfo = cmdHandler.getPageInfo(abspath, relpath, \"\", false, false)\n\t}\n\n\tif info.PDoc != nil && flag.NArg() > 1 {\n\t\targs := flag.Args()\n\t\tinfo.PDoc.Filter(args[1:])\n\t}\n\n\tif err := packageText.Execute(info, os.Stdout); err != nil {\n\t\tlog.Stderrf(\"packageText.Execute: %s\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"internal\/obscuretestdata\"\n\t\"internal\/testenv\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\t\"text\/template\"\n)\n\nvar testnmpath string \/\/ path to nm command created for testing purposes\n\n\/\/ The TestMain function creates a nm command for testing purposes and\n\/\/ deletes it after the tests have been run.\nfunc TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}\n\nfunc testMain(m *testing.M) int {\n\tif !testenv.HasGoBuild() {\n\t\treturn 0\n\t}\n\n\ttmpDir, err := os.MkdirTemp(\"\", \"TestNM\")\n\tif err != nil {\n\t\tfmt.Println(\"TempDir failed:\", err)\n\t\treturn 2\n\t}\n\tdefer os.RemoveAll(tmpDir)\n\n\ttestnmpath = filepath.Join(tmpDir, \"testnm.exe\")\n\tgotool, err := testenv.GoTool()\n\tif err != nil {\n\t\tfmt.Println(\"GoTool failed:\", err)\n\t\treturn 2\n\t}\n\tout, err := exec.Command(gotool, \"build\", \"-o\", testnmpath, \"cmd\/nm\").CombinedOutput()\n\tif err != nil {\n\t\tfmt.Printf(\"go build -o %v cmd\/nm: %v\\n%s\", testnmpath, err, string(out))\n\t\treturn 2\n\t}\n\n\treturn m.Run()\n}\n\nfunc TestNonGoExecs(t *testing.T) {\n\tt.Parallel()\n\ttestfiles := []string{\n\t\t\"debug\/elf\/testdata\/gcc-386-freebsd-exec\",\n\t\t\"debug\/elf\/testdata\/gcc-amd64-linux-exec\",\n\t\t\"debug\/macho\/testdata\/gcc-386-darwin-exec.base64\", \/\/ golang.org\/issue\/34986\n\t\t\"debug\/macho\/testdata\/gcc-amd64-darwin-exec.base64\", \/\/ golang.org\/issue\/34986\n\t\t\/\/ \"debug\/pe\/testdata\/gcc-amd64-mingw-exec\", \/\/ no symbols!\n\t\t\"debug\/pe\/testdata\/gcc-386-mingw-exec\",\n\t\t\"debug\/plan9obj\/testdata\/amd64-plan9-exec\",\n\t\t\"debug\/plan9obj\/testdata\/386-plan9-exec\",\n\t\t\"internal\/xcoff\/testdata\/gcc-ppc64-aix-dwarf2-exec\",\n\t}\n\tfor _, f := range testfiles {\n\t\texepath := filepath.Join(testenv.GOROOT(t), \"src\", f)\n\t\tif strings.HasSuffix(f, \".base64\") {\n\t\t\ttf, err := obscuretestdata.DecodeToTempFile(exepath)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"obscuretestdata.DecodeToTempFile(%s): %v\", exepath, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdefer os.Remove(tf)\n\t\t\texepath = tf\n\t\t}\n\n\t\tcmd := exec.Command(testnmpath, exepath)\n\t\tout, err := cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"go tool nm %v: %v\\n%s\", exepath, err, string(out))\n\t\t}\n\t}\n}\n\nfunc testGoExec(t *testing.T, iscgo, isexternallinker bool) {\n\tt.Parallel()\n\ttmpdir, err := os.MkdirTemp(\"\", \"TestGoExec\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(tmpdir)\n\n\tsrc := filepath.Join(tmpdir, \"a.go\")\n\tfile, err := os.Create(src)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = template.Must(template.New(\"main\").Parse(testexec)).Execute(file, iscgo)\n\tif e := file.Close(); err == nil {\n\t\terr = e\n\t}\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\texe := filepath.Join(tmpdir, \"a.exe\")\n\targs := []string{\"build\", \"-o\", exe}\n\tif iscgo {\n\t\tlinkmode := \"internal\"\n\t\tif isexternallinker {\n\t\t\tlinkmode = \"external\"\n\t\t}\n\t\targs = append(args, \"-ldflags\", \"-linkmode=\"+linkmode)\n\t}\n\targs = append(args, src)\n\tout, err := exec.Command(testenv.GoToolPath(t), args...).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"building test executable failed: %s %s\", err, out)\n\t}\n\n\tout, err = exec.Command(exe).CombinedOutput()\n\tif err != nil 
{\n\t\tt.Fatalf(\"running test executable failed: %s %s\", err, out)\n\t}\n\tnames := make(map[string]string)\n\tfor _, line := range strings.Split(string(out), \"\\n\") {\n\t\tif line == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tf := strings.Split(line, \"=\")\n\t\tif len(f) != 2 {\n\t\t\tt.Fatalf(\"unexpected output line: %q\", line)\n\t\t}\n\t\tnames[\"main.\"+f[0]] = f[1]\n\t}\n\n\truntimeSyms := map[string]string{\n\t\t\"runtime.text\": \"T\",\n\t\t\"runtime.etext\": \"T\",\n\t\t\"runtime.rodata\": \"R\",\n\t\t\"runtime.erodata\": \"R\",\n\t\t\"runtime.epclntab\": \"R\",\n\t\t\"runtime.noptrdata\": \"D\",\n\t}\n\n\tif runtime.GOOS == \"aix\" && iscgo {\n\t\t\/\/ pclntab is moved to .data section on AIX.\n\t\truntimeSyms[\"runtime.epclntab\"] = \"D\"\n\t}\n\n\tout, err = exec.Command(testnmpath, exe).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"go tool nm: %v\\n%s\", err, string(out))\n\t}\n\n\trelocated := func(code string) bool {\n\t\tif runtime.GOOS == \"aix\" {\n\t\t\t\/\/ On AIX, .data and .bss addresses are changed by the loader.\n\t\t\t\/\/ Therefore, the values returned by the exec aren't the same\n\t\t\t\/\/ than the ones inside the symbol table.\n\t\t\t\/\/ In case of cgo, .text symbols are also changed.\n\t\t\tswitch code {\n\t\t\tcase \"T\", \"t\", \"R\", \"r\":\n\t\t\t\treturn iscgo\n\t\t\tcase \"D\", \"d\", \"B\", \"b\":\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\treturn true\n\t\t}\n\t\tif runtime.GOOS == \"darwin\" && runtime.GOARCH == \"arm64\" {\n\t\t\treturn true \/\/ On darwin\/arm64 everything is PIE\n\t\t}\n\t\treturn false\n\t}\n\n\tdups := make(map[string]bool)\n\tfor _, line := range strings.Split(string(out), \"\\n\") {\n\t\tf := strings.Fields(line)\n\t\tif len(f) < 3 {\n\t\t\tcontinue\n\t\t}\n\t\tname := f[2]\n\t\tif addr, found := names[name]; found {\n\t\t\tif want, have := addr, \"0x\"+f[0]; have != want {\n\t\t\t\tif !relocated(f[1]) {\n\t\t\t\t\tt.Errorf(\"want %s address for %s symbol, but have %s\", want, name, have)\n\t\t\t\t}\n\t\t\t}\n\t\t\tdelete(names, name)\n\t\t}\n\t\tif _, found := dups[name]; found {\n\t\t\tt.Errorf(\"duplicate name of %q is found\", name)\n\t\t}\n\t\tif stype, found := runtimeSyms[name]; found {\n\t\t\tif runtime.GOOS == \"plan9\" && stype == \"R\" {\n\t\t\t\t\/\/ no read-only data segment symbol on Plan 9\n\t\t\t\tstype = \"D\"\n\t\t\t}\n\t\t\tif want, have := stype, strings.ToUpper(f[1]); have != want {\n\t\t\t\tt.Errorf(\"want %s type for %s symbol, but have %s\", want, name, have)\n\t\t\t}\n\t\t\tdelete(runtimeSyms, name)\n\t\t}\n\t}\n\tif len(names) > 0 {\n\t\tt.Errorf(\"executable is missing %v symbols\", names)\n\t}\n\tif len(runtimeSyms) > 0 {\n\t\tt.Errorf(\"executable is missing %v symbols\", runtimeSyms)\n\t}\n}\n\nfunc TestGoExec(t *testing.T) {\n\ttestGoExec(t, false, false)\n}\n\nfunc testGoLib(t *testing.T, iscgo bool) {\n\tt.Parallel()\n\ttmpdir, err := os.MkdirTemp(\"\", \"TestGoLib\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(tmpdir)\n\n\tgopath := filepath.Join(tmpdir, \"gopath\")\n\tlibpath := filepath.Join(gopath, \"src\", \"mylib\")\n\n\terr = os.MkdirAll(libpath, 0777)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsrc := filepath.Join(libpath, \"a.go\")\n\tfile, err := os.Create(src)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = template.Must(template.New(\"mylib\").Parse(testlib)).Execute(file, iscgo)\n\tif e := file.Close(); err == nil {\n\t\terr = e\n\t}\n\tif err == nil {\n\t\terr = os.WriteFile(filepath.Join(libpath, \"go.mod\"), 
[]byte(\"module mylib\\n\"), 0666)\n\t}\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\targs := []string{\"install\", \"mylib\"}\n\tcmd := exec.Command(testenv.GoToolPath(t), args...)\n\tcmd.Dir = libpath\n\tcmd.Env = append(os.Environ(), \"GOPATH=\"+gopath)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"building test lib failed: %s %s\", err, out)\n\t}\n\tpat := filepath.Join(gopath, \"pkg\", \"*\", \"mylib.a\")\n\tms, err := filepath.Glob(pat)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(ms) == 0 {\n\t\tt.Fatalf(\"cannot found paths for pattern %s\", pat)\n\t}\n\tmylib := ms[0]\n\n\tout, err = exec.Command(testnmpath, mylib).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"go tool nm: %v\\n%s\", err, string(out))\n\t}\n\ttype symType struct {\n\t\tType string\n\t\tName string\n\t\tCSym bool\n\t\tFound bool\n\t}\n\tvar syms = []symType{\n\t\t{\"B\", \"mylib.Testdata\", false, false},\n\t\t{\"T\", \"mylib.Testfunc\", false, false},\n\t}\n\tif iscgo {\n\t\tsyms = append(syms, symType{\"B\", \"mylib.TestCgodata\", false, false})\n\t\tsyms = append(syms, symType{\"T\", \"mylib.TestCgofunc\", false, false})\n\t\tif runtime.GOOS == \"darwin\" || runtime.GOOS == \"ios\" || (runtime.GOOS == \"windows\" && runtime.GOARCH == \"386\") {\n\t\t\tsyms = append(syms, symType{\"D\", \"_cgodata\", true, false})\n\t\t\tsyms = append(syms, symType{\"T\", \"_cgofunc\", true, false})\n\t\t} else if runtime.GOOS == \"aix\" {\n\t\t\tsyms = append(syms, symType{\"D\", \"cgodata\", true, false})\n\t\t\tsyms = append(syms, symType{\"T\", \".cgofunc\", true, false})\n\t\t} else {\n\t\t\tsyms = append(syms, symType{\"D\", \"cgodata\", true, false})\n\t\t\tsyms = append(syms, symType{\"T\", \"cgofunc\", true, false})\n\t\t}\n\t}\n\n\tfor _, line := range strings.Split(string(out), \"\\n\") {\n\t\tf := strings.Fields(line)\n\t\tvar typ, name string\n\t\tvar csym bool\n\t\tif iscgo {\n\t\t\tif len(f) < 4 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcsym = !strings.Contains(f[0], \"_go_.o\")\n\t\t\ttyp = f[2]\n\t\t\tname = f[3]\n\t\t} else {\n\t\t\tif len(f) < 3 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttyp = f[1]\n\t\t\tname = f[2]\n\t\t}\n\t\tfor i := range syms {\n\t\t\tsym := &syms[i]\n\t\t\tif sym.Type == typ && sym.Name == name && sym.CSym == csym {\n\t\t\t\tif sym.Found {\n\t\t\t\t\tt.Fatalf(\"duplicate symbol %s %s\", sym.Type, sym.Name)\n\t\t\t\t}\n\t\t\t\tsym.Found = true\n\t\t\t}\n\t\t}\n\t}\n\tfor _, sym := range syms {\n\t\tif !sym.Found {\n\t\t\tt.Errorf(\"cannot found symbol %s %s\", sym.Type, sym.Name)\n\t\t}\n\t}\n}\n\nfunc TestGoLib(t *testing.T) {\n\ttestGoLib(t, false)\n}\n\nconst testexec = `\npackage main\n\nimport \"fmt\"\n{{if .}}import \"C\"\n{{end}}\n\nfunc main() {\n\ttestfunc()\n}\n\nvar testdata uint32\n\nfunc testfunc() {\n\tfmt.Printf(\"main=%p\\n\", main)\n\tfmt.Printf(\"testfunc=%p\\n\", testfunc)\n\tfmt.Printf(\"testdata=%p\\n\", &testdata)\n}\n`\n\nconst testlib = `\npackage mylib\n\n{{if .}}\n\/\/ int cgodata = 5;\n\/\/ void cgofunc(void) {}\nimport \"C\"\n\nvar TestCgodata = C.cgodata\n\nfunc TestCgofunc() {\n\tC.cgofunc()\n}\n{{end}}\n\nvar Testdata uint32\n\nfunc Testfunc() {}\n`\n<commit_msg>cmd\/nm: don't rely on an erroneous install target in tests<commit_after>\/\/ Copyright 2014 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"internal\/obscuretestdata\"\n\t\"internal\/testenv\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\t\"text\/template\"\n)\n\nvar testnmpath string \/\/ path to nm command created for testing purposes\n\n\/\/ The TestMain function creates a nm command for testing purposes and\n\/\/ deletes it after the tests have been run.\nfunc TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}\n\nfunc testMain(m *testing.M) int {\n\tif !testenv.HasGoBuild() {\n\t\treturn 0\n\t}\n\n\ttmpDir, err := os.MkdirTemp(\"\", \"TestNM\")\n\tif err != nil {\n\t\tfmt.Println(\"TempDir failed:\", err)\n\t\treturn 2\n\t}\n\tdefer os.RemoveAll(tmpDir)\n\n\ttestnmpath = filepath.Join(tmpDir, \"testnm.exe\")\n\tgotool, err := testenv.GoTool()\n\tif err != nil {\n\t\tfmt.Println(\"GoTool failed:\", err)\n\t\treturn 2\n\t}\n\tout, err := exec.Command(gotool, \"build\", \"-o\", testnmpath, \"cmd\/nm\").CombinedOutput()\n\tif err != nil {\n\t\tfmt.Printf(\"go build -o %v cmd\/nm: %v\\n%s\", testnmpath, err, string(out))\n\t\treturn 2\n\t}\n\n\treturn m.Run()\n}\n\nfunc TestNonGoExecs(t *testing.T) {\n\tt.Parallel()\n\ttestfiles := []string{\n\t\t\"debug\/elf\/testdata\/gcc-386-freebsd-exec\",\n\t\t\"debug\/elf\/testdata\/gcc-amd64-linux-exec\",\n\t\t\"debug\/macho\/testdata\/gcc-386-darwin-exec.base64\", \/\/ golang.org\/issue\/34986\n\t\t\"debug\/macho\/testdata\/gcc-amd64-darwin-exec.base64\", \/\/ golang.org\/issue\/34986\n\t\t\/\/ \"debug\/pe\/testdata\/gcc-amd64-mingw-exec\", \/\/ no symbols!\n\t\t\"debug\/pe\/testdata\/gcc-386-mingw-exec\",\n\t\t\"debug\/plan9obj\/testdata\/amd64-plan9-exec\",\n\t\t\"debug\/plan9obj\/testdata\/386-plan9-exec\",\n\t\t\"internal\/xcoff\/testdata\/gcc-ppc64-aix-dwarf2-exec\",\n\t}\n\tfor _, f := range testfiles {\n\t\texepath := filepath.Join(testenv.GOROOT(t), \"src\", f)\n\t\tif strings.HasSuffix(f, \".base64\") {\n\t\t\ttf, err := obscuretestdata.DecodeToTempFile(exepath)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"obscuretestdata.DecodeToTempFile(%s): %v\", exepath, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdefer os.Remove(tf)\n\t\t\texepath = tf\n\t\t}\n\n\t\tcmd := exec.Command(testnmpath, exepath)\n\t\tout, err := cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"go tool nm %v: %v\\n%s\", exepath, err, string(out))\n\t\t}\n\t}\n}\n\nfunc testGoExec(t *testing.T, iscgo, isexternallinker bool) {\n\tt.Parallel()\n\ttmpdir, err := os.MkdirTemp(\"\", \"TestGoExec\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(tmpdir)\n\n\tsrc := filepath.Join(tmpdir, \"a.go\")\n\tfile, err := os.Create(src)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = template.Must(template.New(\"main\").Parse(testexec)).Execute(file, iscgo)\n\tif e := file.Close(); err == nil {\n\t\terr = e\n\t}\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\texe := filepath.Join(tmpdir, \"a.exe\")\n\targs := []string{\"build\", \"-o\", exe}\n\tif iscgo {\n\t\tlinkmode := \"internal\"\n\t\tif isexternallinker {\n\t\t\tlinkmode = \"external\"\n\t\t}\n\t\targs = append(args, \"-ldflags\", \"-linkmode=\"+linkmode)\n\t}\n\targs = append(args, src)\n\tout, err := exec.Command(testenv.GoToolPath(t), args...).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"building test executable failed: %s %s\", err, out)\n\t}\n\n\tout, err = exec.Command(exe).CombinedOutput()\n\tif err != nil 
{\n\t\tt.Fatalf(\"running test executable failed: %s %s\", err, out)\n\t}\n\tnames := make(map[string]string)\n\tfor _, line := range strings.Split(string(out), \"\\n\") {\n\t\tif line == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tf := strings.Split(line, \"=\")\n\t\tif len(f) != 2 {\n\t\t\tt.Fatalf(\"unexpected output line: %q\", line)\n\t\t}\n\t\tnames[\"main.\"+f[0]] = f[1]\n\t}\n\n\truntimeSyms := map[string]string{\n\t\t\"runtime.text\": \"T\",\n\t\t\"runtime.etext\": \"T\",\n\t\t\"runtime.rodata\": \"R\",\n\t\t\"runtime.erodata\": \"R\",\n\t\t\"runtime.epclntab\": \"R\",\n\t\t\"runtime.noptrdata\": \"D\",\n\t}\n\n\tif runtime.GOOS == \"aix\" && iscgo {\n\t\t\/\/ pclntab is moved to .data section on AIX.\n\t\truntimeSyms[\"runtime.epclntab\"] = \"D\"\n\t}\n\n\tout, err = exec.Command(testnmpath, exe).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"go tool nm: %v\\n%s\", err, string(out))\n\t}\n\n\trelocated := func(code string) bool {\n\t\tif runtime.GOOS == \"aix\" {\n\t\t\t\/\/ On AIX, .data and .bss addresses are changed by the loader.\n\t\t\t\/\/ Therefore, the values returned by the exec aren't the same\n\t\t\t\/\/ than the ones inside the symbol table.\n\t\t\t\/\/ In case of cgo, .text symbols are also changed.\n\t\t\tswitch code {\n\t\t\tcase \"T\", \"t\", \"R\", \"r\":\n\t\t\t\treturn iscgo\n\t\t\tcase \"D\", \"d\", \"B\", \"b\":\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\treturn true\n\t\t}\n\t\tif runtime.GOOS == \"darwin\" && runtime.GOARCH == \"arm64\" {\n\t\t\treturn true \/\/ On darwin\/arm64 everything is PIE\n\t\t}\n\t\treturn false\n\t}\n\n\tdups := make(map[string]bool)\n\tfor _, line := range strings.Split(string(out), \"\\n\") {\n\t\tf := strings.Fields(line)\n\t\tif len(f) < 3 {\n\t\t\tcontinue\n\t\t}\n\t\tname := f[2]\n\t\tif addr, found := names[name]; found {\n\t\t\tif want, have := addr, \"0x\"+f[0]; have != want {\n\t\t\t\tif !relocated(f[1]) {\n\t\t\t\t\tt.Errorf(\"want %s address for %s symbol, but have %s\", want, name, have)\n\t\t\t\t}\n\t\t\t}\n\t\t\tdelete(names, name)\n\t\t}\n\t\tif _, found := dups[name]; found {\n\t\t\tt.Errorf(\"duplicate name of %q is found\", name)\n\t\t}\n\t\tif stype, found := runtimeSyms[name]; found {\n\t\t\tif runtime.GOOS == \"plan9\" && stype == \"R\" {\n\t\t\t\t\/\/ no read-only data segment symbol on Plan 9\n\t\t\t\tstype = \"D\"\n\t\t\t}\n\t\t\tif want, have := stype, strings.ToUpper(f[1]); have != want {\n\t\t\t\tt.Errorf(\"want %s type for %s symbol, but have %s\", want, name, have)\n\t\t\t}\n\t\t\tdelete(runtimeSyms, name)\n\t\t}\n\t}\n\tif len(names) > 0 {\n\t\tt.Errorf(\"executable is missing %v symbols\", names)\n\t}\n\tif len(runtimeSyms) > 0 {\n\t\tt.Errorf(\"executable is missing %v symbols\", runtimeSyms)\n\t}\n}\n\nfunc TestGoExec(t *testing.T) {\n\ttestGoExec(t, false, false)\n}\n\nfunc testGoLib(t *testing.T, iscgo bool) {\n\tt.Parallel()\n\ttmpdir, err := os.MkdirTemp(\"\", \"TestGoLib\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(tmpdir)\n\n\tgopath := filepath.Join(tmpdir, \"gopath\")\n\tlibpath := filepath.Join(gopath, \"src\", \"mylib\")\n\n\terr = os.MkdirAll(libpath, 0777)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsrc := filepath.Join(libpath, \"a.go\")\n\tfile, err := os.Create(src)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = template.Must(template.New(\"mylib\").Parse(testlib)).Execute(file, iscgo)\n\tif e := file.Close(); err == nil {\n\t\terr = e\n\t}\n\tif err == nil {\n\t\terr = os.WriteFile(filepath.Join(libpath, \"go.mod\"), 
[]byte(\"module mylib\\n\"), 0666)\n\t}\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcmd := exec.Command(testenv.GoToolPath(t), \"build\", \"-buildmode=archive\", \"-o\", \"mylib.a\", \".\")\n\tcmd.Dir = libpath\n\tcmd.Env = append(os.Environ(), \"GOPATH=\"+gopath)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"building test lib failed: %s %s\", err, out)\n\t}\n\tmylib := filepath.Join(libpath, \"mylib.a\")\n\n\tout, err = exec.Command(testnmpath, mylib).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"go tool nm: %v\\n%s\", err, string(out))\n\t}\n\ttype symType struct {\n\t\tType string\n\t\tName string\n\t\tCSym bool\n\t\tFound bool\n\t}\n\tvar syms = []symType{\n\t\t{\"B\", \"mylib.Testdata\", false, false},\n\t\t{\"T\", \"mylib.Testfunc\", false, false},\n\t}\n\tif iscgo {\n\t\tsyms = append(syms, symType{\"B\", \"mylib.TestCgodata\", false, false})\n\t\tsyms = append(syms, symType{\"T\", \"mylib.TestCgofunc\", false, false})\n\t\tif runtime.GOOS == \"darwin\" || runtime.GOOS == \"ios\" || (runtime.GOOS == \"windows\" && runtime.GOARCH == \"386\") {\n\t\t\tsyms = append(syms, symType{\"D\", \"_cgodata\", true, false})\n\t\t\tsyms = append(syms, symType{\"T\", \"_cgofunc\", true, false})\n\t\t} else if runtime.GOOS == \"aix\" {\n\t\t\tsyms = append(syms, symType{\"D\", \"cgodata\", true, false})\n\t\t\tsyms = append(syms, symType{\"T\", \".cgofunc\", true, false})\n\t\t} else {\n\t\t\tsyms = append(syms, symType{\"D\", \"cgodata\", true, false})\n\t\t\tsyms = append(syms, symType{\"T\", \"cgofunc\", true, false})\n\t\t}\n\t}\n\n\tfor _, line := range strings.Split(string(out), \"\\n\") {\n\t\tf := strings.Fields(line)\n\t\tvar typ, name string\n\t\tvar csym bool\n\t\tif iscgo {\n\t\t\tif len(f) < 4 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcsym = !strings.Contains(f[0], \"_go_.o\")\n\t\t\ttyp = f[2]\n\t\t\tname = f[3]\n\t\t} else {\n\t\t\tif len(f) < 3 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttyp = f[1]\n\t\t\tname = f[2]\n\t\t}\n\t\tfor i := range syms {\n\t\t\tsym := &syms[i]\n\t\t\tif sym.Type == typ && sym.Name == name && sym.CSym == csym {\n\t\t\t\tif sym.Found {\n\t\t\t\t\tt.Fatalf(\"duplicate symbol %s %s\", sym.Type, sym.Name)\n\t\t\t\t}\n\t\t\t\tsym.Found = true\n\t\t\t}\n\t\t}\n\t}\n\tfor _, sym := range syms {\n\t\tif !sym.Found {\n\t\t\tt.Errorf(\"cannot found symbol %s %s\", sym.Type, sym.Name)\n\t\t}\n\t}\n}\n\nfunc TestGoLib(t *testing.T) {\n\ttestGoLib(t, false)\n}\n\nconst testexec = `\npackage main\n\nimport \"fmt\"\n{{if .}}import \"C\"\n{{end}}\n\nfunc main() {\n\ttestfunc()\n}\n\nvar testdata uint32\n\nfunc testfunc() {\n\tfmt.Printf(\"main=%p\\n\", main)\n\tfmt.Printf(\"testfunc=%p\\n\", testfunc)\n\tfmt.Printf(\"testdata=%p\\n\", &testdata)\n}\n`\n\nconst testlib = `\npackage mylib\n\n{{if .}}\n\/\/ int cgodata = 5;\n\/\/ void cgofunc(void) {}\nimport \"C\"\n\nvar TestCgodata = C.cgodata\n\nfunc TestCgofunc() {\n\tC.cgofunc()\n}\n{{end}}\n\nvar Testdata uint32\n\nfunc Testfunc() {}\n`\n<|endoftext|>"} {"text":"<commit_before>\/**\nCopyright (c) 2016 The ConnectorDB Contributors\nLicensed under the MIT license.\n**\/\npackage connectordb\n\nimport (\n\tpconfig \"config\/permissions\"\n\t\"connectordb\/authoperator\/permissions\"\n\t\"connectordb\/datastream\"\n\t\"connectordb\/messenger\"\n\t\"connectordb\/query\"\n\t\"connectordb\/users\"\n\t\"errors\"\n)\n\nvar (\n\t\/\/ ErrTimestampOrder is thrown when the tiemstamps are not increasing\n\tErrTimestampOrder = errors.New(\"Timestamps are not ordered!\")\n)\n\nfunc (db *Database) 
getStreamPath(strm *users.Stream) (*users.User, *users.Device, string, error) {\n\tdev, err := db.ReadDeviceByID(strm.DeviceID)\n\tif err != nil {\n\t\treturn nil, nil, \"\", err\n\t}\n\tusr, err := db.ReadUserByID(dev.UserID)\n\tif err != nil {\n\t\treturn nil, nil, \"\", err\n\t}\n\treturn usr, dev, usr.Name + \"\/\" + dev.Name + \"\/\" + strm.Name, nil\n}\n\n\/\/LengthStreamByID returns the total number of datapoints in the stream by ID\nfunc (db *Database) LengthStreamByID(streamID int64, substream string) (int64, error) {\n\tstrm, err := db.ReadStreamByID(streamID)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn db.DataStream.StreamLength(strm.DeviceID, strm.StreamID, substream)\n}\n\n\/\/TimeToIndexStreamByID returns the index for the given timestamp\nfunc (db *Database) TimeToIndexStreamByID(streamID int64, substream string, time float64) (int64, error) {\n\tstrm, err := db.ReadStreamByID(streamID)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn db.DataStream.GetTimeIndex(strm.DeviceID, streamID, substream, time)\n}\n\n\/\/InsertStreamByID inserts into the stream given by the ID\nfunc (db *Database) InsertStreamByID(streamID int64, substream string, data datastream.DatapointArray, restamp bool) error {\n\tstrm, err := db.ReadStreamByID(streamID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdata.SetZeroTime()\n\t\/\/Now check that everything is okay\n\tif !strm.Validate(data) {\n\t\treturn datastream.ErrInvalidDatapoint\n\t}\n\tif !data.IsTimestampOrdered() {\n\t\treturn ErrTimestampOrder\n\t}\n\n\tu, _, streampath, err := db.getStreamPath(strm)\n\tif substream != \"\" {\n\t\tstreampath = streampath + \"\/\" + substream\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !strm.Ephemeral {\n\n\t\tr := permissions.GetUserRole(pconfig.Get(), u)\n\t\t_, err = db.DataStream.Insert(strm.DeviceID, strm.StreamID, substream, data, restamp, r.MaxDeviceSize, r.MaxStreamSize)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn db.Messenger.Publish(streampath, messenger.Message{streampath, \"\", data})\n}\n\n\/\/GetStreamTimeRangeByID reads time range by ID\nfunc (db *Database) GetStreamTimeRangeByID(streamID int64, substream string, t1 float64, t2 float64, limit int64, transform string) (datastream.DataRange, error) {\n\tstrm, err := db.ReadStreamByID(streamID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdr, err := db.DataStream.TRange(strm.DeviceID, strm.StreamID, substream, t1, t2)\n\tif limit > 0 {\n\t\tdr = datastream.NewNumRange(dr, limit)\n\t}\n\t\/\/Add a transform to the resulting data range if one is wanted\n\tif transform != \"\" {\n\t\ttr, err := query.NewExtendedTransformRange(dr, transform)\n\t\tif err != nil {\n\t\t\tdr.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\tdr = tr\n\t}\n\n\treturn dr, err\n}\n\n\/\/GetShiftedStreamTimeRangeByID reads time range by ID with an index shift\nfunc (db *Database) GetShiftedStreamTimeRangeByID(streamID int64, substream string, t1 float64, t2 float64, shift, limit int64, transform string) (datastream.DataRange, error) {\n\tstrm, err := db.ReadStreamByID(streamID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdr, err := db.DataStream.TimePlusIndexRange(strm.DeviceID, strm.StreamID, substream, t1, t2, shift)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif limit > 0 {\n\t\tdr = datastream.NewNumRange(dr, limit)\n\t}\n\t\/\/Add a transform to the resulting data range if one is wanted\n\tif transform != \"\" {\n\t\ttr, err := query.NewExtendedTransformRange(dr, transform)\n\t\tif err != nil 
{\n\t\t\tdr.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\tdr = tr\n\t}\n\n\treturn dr, err\n}\n\n\/\/GetStreamIndexRangeByID reads index range by ID\nfunc (db *Database) GetStreamIndexRangeByID(streamID int64, substream string, i1 int64, i2 int64, transform string) (datastream.DataRange, error) {\n\tstrm, err := db.ReadStreamByID(streamID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdr, err := db.DataStream.IRange(strm.DeviceID, strm.StreamID, substream, i1, i2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/Add a transform to the resulting data range if one is wanted\n\tif transform != \"\" {\n\t\ttr, err := query.NewExtendedTransformRange(dr, transform)\n\t\tif err != nil {\n\t\t\tdr.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\tdr = tr\n\t}\n\treturn dr, err\n}\n<commit_msg>Moved limit to after transform<commit_after>\/**\nCopyright (c) 2016 The ConnectorDB Contributors\nLicensed under the MIT license.\n**\/\npackage connectordb\n\nimport (\n\tpconfig \"config\/permissions\"\n\t\"connectordb\/authoperator\/permissions\"\n\t\"connectordb\/datastream\"\n\t\"connectordb\/messenger\"\n\t\"connectordb\/query\"\n\t\"connectordb\/users\"\n\t\"errors\"\n)\n\nvar (\n\t\/\/ ErrTimestampOrder is thrown when the tiemstamps are not increasing\n\tErrTimestampOrder = errors.New(\"Timestamps are not ordered!\")\n)\n\nfunc (db *Database) getStreamPath(strm *users.Stream) (*users.User, *users.Device, string, error) {\n\tdev, err := db.ReadDeviceByID(strm.DeviceID)\n\tif err != nil {\n\t\treturn nil, nil, \"\", err\n\t}\n\tusr, err := db.ReadUserByID(dev.UserID)\n\tif err != nil {\n\t\treturn nil, nil, \"\", err\n\t}\n\treturn usr, dev, usr.Name + \"\/\" + dev.Name + \"\/\" + strm.Name, nil\n}\n\n\/\/LengthStreamByID returns the total number of datapoints in the stream by ID\nfunc (db *Database) LengthStreamByID(streamID int64, substream string) (int64, error) {\n\tstrm, err := db.ReadStreamByID(streamID)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn db.DataStream.StreamLength(strm.DeviceID, strm.StreamID, substream)\n}\n\n\/\/TimeToIndexStreamByID returns the index for the given timestamp\nfunc (db *Database) TimeToIndexStreamByID(streamID int64, substream string, time float64) (int64, error) {\n\tstrm, err := db.ReadStreamByID(streamID)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn db.DataStream.GetTimeIndex(strm.DeviceID, streamID, substream, time)\n}\n\n\/\/InsertStreamByID inserts into the stream given by the ID\nfunc (db *Database) InsertStreamByID(streamID int64, substream string, data datastream.DatapointArray, restamp bool) error {\n\tstrm, err := db.ReadStreamByID(streamID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdata.SetZeroTime()\n\t\/\/Now check that everything is okay\n\tif !strm.Validate(data) {\n\t\treturn datastream.ErrInvalidDatapoint\n\t}\n\tif !data.IsTimestampOrdered() {\n\t\treturn ErrTimestampOrder\n\t}\n\n\tu, _, streampath, err := db.getStreamPath(strm)\n\tif substream != \"\" {\n\t\tstreampath = streampath + \"\/\" + substream\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !strm.Ephemeral {\n\n\t\tr := permissions.GetUserRole(pconfig.Get(), u)\n\t\t_, err = db.DataStream.Insert(strm.DeviceID, strm.StreamID, substream, data, restamp, r.MaxDeviceSize, r.MaxStreamSize)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn db.Messenger.Publish(streampath, messenger.Message{streampath, \"\", data})\n}\n\n\/\/GetStreamTimeRangeByID reads time range by ID\nfunc (db *Database) GetStreamTimeRangeByID(streamID int64, substream string, t1 float64, t2 
float64, limit int64, transform string) (datastream.DataRange, error) {\n\tstrm, err := db.ReadStreamByID(streamID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdr, err := db.DataStream.TRange(strm.DeviceID, strm.StreamID, substream, t1, t2)\n\n\t\/\/Add a transform to the resulting data range if one is wanted\n\tif transform != \"\" {\n\t\ttr, err := query.NewExtendedTransformRange(dr, transform)\n\t\tif err != nil {\n\t\t\tdr.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\tdr = tr\n\t}\n\n\tif limit > 0 {\n\t\tdr = datastream.NewNumRange(dr, limit)\n\t}\n\n\treturn dr, err\n}\n\n\/\/GetShiftedStreamTimeRangeByID reads time range by ID with an index shift\nfunc (db *Database) GetShiftedStreamTimeRangeByID(streamID int64, substream string, t1 float64, t2 float64, shift, limit int64, transform string) (datastream.DataRange, error) {\n\tstrm, err := db.ReadStreamByID(streamID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdr, err := db.DataStream.TimePlusIndexRange(strm.DeviceID, strm.StreamID, substream, t1, t2, shift)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/Add a transform to the resulting data range if one is wanted\n\tif transform != \"\" {\n\t\ttr, err := query.NewExtendedTransformRange(dr, transform)\n\t\tif err != nil {\n\t\t\tdr.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\tdr = tr\n\t}\n\tif limit > 0 {\n\t\tdr = datastream.NewNumRange(dr, limit)\n\t}\n\n\treturn dr, err\n}\n\n\/\/GetStreamIndexRangeByID reads index range by ID\nfunc (db *Database) GetStreamIndexRangeByID(streamID int64, substream string, i1 int64, i2 int64, transform string) (datastream.DataRange, error) {\n\tstrm, err := db.ReadStreamByID(streamID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdr, err := db.DataStream.IRange(strm.DeviceID, strm.StreamID, substream, i1, i2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/Add a transform to the resulting data range if one is wanted\n\tif transform != \"\" {\n\t\ttr, err := query.NewExtendedTransformRange(dr, transform)\n\t\tif err != nil {\n\t\t\tdr.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\tdr = tr\n\t}\n\treturn dr, err\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n *\n * k6 - a next-generation load testing tool\n * Copyright (C) 2019 Load Impact\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as\n * published by the Free Software Foundation, either version 3 of the\n * License, or (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with this program. 
If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\n *\/\n\npackage executor\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n\tnull \"gopkg.in\/guregu\/null.v3\"\n\n\t\"github.com\/loadimpact\/k6\/lib\"\n\t\"github.com\/loadimpact\/k6\/lib\/types\"\n)\n\nfunc TestVariableLoopingVUsRun(t *testing.T) {\n\tt.Parallel()\n\n\tconfig := VariableLoopingVUsConfig{\n\t\tBaseConfig: BaseConfig{GracefulStop: types.NullDurationFrom(0)},\n\t\tGracefulRampDown: types.NullDurationFrom(0),\n\t\tStartVUs: null.IntFrom(5),\n\t\tStages: []Stage{\n\t\t\t{\n\t\t\t\tDuration: types.NullDurationFrom(1 * time.Second),\n\t\t\t\tTarget: null.IntFrom(5),\n\t\t\t},\n\t\t\t{\n\t\t\t\tDuration: types.NullDurationFrom(0),\n\t\t\t\tTarget: null.IntFrom(3),\n\t\t\t},\n\t\t\t{\n\t\t\t\tDuration: types.NullDurationFrom(1 * time.Second),\n\t\t\t\tTarget: null.IntFrom(3),\n\t\t\t},\n\t\t},\n\t}\n\n\tvar iterCount int64\n\tes := lib.NewExecutionState(lib.Options{}, 10, 50)\n\tvar ctx, cancel, executor, _ = setupExecutor(\n\t\tt, config, es,\n\t\tsimpleRunner(func(ctx context.Context) error {\n\t\t\t\/\/ Sleeping for a weird duration somewhat offset from the\n\t\t\t\/\/ executor ticks to hopefully keep race conditions out of\n\t\t\t\/\/ our control from failing the test.\n\t\t\ttime.Sleep(300 * time.Millisecond)\n\t\t\tatomic.AddInt64(&iterCount, 1)\n\t\t\treturn nil\n\t\t}),\n\t)\n\tdefer cancel()\n\n\tsampleTimes := []time.Duration{\n\t\t500 * time.Millisecond,\n\t\t1000 * time.Millisecond,\n\t\t700 * time.Millisecond,\n\t}\n\n\terrCh := make(chan error)\n\tgo func() { errCh <- executor.Run(ctx, nil) }()\n\n\tvar result = make([]int64, len(sampleTimes))\n\tfor i, d := range sampleTimes {\n\t\ttime.Sleep(d)\n\t\tresult[i] = es.GetCurrentlyActiveVUsCount()\n\t}\n\n\trequire.NoError(t, <-errCh)\n\n\tassert.Equal(t, []int64{5, 3, 0}, result)\n\tassert.Equal(t, int64(29), iterCount)\n}\n\n\/\/ Ensure there's no wobble of VUs during graceful ramp-down, without segments.\n\/\/ See https:\/\/github.com\/loadimpact\/k6\/issues\/1296\nfunc TestVariableLoopingVUsRampDownNoWobble(t *testing.T) {\n\tt.Parallel()\n\n\tconfig := VariableLoopingVUsConfig{\n\t\tBaseConfig: BaseConfig{GracefulStop: types.NullDurationFrom(0)},\n\t\tGracefulRampDown: types.NullDurationFrom(1 * time.Second),\n\t\tStartVUs: null.IntFrom(0),\n\t\tStages: []Stage{\n\t\t\t{\n\t\t\t\tDuration: types.NullDurationFrom(3 * time.Second),\n\t\t\t\tTarget: null.IntFrom(10),\n\t\t\t},\n\t\t\t{\n\t\t\t\tDuration: types.NullDurationFrom(2 * time.Second),\n\t\t\t\tTarget: null.IntFrom(0),\n\t\t\t},\n\t\t},\n\t}\n\n\tes := lib.NewExecutionState(lib.Options{}, 10, 50)\n\tvar ctx, cancel, executor, _ = setupExecutor(\n\t\tt, config, es,\n\t\tsimpleRunner(func(ctx context.Context) error {\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\treturn nil\n\t\t}),\n\t)\n\tdefer cancel()\n\n\tvar (\n\t\twg sync.WaitGroup\n\t\tresult []int64\n\t\tm sync.Mutex\n\t)\n\n\tsampleActiveVUs := func(delay time.Duration) {\n\t\ttime.Sleep(delay)\n\t\tm.Lock()\n\t\tresult = append(result, es.GetCurrentlyActiveVUsCount())\n\t\tm.Unlock()\n\t}\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tsampleActiveVUs(100 * time.Millisecond)\n\t\tsampleActiveVUs(3 * time.Second)\n\t\ttime.AfterFunc(2*time.Second, func() {\n\t\t\tsampleActiveVUs(0)\n\t\t})\n\t\ttime.Sleep(1 * time.Second)\n\t\t\/\/ Sample ramp-down at a higher frequency\n\t\tfor i := 0; i < 15; i++ 
{\n\t\t\tsampleActiveVUs(100 * time.Millisecond)\n\t\t}\n\t}()\n\n\terr := executor.Run(ctx, nil)\n\n\twg.Wait()\n\trequire.NoError(t, err)\n\tassert.Equal(t, int64(0), result[0])\n\tassert.Equal(t, int64(10), result[1])\n\tassert.Equal(t, int64(0), result[len(result)-1])\n\n\tvar curr int64\n\tlast := result[2]\n\t\/\/ Check all ramp-down samples\n\tfor i := 3; i < len(result[2:]); i++ {\n\t\tcurr = result[i]\n\t\t\/\/ Detect ramp-ups, missteps (e.g. 7 -> 4), but ignore pauses\n\t\tif curr > last || (curr != last && curr != last-1) {\n\t\t\tassert.FailNow(t,\n\t\t\t\tfmt.Sprintf(\"ramping down wobble bug - \"+\n\t\t\t\t\t\"current: %d, previous: %d\\nVU samples: %v\", curr, last, result))\n\t\t}\n\t\tlast = curr\n\t}\n}\n<commit_msg>Make TestVariableLoopingVUsRampDownNoWobble more deterministic<commit_after>\/*\n *\n * k6 - a next-generation load testing tool\n * Copyright (C) 2019 Load Impact\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as\n * published by the Free Software Foundation, either version 3 of the\n * License, or (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\n *\/\n\npackage executor\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n\tnull \"gopkg.in\/guregu\/null.v3\"\n\n\t\"github.com\/loadimpact\/k6\/lib\"\n\t\"github.com\/loadimpact\/k6\/lib\/types\"\n)\n\nfunc TestVariableLoopingVUsRun(t *testing.T) {\n\tt.Parallel()\n\n\tconfig := VariableLoopingVUsConfig{\n\t\tBaseConfig: BaseConfig{GracefulStop: types.NullDurationFrom(0)},\n\t\tGracefulRampDown: types.NullDurationFrom(0),\n\t\tStartVUs: null.IntFrom(5),\n\t\tStages: []Stage{\n\t\t\t{\n\t\t\t\tDuration: types.NullDurationFrom(1 * time.Second),\n\t\t\t\tTarget: null.IntFrom(5),\n\t\t\t},\n\t\t\t{\n\t\t\t\tDuration: types.NullDurationFrom(0),\n\t\t\t\tTarget: null.IntFrom(3),\n\t\t\t},\n\t\t\t{\n\t\t\t\tDuration: types.NullDurationFrom(1 * time.Second),\n\t\t\t\tTarget: null.IntFrom(3),\n\t\t\t},\n\t\t},\n\t}\n\n\tvar iterCount int64\n\tes := lib.NewExecutionState(lib.Options{}, 10, 50)\n\tvar ctx, cancel, executor, _ = setupExecutor(\n\t\tt, config, es,\n\t\tsimpleRunner(func(ctx context.Context) error {\n\t\t\t\/\/ Sleeping for a weird duration somewhat offset from the\n\t\t\t\/\/ executor ticks to hopefully keep race conditions out of\n\t\t\t\/\/ our control from failing the test.\n\t\t\ttime.Sleep(300 * time.Millisecond)\n\t\t\tatomic.AddInt64(&iterCount, 1)\n\t\t\treturn nil\n\t\t}),\n\t)\n\tdefer cancel()\n\n\tsampleTimes := []time.Duration{\n\t\t500 * time.Millisecond,\n\t\t1000 * time.Millisecond,\n\t\t700 * time.Millisecond,\n\t}\n\n\terrCh := make(chan error)\n\tgo func() { errCh <- executor.Run(ctx, nil) }()\n\n\tvar result = make([]int64, len(sampleTimes))\n\tfor i, d := range sampleTimes {\n\t\ttime.Sleep(d)\n\t\tresult[i] = es.GetCurrentlyActiveVUsCount()\n\t}\n\n\trequire.NoError(t, <-errCh)\n\n\tassert.Equal(t, []int64{5, 3, 0}, result)\n\tassert.Equal(t, int64(29), 
iterCount)\n}\n\n\/\/ Ensure there's no wobble of VUs during graceful ramp-down, without segments.\n\/\/ See https:\/\/github.com\/loadimpact\/k6\/issues\/1296\nfunc TestVariableLoopingVUsRampDownNoWobble(t *testing.T) {\n\tt.Parallel()\n\n\tconfig := VariableLoopingVUsConfig{\n\t\tBaseConfig: BaseConfig{GracefulStop: types.NullDurationFrom(0)},\n\t\tGracefulRampDown: types.NullDurationFrom(1 * time.Second),\n\t\tStartVUs: null.IntFrom(0),\n\t\tStages: []Stage{\n\t\t\t{\n\t\t\t\tDuration: types.NullDurationFrom(3 * time.Second),\n\t\t\t\tTarget: null.IntFrom(10),\n\t\t\t},\n\t\t\t{\n\t\t\t\tDuration: types.NullDurationFrom(2 * time.Second),\n\t\t\t\tTarget: null.IntFrom(0),\n\t\t\t},\n\t\t},\n\t}\n\n\tes := lib.NewExecutionState(lib.Options{}, 10, 50)\n\tvar ctx, cancel, executor, _ = setupExecutor(\n\t\tt, config, es,\n\t\tsimpleRunner(func(ctx context.Context) error {\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\treturn nil\n\t\t}),\n\t)\n\tdefer cancel()\n\n\tsampleTimes := []time.Duration{\n\t\t100 * time.Millisecond,\n\t\t3400 * time.Millisecond,\n\t}\n\tconst rampDownSamples = 50\n\n\terrCh := make(chan error)\n\tgo func() { errCh <- executor.Run(ctx, nil) }()\n\n\tvar result = make([]int64, len(sampleTimes)+rampDownSamples)\n\tfor i, d := range sampleTimes {\n\t\ttime.Sleep(d)\n\t\tresult[i] = es.GetCurrentlyActiveVUsCount()\n\t}\n\n\t\/\/ Sample ramp-down at a higher rate\n\tfor i := len(sampleTimes); i < rampDownSamples; i++ {\n\t\ttime.Sleep(50 * time.Millisecond)\n\t\tresult[i] = es.GetCurrentlyActiveVUsCount()\n\t}\n\n\trequire.NoError(t, <-errCh)\n\n\t\/\/ Some baseline checks\n\tassert.Equal(t, int64(0), result[0])\n\tassert.Equal(t, int64(10), result[1])\n\tassert.Equal(t, int64(0), result[len(result)-1])\n\n\tvar curr int64\n\tlast := result[2]\n\t\/\/ Check all ramp-down samples for wobble\n\tfor i := 3; i < len(result[2:]); i++ {\n\t\tcurr = result[i]\n\t\t\/\/ Detect ramp-ups, missteps (e.g. 7 -> 4), but ignore pauses (repeats)\n\t\tif curr > last || (curr != last && curr != last-1) {\n\t\t\tassert.FailNow(t,\n\t\t\t\tfmt.Sprintf(\"ramping down wobble bug - \"+\n\t\t\t\t\t\"current: %d, previous: %d\\nVU samples: %v\", curr, last, result))\n\t\t}\n\t\tlast = curr\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package uaa_client\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"code.cloudfoundry.org\/lager\"\n)\n\ntype BadUaaResponse struct {\n\tStatusCode int\n\tUaaResponseBody string\n}\n\nfunc (r BadUaaResponse) Error() string {\n\treturn fmt.Sprintf(\"bad uaa response: %d: %s\", r.StatusCode, r.UaaResponseBody)\n}\n\ntype Client struct {\n\tHost string\n\tName string\n\tSecret string\n\tHTTPClient httpClient\n\tWarrantClient warrantClient\n\tLogger lager.Logger\n}\n\n\/\/go:generate counterfeiter -o ..\/fakes\/warrant_client.go --fake-name WarrantClient . warrantClient\ntype warrantClient interface {\n\tGetToken(clientName, clientSecret string) (string, error)\n}\n\n\/\/go:generate counterfeiter -o ..\/fakes\/http_client.go --fake-name HTTPClient . 
httpClient\ntype httpClient interface {\n\tDo(*http.Request) (*http.Response, error)\n}\n\ntype CheckTokenResponse struct {\n\tScope []string `json:\"scope\"`\n\tUserName string `json:\"user_name\"`\n}\n\nfunc (c *Client) GetToken() (string, error) {\n\ttoken, err := c.WarrantClient.GetToken(c.Name, c.Secret)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"get token failed: %s\", err)\n\t}\n\treturn token, nil\n}\n\nfunc (c *Client) CheckToken(token string) (CheckTokenResponse, error) {\n\treqURL := fmt.Sprintf(\"%s\/check_token\", c.Host)\n\tbodyString := \"token=\" + token\n\trequest, err := http.NewRequest(\"POST\", reqURL, strings.NewReader(bodyString))\n\trequest.SetBasicAuth(c.Name, c.Secret)\n\trequest.Header.Set(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\n\tc.Logger.Debug(\"check-token\", lager.Data{\n\t\t\"URL\": request.URL,\n\t\t\"Header\": request.Header,\n\t\t\"Body\": bodyString,\n\t})\n\n\tresp, err := c.HTTPClient.Do(request)\n\tif err != nil {\n\t\treturn CheckTokenResponse{}, fmt.Errorf(\"http client: %s\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\trespBytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn CheckTokenResponse{}, fmt.Errorf(\"read body: %s\", err)\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\terr = BadUaaResponse{\n\t\t\tStatusCode: resp.StatusCode,\n\t\t\tUaaResponseBody: string(respBytes),\n\t\t}\n\t\treturn CheckTokenResponse{}, err\n\t}\n\n\tresponseStruct := CheckTokenResponse{}\n\terr = json.Unmarshal(respBytes, &responseStruct)\n\tif err != nil {\n\t\treturn CheckTokenResponse{}, fmt.Errorf(\"unmarshal json: %s\", err)\n\t}\n\treturn responseStruct, nil\n}\n<commit_msg>Remove token from debug log on policy server<commit_after>package uaa_client\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"code.cloudfoundry.org\/lager\"\n)\n\ntype BadUaaResponse struct {\n\tStatusCode int\n\tUaaResponseBody string\n}\n\nfunc (r BadUaaResponse) Error() string {\n\treturn fmt.Sprintf(\"bad uaa response: %d: %s\", r.StatusCode, r.UaaResponseBody)\n}\n\ntype Client struct {\n\tHost string\n\tName string\n\tSecret string\n\tHTTPClient httpClient\n\tWarrantClient warrantClient\n\tLogger lager.Logger\n}\n\n\/\/go:generate counterfeiter -o ..\/fakes\/warrant_client.go --fake-name WarrantClient . warrantClient\ntype warrantClient interface {\n\tGetToken(clientName, clientSecret string) (string, error)\n}\n\n\/\/go:generate counterfeiter -o ..\/fakes\/http_client.go --fake-name HTTPClient . 
httpClient\ntype httpClient interface {\n\tDo(*http.Request) (*http.Response, error)\n}\n\ntype CheckTokenResponse struct {\n\tScope []string `json:\"scope\"`\n\tUserName string `json:\"user_name\"`\n}\n\nfunc (c *Client) GetToken() (string, error) {\n\ttoken, err := c.WarrantClient.GetToken(c.Name, c.Secret)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"get token failed: %s\", err)\n\t}\n\treturn token, nil\n}\n\nfunc (c *Client) CheckToken(token string) (CheckTokenResponse, error) {\n\treqURL := fmt.Sprintf(\"%s\/check_token\", c.Host)\n\tbodyString := \"token=\" + token\n\trequest, err := http.NewRequest(\"POST\", reqURL, strings.NewReader(bodyString))\n\trequest.SetBasicAuth(c.Name, c.Secret)\n\trequest.Header.Set(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\n\tc.Logger.Debug(\"check-token\", lager.Data{\n\t\t\"URL\": request.URL,\n\t\t\"Header\": request.Header,\n\t})\n\n\tresp, err := c.HTTPClient.Do(request)\n\tif err != nil {\n\t\treturn CheckTokenResponse{}, fmt.Errorf(\"http client: %s\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\trespBytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn CheckTokenResponse{}, fmt.Errorf(\"read body: %s\", err)\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\terr = BadUaaResponse{\n\t\t\tStatusCode: resp.StatusCode,\n\t\t\tUaaResponseBody: string(respBytes),\n\t\t}\n\t\treturn CheckTokenResponse{}, err\n\t}\n\n\tresponseStruct := CheckTokenResponse{}\n\terr = json.Unmarshal(respBytes, &responseStruct)\n\tif err != nil {\n\t\treturn CheckTokenResponse{}, fmt.Errorf(\"unmarshal json: %s\", err)\n\t}\n\treturn responseStruct, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package softlayer\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"github.com\/softlayer\/softlayer-go\/session\"\n)\n\nfunc Provider() terraform.ResourceProvider {\n\tdefaultSoftLayerSession := session.New()\n\treturn &schema.Provider{\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"username\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tDefaultFunc: func() (interface{}, error) {\n\t\t\t\t\treturn defaultSoftLayerSession.UserName, nil\n\t\t\t\t},\n\t\t\t\tDescription: \"The user name for SoftLayer API operations.\",\n\t\t\t},\n\t\t\t\"api_key\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tDefaultFunc: func() (interface{}, error) {\n\t\t\t\t\treturn defaultSoftLayerSession.APIKey, nil\n\t\t\t\t},\n\t\t\t\tDescription: \"The API key for SoftLayer API operations.\",\n\t\t\t},\n\t\t\t\"endpoint_url\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tDefaultFunc: func() (interface{}, error) {\n\t\t\t\t\treturn defaultSoftLayerSession.Endpoint, nil\n\t\t\t\t},\n\t\t\t\tDescription: \"The endpoint url for the SoftLayer API.\",\n\t\t\t},\n\t\t\t\"timeout\": {\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"The timeout (in seconds) to set for any SoftLayer API calls made.\",\n\t\t\t},\n\t\t},\n\n\t\tDataSourcesMap: map[string]*schema.Resource{\n\t\t\t\"softlayer_ssh_key\": dataSourceSoftLayerSSHKey(),\n\t\t\t\"softlayer_image_template\": dataSourceSoftLayerImageTemplate(),\n\t\t\t\"softlayer_vlan\": dataSourceSoftLayerVlan(),\n\t\t\t\"softlayer_dns_domain\": dataSourceSoftLayerDnsDomain(),\n\t\t},\n\n\t\tResourcesMap: map[string]*schema.Resource{\n\t\t\t\"softlayer_virtual_guest\": resourceSoftLayerVirtualGuest(),\n\t\t\t\"softlayer_bare_metal\": 
resourceSoftLayerBareMetal(),\n\t\t\t\"softlayer_ssh_key\": resourceSoftLayerSSHKey(),\n\t\t\t\"softlayer_dns_domain_record\": resourceSoftLayerDnsDomainRecord(),\n\t\t\t\"softlayer_dns_domain\": resourceSoftLayerDnsDomain(),\n\t\t\t\"softlayer_lb_vpx\": resourceSoftLayerLbVpx(),\n\t\t\t\"softlayer_lb_vpx_vip\": resourceSoftLayerLbVpxVip(),\n\t\t\t\"softlayer_lb_vpx_service\": resourceSoftLayerLbVpxService(),\n\t\t\t\"softlayer_lb_vpx_ha\": resourceSoftLayerLbVpxHa(),\n\t\t\t\"softlayer_lb_local\": resourceSoftLayerLbLocal(),\n\t\t\t\"softlayer_lb_local_service_group\": resourceSoftLayerLbLocalServiceGroup(),\n\t\t\t\"softlayer_lb_local_service\": resourceSoftLayerLbLocalService(),\n\t\t\t\"softlayer_security_certificate\": resourceSoftLayerSecurityCertificate(),\n\t\t\t\"softlayer_user\": resourceSoftLayerUser(),\n\t\t\t\"softlayer_objectstorage_account\": resourceSoftLayerObjectStorageAccount(),\n\t\t\t\"softlayer_provisioning_hook\": resourceSoftLayerProvisioningHook(),\n\t\t\t\"softlayer_scale_policy\": resourceSoftLayerScalePolicy(),\n\t\t\t\"softlayer_scale_group\": resourceSoftLayerScaleGroup(),\n\t\t\t\"softlayer_basic_monitor\": resourceSoftLayerBasicMonitor(),\n\t\t\t\"softlayer_vlan\": resourceSoftLayerVlan(),\n\t\t\t\"softlayer_global_ip\": resourceSoftLayerGlobalIp(),\n\t\t\t\"softlayer_fw_hardware_dedicated\": resourceSoftLayerFwHardwareDedicated(),\n\t\t\t\"softlayer_fw_hardware_dedicated_rules\": resourceSoftLayerFwHardwareDedicatedRules(),\n\t\t\t\"softlayer_file_storage\": resourceSoftLayerFileStorage(),\n\t\t},\n\n\t\tConfigureFunc: providerConfigure,\n\t}\n}\n\ntype ProviderConfig interface {\n\tSoftLayerSession() *session.Session\n}\n\ntype providerConfig struct {\n\tSession *session.Session\n}\n\nfunc (config providerConfig) SoftLayerSession() *session.Session {\n\treturn config.Session\n}\n\nfunc providerConfigure(d *schema.ResourceData) (interface{}, error) {\n\tsess := session.Session{\n\t\tUserName: d.Get(\"username\").(string),\n\t\tAPIKey: d.Get(\"api_key\").(string),\n\t\tEndpoint: d.Get(\"endpoint_url\").(string),\n\t}\n\n\tif rawTimeout, ok := d.GetOk(\"timeout\"); ok {\n\t\ttimeout := rawTimeout.(int)\n\t\tsess.Timeout = time.Duration(timeout)\n\t}\n\n\tif sess.UserName == \"\" || sess.APIKey == \"\" {\n\t\treturn nil, errors.New(\n\t\t\t\"No SoftLayer credentials were found. 
Please ensure you have specified\" +\n\t\t\t\t\" them in the provider or in the environment (see the documentation).\",\n\t\t)\n\t}\n\n\tif os.Getenv(\"TF_LOG\") != \"\" {\n\t\tsess.Debug = true\n\t}\n\n\treturn providerConfig{Session: &sess}, nil\n}\n<commit_msg>Adding block storage resource to provider<commit_after>package softlayer\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"github.com\/softlayer\/softlayer-go\/session\"\n)\n\nfunc Provider() terraform.ResourceProvider {\n\tdefaultSoftLayerSession := session.New()\n\treturn &schema.Provider{\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"username\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tDefaultFunc: func() (interface{}, error) {\n\t\t\t\t\treturn defaultSoftLayerSession.UserName, nil\n\t\t\t\t},\n\t\t\t\tDescription: \"The user name for SoftLayer API operations.\",\n\t\t\t},\n\t\t\t\"api_key\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tDefaultFunc: func() (interface{}, error) {\n\t\t\t\t\treturn defaultSoftLayerSession.APIKey, nil\n\t\t\t\t},\n\t\t\t\tDescription: \"The API key for SoftLayer API operations.\",\n\t\t\t},\n\t\t\t\"endpoint_url\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tDefaultFunc: func() (interface{}, error) {\n\t\t\t\t\treturn defaultSoftLayerSession.Endpoint, nil\n\t\t\t\t},\n\t\t\t\tDescription: \"The endpoint url for the SoftLayer API.\",\n\t\t\t},\n\t\t\t\"timeout\": {\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"The timeout (in seconds) to set for any SoftLayer API calls made.\",\n\t\t\t},\n\t\t},\n\n\t\tDataSourcesMap: map[string]*schema.Resource{\n\t\t\t\"softlayer_ssh_key\": dataSourceSoftLayerSSHKey(),\n\t\t\t\"softlayer_image_template\": dataSourceSoftLayerImageTemplate(),\n\t\t\t\"softlayer_vlan\": dataSourceSoftLayerVlan(),\n\t\t\t\"softlayer_dns_domain\": dataSourceSoftLayerDnsDomain(),\n\t\t},\n\n\t\tResourcesMap: map[string]*schema.Resource{\n\t\t\t\"softlayer_virtual_guest\": resourceSoftLayerVirtualGuest(),\n\t\t\t\"softlayer_bare_metal\": resourceSoftLayerBareMetal(),\n\t\t\t\"softlayer_ssh_key\": resourceSoftLayerSSHKey(),\n\t\t\t\"softlayer_dns_domain_record\": resourceSoftLayerDnsDomainRecord(),\n\t\t\t\"softlayer_dns_domain\": resourceSoftLayerDnsDomain(),\n\t\t\t\"softlayer_lb_vpx\": resourceSoftLayerLbVpx(),\n\t\t\t\"softlayer_lb_vpx_vip\": resourceSoftLayerLbVpxVip(),\n\t\t\t\"softlayer_lb_vpx_service\": resourceSoftLayerLbVpxService(),\n\t\t\t\"softlayer_lb_vpx_ha\": resourceSoftLayerLbVpxHa(),\n\t\t\t\"softlayer_lb_local\": resourceSoftLayerLbLocal(),\n\t\t\t\"softlayer_lb_local_service_group\": resourceSoftLayerLbLocalServiceGroup(),\n\t\t\t\"softlayer_lb_local_service\": resourceSoftLayerLbLocalService(),\n\t\t\t\"softlayer_security_certificate\": resourceSoftLayerSecurityCertificate(),\n\t\t\t\"softlayer_user\": resourceSoftLayerUser(),\n\t\t\t\"softlayer_objectstorage_account\": resourceSoftLayerObjectStorageAccount(),\n\t\t\t\"softlayer_provisioning_hook\": resourceSoftLayerProvisioningHook(),\n\t\t\t\"softlayer_scale_policy\": resourceSoftLayerScalePolicy(),\n\t\t\t\"softlayer_scale_group\": resourceSoftLayerScaleGroup(),\n\t\t\t\"softlayer_basic_monitor\": resourceSoftLayerBasicMonitor(),\n\t\t\t\"softlayer_vlan\": resourceSoftLayerVlan(),\n\t\t\t\"softlayer_global_ip\": resourceSoftLayerGlobalIp(),\n\t\t\t\"softlayer_fw_hardware_dedicated\": 
resourceSoftLayerFwHardwareDedicated(),\n\t\t\t\"softlayer_fw_hardware_dedicated_rules\": resourceSoftLayerFwHardwareDedicatedRules(),\n\t\t\t\"softlayer_file_storage\": resourceSoftLayerFileStorage(),\n\t\t\t\"softlayer_block_storage\": resourceSoftLayerBlockStorage(),\n\t\t},\n\n\t\tConfigureFunc: providerConfigure,\n\t}\n}\n\ntype ProviderConfig interface {\n\tSoftLayerSession() *session.Session\n}\n\ntype providerConfig struct {\n\tSession *session.Session\n}\n\nfunc (config providerConfig) SoftLayerSession() *session.Session {\n\treturn config.Session\n}\n\nfunc providerConfigure(d *schema.ResourceData) (interface{}, error) {\n\tsess := session.Session{\n\t\tUserName: d.Get(\"username\").(string),\n\t\tAPIKey: d.Get(\"api_key\").(string),\n\t\tEndpoint: d.Get(\"endpoint_url\").(string),\n\t}\n\n\tif rawTimeout, ok := d.GetOk(\"timeout\"); ok {\n\t\ttimeout := rawTimeout.(int)\n\t\tsess.Timeout = time.Duration(timeout)\n\t}\n\n\tif sess.UserName == \"\" || sess.APIKey == \"\" {\n\t\treturn nil, errors.New(\n\t\t\t\"No SoftLayer credentials were found. Please ensure you have specified\" +\n\t\t\t\t\" them in the provider or in the environment (see the documentation).\",\n\t\t)\n\t}\n\n\tif os.Getenv(\"TF_LOG\") != \"\" {\n\t\tsess.Debug = true\n\t}\n\n\treturn providerConfig{Session: &sess}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sync\"\n\t\"syscall\"\n)\n\nimport \"git.torproject.org\/pluggable-transports\/goptlib.git\"\n\nconst ptMethodName = \"meek\"\n\nvar ptInfo pt.ClientInfo\n\n\/\/ When a connection handler starts, +1 is written to this channel; when it\n\/\/ ends, -1 is written.\nvar handlerChan = make(chan int)\n\nfunc copyLoop(a, b net.Conn) {\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\n\tgo func() {\n\t\tio.Copy(b, a)\n\t\twg.Done()\n\t}()\n\tgo func() {\n\t\tio.Copy(a, b)\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n}\n\nfunc handler(conn *pt.SocksConn) error {\n\thandlerChan <- 1\n\tdefer func() {\n\t\thandlerChan <- -1\n\t}()\n\n\tdefer conn.Close()\n\tremote, err := net.Dial(\"tcp\", conn.Req.Target)\n\tif err != nil {\n\t\tconn.Reject()\n\t\treturn err\n\t}\n\tdefer remote.Close()\n\terr = conn.Grant(remote.RemoteAddr().(*net.TCPAddr))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcopyLoop(conn, remote)\n\n\treturn nil\n}\n\nfunc acceptLoop(ln *pt.SocksListener) error {\n\tdefer ln.Close()\n\tfor {\n\t\tconn, err := ln.AcceptSocks()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error in AcceptSocks: %s\", err)\n\t\t\tif e, ok := err.(net.Error); ok && !e.Temporary() {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tgo func() {\n\t\t\terr := handler(conn)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error in handling request: %s\", err)\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc main() {\n\tvar logFilename string\n\n\tflag.StringVar(&logFilename, \"log\", \"\", \"name of log file\")\n\tflag.Parse()\n\n\tif logFilename != \"\" {\n\t\tf, err := os.OpenFile(logFilename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"error opening log file: %s\", err)\n\t\t}\n\t\tdefer f.Close()\n\t\tlog.SetOutput(f)\n\t}\n\n\tvar err error\n\tptInfo, err = pt.ClientSetup([]string{ptMethodName})\n\tif err != nil {\n\t\tlog.Fatalf(\"error in ClientSetup: %s\", err)\n\t}\n\n\tlisteners := make([]net.Listener, 0)\n\tfor _, methodName := range ptInfo.MethodNames {\n\t\tswitch methodName {\n\t\tcase ptMethodName:\n\t\t\tln, err := pt.ListenSocks(\"tcp\", 
\"127.0.0.1:0\")\n\t\t\tif err != nil {\n\t\t\t\tpt.CmethodError(methodName, err.Error())\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tgo acceptLoop(ln)\n\t\t\tpt.Cmethod(methodName, ln.Version(), ln.Addr())\n\t\t\tlog.Printf(\"listening on %s\", ln.Addr())\n\t\t\tlisteners = append(listeners, ln)\n\t\tdefault:\n\t\t\tpt.CmethodError(methodName, \"no such method\")\n\t\t}\n\t}\n\tpt.CmethodsDone()\n\n\tvar numHandlers int = 0\n\tvar sig os.Signal\n\tsigChan := make(chan os.Signal, 1)\n\tsignal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)\n\n\t\/\/ wait for first signal\n\tsig = nil\n\tfor sig == nil {\n\t\tselect {\n\t\tcase n := <-handlerChan:\n\t\t\tnumHandlers += n\n\t\tcase sig = <-sigChan:\n\t\t}\n\t}\n\tfor _, ln := range listeners {\n\t\tln.Close()\n\t}\n\n\tif sig == syscall.SIGTERM {\n\t\treturn\n\t}\n\n\t\/\/ wait for second signal or no more handlers\n\tsig = nil\n\tfor sig == nil && numHandlers != 0 {\n\t\tselect {\n\t\tcase n := <-handlerChan:\n\t\t\tnumHandlers += n\n\t\tcase sig = <-sigChan:\n\t\t}\n\t}\n\n\tlog.Printf(\"done\")\n}\n<commit_msg>Frame around request loop.<commit_after>package main\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/base64\"\n\t\"flag\"\n\t\"log\"\n\t\"net\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n)\n\nimport \"git.torproject.org\/pluggable-transports\/goptlib.git\"\n\nconst ptMethodName = \"meek\"\nconst sessionIdLength = 32\n\nvar ptInfo pt.ClientInfo\n\n\/\/ When a connection handler starts, +1 is written to this channel; when it\n\/\/ ends, -1 is written.\nvar handlerChan = make(chan int)\n\nfunc copyLoop(conn net.Conn, u, sessionId string) error {\n\treturn nil\n}\n\nfunc genSessionId() string {\n\tbuf := make([]byte, sessionIdLength)\n\t_, err := rand.Read(buf)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn base64.StdEncoding.EncodeToString(buf)\n}\n\nfunc handler(conn *pt.SocksConn) error {\n\thandlerChan <- 1\n\tdefer func() {\n\t\thandlerChan <- -1\n\t}()\n\n\tdefer conn.Close()\n\terr := conn.Grant(&net.TCPAddr{IP: net.ParseIP(\"0.0.0.0\"), Port: 0})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsessionId := genSessionId()\n\tu := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: conn.Req.Target,\n\t\tPath: \"\/\",\n\t}\n\n\treturn copyLoop(conn, u.String(), sessionId)\n}\n\nfunc acceptLoop(ln *pt.SocksListener) error {\n\tdefer ln.Close()\n\tfor {\n\t\tconn, err := ln.AcceptSocks()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error in AcceptSocks: %s\", err)\n\t\t\tif e, ok := err.(net.Error); ok && !e.Temporary() {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tgo func() {\n\t\t\terr := handler(conn)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error in handling request: %s\", err)\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc main() {\n\tvar logFilename string\n\n\tflag.StringVar(&logFilename, \"log\", \"\", \"name of log file\")\n\tflag.Parse()\n\n\tif logFilename != \"\" {\n\t\tf, err := os.OpenFile(logFilename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"error opening log file: %s\", err)\n\t\t}\n\t\tdefer f.Close()\n\t\tlog.SetOutput(f)\n\t}\n\n\tvar err error\n\tptInfo, err = pt.ClientSetup([]string{ptMethodName})\n\tif err != nil {\n\t\tlog.Fatalf(\"error in ClientSetup: %s\", err)\n\t}\n\n\tlisteners := make([]net.Listener, 0)\n\tfor _, methodName := range ptInfo.MethodNames {\n\t\tswitch methodName {\n\t\tcase ptMethodName:\n\t\t\tln, err := pt.ListenSocks(\"tcp\", \"127.0.0.1:0\")\n\t\t\tif err != nil {\n\t\t\t\tpt.CmethodError(methodName, 
err.Error())\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tgo acceptLoop(ln)\n\t\t\tpt.Cmethod(methodName, ln.Version(), ln.Addr())\n\t\t\tlog.Printf(\"listening on %s\", ln.Addr())\n\t\t\tlisteners = append(listeners, ln)\n\t\tdefault:\n\t\t\tpt.CmethodError(methodName, \"no such method\")\n\t\t}\n\t}\n\tpt.CmethodsDone()\n\n\tvar numHandlers int = 0\n\tvar sig os.Signal\n\tsigChan := make(chan os.Signal, 1)\n\tsignal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)\n\n\t\/\/ wait for first signal\n\tsig = nil\n\tfor sig == nil {\n\t\tselect {\n\t\tcase n := <-handlerChan:\n\t\t\tnumHandlers += n\n\t\tcase sig = <-sigChan:\n\t\t}\n\t}\n\tfor _, ln := range listeners {\n\t\tln.Close()\n\t}\n\n\tif sig == syscall.SIGTERM {\n\t\treturn\n\t}\n\n\t\/\/ wait for second signal or no more handlers\n\tsig = nil\n\tfor sig == nil && numHandlers != 0 {\n\t\tselect {\n\t\tcase n := <-handlerChan:\n\t\t\tnumHandlers += n\n\t\tcase sig = <-sigChan:\n\t\t}\n\t}\n\n\tlog.Printf(\"done\")\n}\n<|endoftext|>"} {"text":"<commit_before>package block\n\nimport (\n\t\"sync\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/barakmich\/agro\"\n)\n\nvar _ agro.BlockStore = &tempBlockStore{}\n\nfunc init() {\n\tagro.RegisterBlockStore(\"temp\", openTempBlockStore)\n}\n\ntype tempBlockStore struct {\n\tmut sync.RWMutex\n\tstore map[agro.BlockID][]byte\n\tnBlocks uint64\n}\n\nfunc openTempBlockStore(cfg agro.Config, gmd agro.GlobalMetadata) (agro.BlockStore, error) {\n\treturn &tempBlockStore{\n\t\tstore: make(map[agro.BlockID][]byte),\n\t\t\/\/ Lie about the number of blocks.\n\t\tnBlocks: cfg.StorageSize \/ 1024,\n\t}, nil\n}\n\nfunc (t *tempBlockStore) Flush() error { return nil }\n\nfunc (t *tempBlockStore) Close() error {\n\tt.mut.Lock()\n\tt.store = nil\n\tt.mut.Unlock()\n\treturn nil\n}\n\nfunc (t *tempBlockStore) NumBlocks() uint64 {\n\treturn t.nBlocks\n}\n\nfunc (t *tempBlockStore) GetBlock(_ context.Context, s agro.BlockID) ([]byte, error) {\n\tt.mut.RLock()\n\tdefer t.mut.RUnlock()\n\n\tif t.store == nil {\n\t\treturn nil, agro.ErrClosed\n\t}\n\n\tx, ok := t.store[s]\n\tif !ok {\n\t\treturn nil, agro.ErrBlockNotExist\n\t}\n\treturn x, nil\n}\n\nfunc (t *tempBlockStore) WriteBlock(_ context.Context, s agro.BlockID, data []byte) error {\n\tt.mut.Lock()\n\tdefer t.mut.Unlock()\n\n\tif t.store == nil {\n\t\treturn agro.ErrClosed\n\t}\n\n\tt.store[s] = data\n\treturn nil\n}\n\nfunc (t *tempBlockStore) DeleteBlock(_ context.Context, s agro.BlockID) error {\n\tt.mut.Lock()\n\tdefer t.mut.Unlock()\n\n\tif t.store == nil {\n\t\treturn agro.ErrClosed\n\t}\n\n\tdelete(t.store, s)\n\treturn nil\n}\n<commit_msg>update TODO<commit_after>package block\n\nimport (\n\t\"sync\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/barakmich\/agro\"\n)\n\nvar _ agro.BlockStore = &tempBlockStore{}\n\nfunc init() {\n\tagro.RegisterBlockStore(\"temp\", openTempBlockStore)\n}\n\ntype tempBlockStore struct {\n\tmut sync.RWMutex\n\tstore map[agro.BlockID][]byte\n\tnBlocks uint64\n}\n\nfunc openTempBlockStore(cfg agro.Config, gmd agro.GlobalMetadata) (agro.BlockStore, error) {\n\treturn &tempBlockStore{\n\t\tstore: make(map[agro.BlockID][]byte),\n\t\t\/\/ TODO(barakmich): Currently we lie about the number of blocks.\n\t\t\/\/ If we want to guess at a size, or make the map be a max size, or something, PRs accepted.\n\t\tnBlocks: cfg.StorageSize \/ 1024,\n\t}, nil\n}\n\nfunc (t *tempBlockStore) Flush() error { return nil }\n\nfunc (t *tempBlockStore) Close() error {\n\tt.mut.Lock()\n\tt.store = nil\n\tt.mut.Unlock()\n\treturn 
nil\n}\n\nfunc (t *tempBlockStore) NumBlocks() uint64 {\n\treturn t.nBlocks\n}\n\nfunc (t *tempBlockStore) GetBlock(_ context.Context, s agro.BlockID) ([]byte, error) {\n\tt.mut.RLock()\n\tdefer t.mut.RUnlock()\n\n\tif t.store == nil {\n\t\treturn nil, agro.ErrClosed\n\t}\n\n\tx, ok := t.store[s]\n\tif !ok {\n\t\treturn nil, agro.ErrBlockNotExist\n\t}\n\treturn x, nil\n}\n\nfunc (t *tempBlockStore) WriteBlock(_ context.Context, s agro.BlockID, data []byte) error {\n\tt.mut.Lock()\n\tdefer t.mut.Unlock()\n\n\tif t.store == nil {\n\t\treturn agro.ErrClosed\n\t}\n\n\tt.store[s] = data\n\treturn nil\n}\n\nfunc (t *tempBlockStore) DeleteBlock(_ context.Context, s agro.BlockID) error {\n\tt.mut.Lock()\n\tdefer t.mut.Unlock()\n\n\tif t.store == nil {\n\t\treturn agro.ErrClosed\n\t}\n\n\tdelete(t.store, s)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\n\t\"github.com\/biogo\/hts\/bam\"\n\t\"github.com\/biogo\/hts\/sam\"\n)\n\nfunc check(e error) {\n\tif e != nil {\n\t\tpanic(e)\n\t}\n}\n\nfunc stripTags(path string, tags []string) {\n\tf, err := os.Open(path)\n\tcheck(err)\n\tb, err := bam.NewReader(f, 3)\n\tcheck(err)\n\n\tout := os.Stdout\n\tcheck(err)\n\n\tw, err := bam.NewWriter(out, b.Header(), 2)\n\tcheck(err)\n\n\tbtags := make([][]byte, len(tags))\n\tfor i := range tags {\n\t\tbtags[i] = []byte(tags[i])\n\t}\n\n\tfor {\n\t\trec, err := b.Read()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcheck(err)\n\t\t}\n\t\tnewAux := make(sam.AuxFields, len(rec.AuxFields))\n\t\tcopy(newAux, rec.AuxFields)\n\t\tfor _, btag := range btags {\n\t\t\tfor i, aux := range newAux {\n\t\t\t\tif bytes.Compare(aux[:2], btag) == 0 {\n\t\t\t\t\tcopy(newAux[i:], newAux[i+1:])\n\t\t\t\t\tnewAux[len(newAux)-1] = nil\n\t\t\t\t\tnewAux = newAux[:len(newAux)-1]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t\trec.AuxFields = newAux\n\t\te := w.Write(rec)\n\t\tcheck(e)\n\n\t}\n\tw.Close()\n}\n\nfunc main() {\n\n\tbam := flag.String(\"bam\", \"\", \"path to bam to strip tags\")\n\tflag.Parse()\n\ttags := flag.Args()\n\tif len(tags) == 0 || *bam == \"\" {\n\t\tfmt.Printf(\"send in names of tags to strip and path to bam file\\n\")\n\t\tfmt.Printf(\"e.g. 
strip-tags -bam some.bam XS AS MC\")\n\t\tflag.PrintDefaults()\n\t\treturn\n\t}\n\tstripTags(*bam, tags)\n}\n<commit_msg>fix flag check<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\n\t\"github.com\/biogo\/hts\/bam\"\n\t\"github.com\/biogo\/hts\/sam\"\n)\n\nfunc check(e error) {\n\tif e != nil {\n\t\tpanic(e)\n\t}\n}\n\nfunc stripTags(path string, tags []string) {\n\tf, err := os.Open(path)\n\tcheck(err)\n\tb, err := bam.NewReader(f, 3)\n\tcheck(err)\n\n\tout := os.Stdout\n\tcheck(err)\n\n\tw, err := bam.NewWriter(out, b.Header(), 2)\n\tcheck(err)\n\n\tbtags := make([][]byte, len(tags))\n\tfor i := range tags {\n\t\tbtags[i] = []byte(tags[i])\n\t}\n\n\tfor {\n\t\trec, err := b.Read()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcheck(err)\n\t\t}\n\t\tnewAux := make(sam.AuxFields, len(rec.AuxFields))\n\t\tcopy(newAux, rec.AuxFields)\n\t\tfor _, btag := range btags {\n\t\t\tfor i, aux := range newAux {\n\t\t\t\tif bytes.Compare(aux[:2], btag) == 0 {\n\t\t\t\t\tcopy(newAux[i:], newAux[i+1:])\n\t\t\t\t\tnewAux[len(newAux)-1] = nil\n\t\t\t\t\tnewAux = newAux[:len(newAux)-1]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t\trec.AuxFields = newAux\n\t\te := w.Write(rec)\n\t\tcheck(e)\n\n\t}\n\tw.Close()\n}\n\nfunc main() {\n\n\tbam := flag.String(\"bam\", \"\", \"path to bam to strip tags\")\n\tflag.Parse()\n\ttags := flag.Args()\n\tif len(tags) == 0 && *bam == \"\" {\n\t\tfmt.Printf(\"send in names of tags to strip and path to bam file\\n\")\n\t\tfmt.Printf(\"e.g. strip-tags -bam some.bam XS AS MC\")\n\t\tflag.PrintDefaults()\n\t\treturn\n\t}\n\tstripTags(*bam, tags)\n}\n<|endoftext|>"} {"text":"<commit_before>package integration_test\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/blang\/semver\"\n\t\"github.com\/cloudfoundry\/libbuildpack\"\n\t\"github.com\/cloudfoundry\/libbuildpack\/cutlass\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. 
\"github.com\/onsi\/gomega\"\n\n\t\"testing\"\n)\n\nvar bpDir string\nvar buildpackVersion string\nvar packagedBuildpack cutlass.VersionedBuildpackPackage\n\nfunc init() {\n\tflag.StringVar(&buildpackVersion, \"version\", \"\", \"version to use (builds if empty)\")\n\tflag.BoolVar(&cutlass.Cached, \"cached\", true, \"cached buildpack\")\n\tflag.StringVar(&cutlass.DefaultMemory, \"memory\", \"256M\", \"default memory for pushed apps\")\n\tflag.StringVar(&cutlass.DefaultDisk, \"disk\", \"384M\", \"default disk for pushed apps\")\n\tflag.Parse()\n}\n\nvar _ = SynchronizedBeforeSuite(func() []byte {\n\t\/\/ Run once\n\tif buildpackVersion == \"\" {\n\t\tpackagedBuildpack, err := cutlass.PackageUniquelyVersionedBuildpack()\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tdata, err := json.Marshal(packagedBuildpack)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\treturn data\n\t}\n\n\treturn []byte{}\n}, func(data []byte) {\n\t\/\/ Run on all nodes\n\tvar err error\n\tif len(data) > 0 {\n\t\terr = json.Unmarshal(data, &packagedBuildpack)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tbuildpackVersion = packagedBuildpack.Version\n\t}\n\n\tbpDir, err = cutlass.FindRoot()\n\tExpect(err).NotTo(HaveOccurred())\n\n\tExpect(cutlass.CopyCfHome()).To(Succeed())\n\n\tcutlass.SeedRandom()\n\tcutlass.DefaultStdoutStderr = GinkgoWriter\n\n\tSetDefaultEventuallyTimeout(10 * time.Second)\n})\n\nvar _ = SynchronizedAfterSuite(func() {\n\t\/\/ Run on all nodes\n}, func() {\n\t\/\/ Run once\n\tcutlass.RemovePackagedBuildpack(packagedBuildpack)\n\tExpect(cutlass.DeleteOrphanedRoutes()).To(Succeed())\n})\n\nfunc TestIntegration(t *testing.T) {\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"Integration Suite\")\n}\n\nfunc ConfirmRunning(app *cutlass.App) {\n\tEventually(func() ([]string, error) { return app.InstanceStates() }, 20*time.Second).Should(Equal([]string{\"RUNNING\"}))\n}\n\nfunc PushAppAndConfirm(app *cutlass.App) {\n\tExpect(app.Push()).To(Succeed())\n\tConfirmRunning(app)\n\tExpect(app.ConfirmBuildpack(buildpackVersion)).To(Succeed())\n}\n\nfunc Restart(app *cutlass.App) {\n\tExpect(app.Restart()).To(Succeed())\n\tEventually(func() ([]string, error) { return app.InstanceStates() }, 20*time.Second).Should(Equal([]string{\"RUNNING\"}))\n}\n\nfunc ApiGreaterThan(version string) bool {\n\tapiVersionString, err := cutlass.ApiVersion()\n\tExpect(err).To(BeNil())\n\tapiVersion, err := semver.Make(apiVersionString)\n\tExpect(err).To(BeNil())\n\treqVersion, err := semver.ParseRange(\">= \" + version)\n\tExpect(err).To(BeNil())\n\treturn reqVersion(apiVersion)\n}\n\nfunc ApiHasTask() bool {\n\treturn ApiGreaterThan(\"2.75.0\")\n}\nfunc ApiHasMultiBuildpack() bool {\n\treturn ApiGreaterThan(\"3.27.0\")\n}\n\nfunc SkipUnlessUncached() {\n\tif cutlass.Cached {\n\t\tSkip(\"Running cached tests\")\n\t}\n}\n\nfunc SkipUnlessCached() {\n\tif !cutlass.Cached {\n\t\tSkip(\"Running uncached tests\")\n\t}\n}\n\nfunc DestroyApp(app *cutlass.App) *cutlass.App {\n\tif app != nil {\n\t\tapp.Destroy()\n\t}\n\treturn nil\n}\n\nfunc DefaultVersion(name string) string {\n\tm := &libbuildpack.Manifest{}\n\terr := (&libbuildpack.YAML{}).Load(filepath.Join(bpDir, \"manifest.yml\"), m)\n\tExpect(err).ToNot(HaveOccurred())\n\tdep, err := m.DefaultVersion(name)\n\tExpect(err).ToNot(HaveOccurred())\n\tExpect(dep.Version).ToNot(Equal(\"\"))\n\treturn dep.Version\n}\n\nfunc AssertUsesProxyDuringStagingIfPresent(fixtureName string) {\n\tContext(\"with an uncached buildpack\", func() {\n\t\tBeforeEach(SkipUnlessUncached)\n\n\t\tPIt(\"uses a proxy during 
staging if present\", func() {\n\t\t\tproxy, err := cutlass.NewProxy()\n\t\t\tExpect(err).To(BeNil())\n\t\t\tdefer proxy.Close()\n\n\t\t\tbpFile := filepath.Join(bpDir, buildpackVersion+\"tmp\")\n\t\t\tcmd := exec.Command(\"cp\", packagedBuildpack.File, bpFile)\n\t\t\terr = cmd.Run()\n\t\t\tExpect(err).To(BeNil())\n\t\t\tdefer os.Remove(bpFile)\n\n\t\t\ttraffic, err := cutlass.InternetTraffic(\n\t\t\t\tbpDir,\n\t\t\t\tfilepath.Join(\"fixtures\", fixtureName),\n\t\t\t\tbpFile,\n\t\t\t\t[]string{\"HTTP_PROXY=\" + proxy.URL, \"HTTPS_PROXY=\" + proxy.URL},\n\t\t\t)\n\t\t\tExpect(err).To(BeNil())\n\n\t\t\tdestUrl, err := url.Parse(proxy.URL)\n\t\t\tExpect(err).To(BeNil())\n\n\t\t\tExpect(cutlass.UniqueDestination(\n\t\t\t\ttraffic, fmt.Sprintf(\"%s.%s\", destUrl.Hostname(), destUrl.Port()),\n\t\t\t)).To(BeNil())\n\t\t})\n\t})\n}\n\nfunc AssertNoInternetTraffic(fixtureName string) {\n\tPIt(\"has no traffic\", func() {\n\t\tSkipUnlessCached()\n\n\t\tbpFile := filepath.Join(bpDir, buildpackVersion+\"tmp\")\n\t\tcmd := exec.Command(\"cp\", packagedBuildpack.File, bpFile)\n\t\terr := cmd.Run()\n\t\tExpect(err).To(BeNil())\n\t\tdefer os.Remove(bpFile)\n\n\t\ttraffic, err := cutlass.InternetTraffic(\n\t\t\tbpDir,\n\t\t\tfilepath.Join(\"fixtures\", fixtureName),\n\t\t\tbpFile,\n\t\t\t[]string{},\n\t\t)\n\t\tExpect(err).To(BeNil())\n\t\tExpect(traffic).To(BeEmpty())\n\t})\n}\n<commit_msg>Enforce ENV[COMPOSER_GITHUB_OAUTH_TOKEN]<commit_after>package integration_test\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/blang\/semver\"\n\t\"github.com\/cloudfoundry\/libbuildpack\"\n\t\"github.com\/cloudfoundry\/libbuildpack\/cutlass\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. 
\"github.com\/onsi\/gomega\"\n\n\t\"testing\"\n)\n\nvar bpDir string\nvar buildpackVersion string\nvar packagedBuildpack cutlass.VersionedBuildpackPackage\n\nfunc init() {\n\tflag.StringVar(&buildpackVersion, \"version\", \"\", \"version to use (builds if empty)\")\n\tflag.BoolVar(&cutlass.Cached, \"cached\", true, \"cached buildpack\")\n\tflag.StringVar(&cutlass.DefaultMemory, \"memory\", \"256M\", \"default memory for pushed apps\")\n\tflag.StringVar(&cutlass.DefaultDisk, \"disk\", \"384M\", \"default disk for pushed apps\")\n\tflag.Parse()\n}\n\nvar _ = SynchronizedBeforeSuite(func() []byte {\n\t\/\/ Run once\n\tExpect(os.Getenv(\"COMPOSER_GITHUB_OAUTH_TOKEN\")).ToNot(BeEmpty()) \/\/ Required for some tests\n\n\tif buildpackVersion == \"\" {\n\t\tpackagedBuildpack, err := cutlass.PackageUniquelyVersionedBuildpack()\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tdata, err := json.Marshal(packagedBuildpack)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\treturn data\n\t}\n\n\treturn []byte{}\n}, func(data []byte) {\n\t\/\/ Run on all nodes\n\tvar err error\n\tif len(data) > 0 {\n\t\terr = json.Unmarshal(data, &packagedBuildpack)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tbuildpackVersion = packagedBuildpack.Version\n\t}\n\n\tbpDir, err = cutlass.FindRoot()\n\tExpect(err).NotTo(HaveOccurred())\n\n\tExpect(cutlass.CopyCfHome()).To(Succeed())\n\n\tcutlass.SeedRandom()\n\tcutlass.DefaultStdoutStderr = GinkgoWriter\n\n\tSetDefaultEventuallyTimeout(10 * time.Second)\n})\n\nvar _ = SynchronizedAfterSuite(func() {\n\t\/\/ Run on all nodes\n}, func() {\n\t\/\/ Run once\n\tcutlass.RemovePackagedBuildpack(packagedBuildpack)\n\tExpect(cutlass.DeleteOrphanedRoutes()).To(Succeed())\n})\n\nfunc TestIntegration(t *testing.T) {\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"Integration Suite\")\n}\n\nfunc ConfirmRunning(app *cutlass.App) {\n\tEventually(func() ([]string, error) { return app.InstanceStates() }, 20*time.Second).Should(Equal([]string{\"RUNNING\"}))\n}\n\nfunc PushAppAndConfirm(app *cutlass.App) {\n\tExpect(app.Push()).To(Succeed())\n\tConfirmRunning(app)\n\tExpect(app.ConfirmBuildpack(buildpackVersion)).To(Succeed())\n}\n\nfunc Restart(app *cutlass.App) {\n\tExpect(app.Restart()).To(Succeed())\n\tEventually(func() ([]string, error) { return app.InstanceStates() }, 20*time.Second).Should(Equal([]string{\"RUNNING\"}))\n}\n\nfunc ApiGreaterThan(version string) bool {\n\tapiVersionString, err := cutlass.ApiVersion()\n\tExpect(err).To(BeNil())\n\tapiVersion, err := semver.Make(apiVersionString)\n\tExpect(err).To(BeNil())\n\treqVersion, err := semver.ParseRange(\">= \" + version)\n\tExpect(err).To(BeNil())\n\treturn reqVersion(apiVersion)\n}\n\nfunc ApiHasTask() bool {\n\treturn ApiGreaterThan(\"2.75.0\")\n}\nfunc ApiHasMultiBuildpack() bool {\n\treturn ApiGreaterThan(\"3.27.0\")\n}\n\nfunc SkipUnlessUncached() {\n\tif cutlass.Cached {\n\t\tSkip(\"Running cached tests\")\n\t}\n}\n\nfunc SkipUnlessCached() {\n\tif !cutlass.Cached {\n\t\tSkip(\"Running uncached tests\")\n\t}\n}\n\nfunc DestroyApp(app *cutlass.App) *cutlass.App {\n\tif app != nil {\n\t\tapp.Destroy()\n\t}\n\treturn nil\n}\n\nfunc DefaultVersion(name string) string {\n\tm := &libbuildpack.Manifest{}\n\terr := (&libbuildpack.YAML{}).Load(filepath.Join(bpDir, \"manifest.yml\"), m)\n\tExpect(err).ToNot(HaveOccurred())\n\tdep, err := m.DefaultVersion(name)\n\tExpect(err).ToNot(HaveOccurred())\n\tExpect(dep.Version).ToNot(Equal(\"\"))\n\treturn dep.Version\n}\n\nfunc AssertUsesProxyDuringStagingIfPresent(fixtureName string) {\n\tContext(\"with 
an uncached buildpack\", func() {\n\t\tBeforeEach(SkipUnlessUncached)\n\n\t\tIt(\"uses a proxy during staging if present\", func() {\n\t\t\tproxy, err := cutlass.NewProxy()\n\t\t\tExpect(err).To(BeNil())\n\t\t\tdefer proxy.Close()\n\n\t\t\tbpFile := filepath.Join(bpDir, buildpackVersion+\"tmp\")\n\t\t\tcmd := exec.Command(\"cp\", packagedBuildpack.File, bpFile)\n\t\t\terr = cmd.Run()\n\t\t\tExpect(err).To(BeNil())\n\t\t\tdefer os.Remove(bpFile)\n\n\t\t\ttraffic, err := cutlass.InternetTraffic(\n\t\t\t\tbpDir,\n\t\t\t\tfilepath.Join(\"fixtures\", fixtureName),\n\t\t\t\tbpFile,\n\t\t\t\t[]string{\"HTTP_PROXY=\" + proxy.URL, \"HTTPS_PROXY=\" + proxy.URL},\n\t\t\t)\n\t\t\tExpect(err).To(BeNil())\n\n\t\t\tdestUrl, err := url.Parse(proxy.URL)\n\t\t\tExpect(err).To(BeNil())\n\n\t\t\tExpect(cutlass.UniqueDestination(\n\t\t\t\ttraffic, fmt.Sprintf(\"%s.%s\", destUrl.Hostname(), destUrl.Port()),\n\t\t\t)).To(BeNil())\n\t\t})\n\t})\n}\n\nfunc AssertNoInternetTraffic(fixtureName string) {\n\tIt(\"has no traffic\", func() {\n\t\tSkipUnlessCached()\n\n\t\tbpFile := filepath.Join(bpDir, buildpackVersion+\"tmp\")\n\t\tcmd := exec.Command(\"cp\", packagedBuildpack.File, bpFile)\n\t\terr := cmd.Run()\n\t\tExpect(err).To(BeNil())\n\t\tdefer os.Remove(bpFile)\n\n\t\ttraffic, err := cutlass.InternetTraffic(\n\t\t\tbpDir,\n\t\t\tfilepath.Join(\"fixtures\", fixtureName),\n\t\t\tbpFile,\n\t\t\t[]string{},\n\t\t)\n\t\tExpect(err).To(BeNil())\n\t\tExpect(traffic).To(BeEmpty())\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package data\n\nimport (\n\t\"encoding\/csv\"\n\t\"time\"\n\t\"fmt\n\t\"strconv\"\n)\n\nfunc csvParse(file io.Reader) (labels []string, data []Record) {\n\treader := csv.NewReader (file)\n\ttmpdata, err := reader.ReadAll()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tlabels := tmpdata[0]\n\tdata := make([]Record, len(tmpdata)-1)\n\tfor i := 1; i<len(tmpdata); i++ {\n\t\tdata[i-1].Time := time.Parse(dataSource.ISO, tmpdata[i][0])\n\t\tdata[i-1].Radiation, err := strconv.ParseFloat(tmpdata[i][1], 32)\n\t\tif err != nil {\n\t\t\tdata[i-1].empty = true\n\t\t}\n\t\tdata[i-1].Humidity, err := strconv.ParseFloat(tmpdata[i][2], 32)\n\t\tif err != nil {\n\t\t\tdata[i-1].empty = true\n\t\t}\n\t\tdata[i-1].Temperature, err := strconv.ParseFloat(tmpdata[i][2], 32)\n\t\tif err != nil {\n\t\t\tdata[i-1].empty = true\n\t\t}\n\t\tdata[i-1].Wind, err := strconv.ParseFloat(tmpdata[i][2], 32)\n\t\tif err != nil {\n\t\t\tdata[i-1].empty = true\n\t\t}\n\t\tdata[i-1].Power, err := strconv.ParseFloat(tmpdata[i][2], 32)\n\t\tif err != nil {\n\t\t\tdata[i-1].null = true\n\t\t}\n\t}\n}\n\nfunc fillRecords (emptyData []Record) (data []Record){\n\tgradRad, gradHumidity, gradTemp, gradWind := 0.0, 0.0, 0.0, 0.0\n\tfor i := 0; i<len(emptyData); i++ {\n\t\tif emptyData[i].empty {\n\t\t\temptyData[i].Radiation = emptyData[i-1].Radiation + gradRad\n\t\t\temptyData[i].Humidity = emptyData[i-1].Humidity + gradHumidity\n\t\t\temptyData[i].Temperature = emptyData[i-1].Temperature + gradTemp\n\t\t\temptyData[i].Wind = emptyData[i-1].Wind + gradWind\n\t\t\temptyData[i].empty = false\n\t\t} else {\n\t\t\tgradRad = (emptyData[i+4].Radiation - emptyData[i].Radiation)\/4\n\t\t\tgradHumidity = (emptyData[i+4].Humidity - emptyData[i].Humidity)\/4\n\t\t\tgradTemp = (emptyData[i+4].Temperature - emptyData[i].Temperature)\/4\n\t\t\tgradWind = (emptyData[i+4].Wind - emptyData[i].Wind)\/4\n\t\t}\n\t}\n}\n<commit_msg>Fixed Missing \"<commit_after>package data\n\nimport (\n\t\"encoding\/csv\"\n\t\"time\"\n\t\"fmt\"\n\t\"strconv\"\n)\n\nfunc 
csvParse(file io.Reader) (labels []string, data []Record) {\n\treader := csv.NewReader (file)\n\ttmpdata, err := reader.ReadAll()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tlabels := tmpdata[0]\n\tdata := make([]Record, len(tmpdata)-1)\n\tfor i := 1; i<len(tmpdata); i++ {\n\t\tdata[i-1].Time := time.Parse(dataSource.ISO, tmpdata[i][0])\n\t\tdata[i-1].Radiation, err := strconv.ParseFloat(tmpdata[i][1], 32)\n\t\tif err != nil {\n\t\t\tdata[i-1].empty = true\n\t\t}\n\t\tdata[i-1].Humidity, err := strconv.ParseFloat(tmpdata[i][2], 32)\n\t\tif err != nil {\n\t\t\tdata[i-1].empty = true\n\t\t}\n\t\tdata[i-1].Temperature, err := strconv.ParseFloat(tmpdata[i][2], 32)\n\t\tif err != nil {\n\t\t\tdata[i-1].empty = true\n\t\t}\n\t\tdata[i-1].Wind, err := strconv.ParseFloat(tmpdata[i][2], 32)\n\t\tif err != nil {\n\t\t\tdata[i-1].empty = true\n\t\t}\n\t\tdata[i-1].Power, err := strconv.ParseFloat(tmpdata[i][2], 32)\n\t\tif err != nil {\n\t\t\tdata[i-1].null = true\n\t\t}\n\t}\n}\n\nfunc fillRecords (emptyData []Record) (data []Record){\n\tgradRad, gradHumidity, gradTemp, gradWind := 0.0, 0.0, 0.0, 0.0\n\tfor i := 0; i<len(emptyData); i++ {\n\t\tif emptyData[i].empty {\n\t\t\temptyData[i].Radiation = emptyData[i-1].Radiation + gradRad\n\t\t\temptyData[i].Humidity = emptyData[i-1].Humidity + gradHumidity\n\t\t\temptyData[i].Temperature = emptyData[i-1].Temperature + gradTemp\n\t\t\temptyData[i].Wind = emptyData[i-1].Wind + gradWind\n\t\t\temptyData[i].empty = false\n\t\t} else {\n\t\t\tgradRad = (emptyData[i+4].Radiation - emptyData[i].Radiation)\/4\n\t\t\tgradHumidity = (emptyData[i+4].Humidity - emptyData[i].Humidity)\/4\n\t\t\tgradTemp = (emptyData[i+4].Temperature - emptyData[i].Temperature)\/4\n\t\t\tgradWind = (emptyData[i+4].Wind - emptyData[i].Wind)\/4\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n \n*\/\npackage main\n\nimport (\n\t\"fmt\"\n\t\"rand\"\n)\n\nfunc partition(a []int) int {\n\tl := len(a)\n\tleft := 0\n\tright := 0\n\tpivot := 0\n\tif l > 0 {\n\t\tright = l - 1\n\t}\n\tif l < 3 {\n\t\tpivot = right\n\t} else {\n\t\t\/\/ median of three calculation\n\t\tvar ps [3]int\n\t\tfor i := 0; i < 3; i++ {\n\t\t\tps[i] = int(rand.Uint32() % uint32(l))\n\t\t}\n\t\tif a[ps[0]] > a[ps[1]] {\n\t\t\tps[0], ps[1] = ps[1], ps[0]\n\t\t}\n\t\tif a[ps[1]] > a[ps[2]] {\n\t\t\tps[1], ps[2] = ps[2], ps[1]\n\t\t}\n\t\tif a[ps[0]] > a[ps[1]] {\n\t\t\tps[0], ps[1] = ps[1], ps[0]\n\t\t}\n\t\tpivot = ps[1]\n\t}\n\ta[pivot], a[right] = a[right], a[pivot]\n\tstore := left\n\tfor i := left; i < right; i++ {\n\t\tif a[i] <= a[right] {\n\t\t\ta[i], a[store] = a[store], a[i]\n\t\t\tstore += 1\n\t\t}\n\t}\n\ta[store], a[right] = a[right], a[store]\n\treturn store\n}\n\nfunc QuickSort(array []int) {\n\tif len(array) > 1 {\n\t\t\/\/ recursively work on the sub-arrays,\n\t\t\/\/ which are just slices of slices\n\t\tpivot := partition(array)\n\t\tQuickSort(array[0:pivot])\n\t\tQuickSort(array[pivot+1:])\n\t}\n}\n\nfunc main() {\n\tarray := []int{89, 606, 533, 1999, 3, 1, 22, 604, 605, 77}\n\tfmt.Println(\"Unsorted: \", array)\n\tQuickSort(array)\n\tfmt.Println(\"Sorted : \", array)\n\tfor i := len(array) - 1; i > 0; i-- {\n\t\tif array[i] < array[i-1] {\n\t\t\tfmt.Println(\"FAILED\")\n\t\t\treturn\n\t\t}\n\t}\n\tfmt.Println(\"PASSED\")\n}\n<commit_msg>remove test mod<commit_after>\/*\n\n*\/\npackage main\n\nimport (\n\t\"fmt\"\n\t\"rand\"\n)\n\nfunc partition(a []int) int {\n\tl := len(a)\n\tleft := 0\n\tright := 0\n\tpivot := 0\n\tif l > 0 {\n\t\tright = l - 1\n\t}\n\tif l < 3 {\n\t\tpivot = 
right\n\t} else {\n\t\t\/\/ median of three calculation\n\t\tvar ps [3]int\n\t\tfor i := 0; i < 3; i++ {\n\t\t\tps[i] = int(rand.Uint32() % uint32(l))\n\t\t}\n\t\tif a[ps[0]] > a[ps[1]] {\n\t\t\tps[0], ps[1] = ps[1], ps[0]\n\t\t}\n\t\tif a[ps[1]] > a[ps[2]] {\n\t\t\tps[1], ps[2] = ps[2], ps[1]\n\t\t}\n\t\tif a[ps[0]] > a[ps[1]] {\n\t\t\tps[0], ps[1] = ps[1], ps[0]\n\t\t}\n\t\tpivot = ps[1]\n\t}\n\ta[pivot], a[right] = a[right], a[pivot]\n\tstore := left\n\tfor i := left; i < right; i++ {\n\t\tif a[i] <= a[right] {\n\t\t\ta[i], a[store] = a[store], a[i]\n\t\t\tstore += 1\n\t\t}\n\t}\n\ta[store], a[right] = a[right], a[store]\n\treturn store\n}\n\nfunc QuickSort(array []int) {\n\tif len(array) > 1 {\n\t\t\/\/ recursively work on the sub-arrays,\n\t\t\/\/ which are just slices of slices\n\t\tpivot := partition(array)\n\t\tQuickSort(array[0:pivot])\n\t\tQuickSort(array[pivot+1:])\n\t}\n}\n\nfunc main() {\n\tarray := []int{89, 606, 533, 1999, 3, 1, 22, 604, 605, 77}\n\tfmt.Println(\"Unsorted: \", array)\n\tQuickSort(array)\n\tfmt.Println(\"Sorted : \", array)\n\tfor i := len(array) - 1; i > 0; i-- {\n\t\tif array[i] < array[i-1] {\n\t\t\tfmt.Println(\"FAILED\")\n\t\t\treturn\n\t\t}\n\t}\n\tfmt.Println(\"PASSED\")\n}\n<|endoftext|>"} {"text":"<commit_before>package http\n\nimport (\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Cepave\/open-falcon-backend\/modules\/query\/g\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/astaxie\/beego\/orm\"\n\t\"github.com\/jasonlvhit\/gocron\"\n)\n\ntype Contacts struct {\n\tId int\n\tName string\n\tPhone string\n\tEmail string\n\tUpdated string\n}\n\ntype Hosts struct {\n\tId int\n\tHostname string\n\tExist int\n\tActivate int\n\tPlatform string\n\tPlatforms string\n\tIdc string\n\tIp string\n\tIsp string\n\tProvince string\n\tCity string\n\tStatus string\n\tUpdated string\n}\n\ntype Idcs struct {\n\tId int\n\tPopid int\n\tIdc string\n\tBandwidth int\n\tCount int\n\tArea string\n\tProvince string\n\tCity string\n\tUpdated string\n}\n\ntype Platforms struct {\n\tId int\n\tPlatform string\n\tContacts string\n\tPrincipal string\n\tDeputy string\n\tUpgrader string\n\tCount int\n\tUpdated string\n}\n\nfunc SyncHostsAndContactsTable() {\n\tif g.Config().Hosts.Enabled || g.Config().Contacts.Enabled {\n\t\tif g.Config().Hosts.Enabled {\n\t\t\tupdateMapData()\n\t\t\tsyncHostsTable()\n\t\t\tintervalToSyncHostsTable := uint64(g.Config().Hosts.Interval)\n\t\t\tgocron.Every(intervalToSyncHostsTable).Seconds().Do(syncHostsTable)\n\t\t}\n\t\tif g.Config().Contacts.Enabled {\n\t\t\tsyncContactsTable()\n\t\t\tintervalToSyncContactsTable := uint64(g.Config().Contacts.Interval)\n\t\t\tgocron.Every(intervalToSyncContactsTable).Seconds().Do(syncContactsTable)\n\t\t}\n\t\t<-gocron.Start()\n\t}\n}\n\nfunc getIDCMap() map[string]interface{} {\n\tidcMap := map[string]interface{}{}\n\to := orm.NewOrm()\n\tvar idcs []Idc\n\tsqlcommand := \"SELECT pop_id, name, province, city FROM grafana.idc ORDER BY pop_id ASC\"\n\t_, err := o.Raw(sqlcommand).QueryRows(&idcs)\n\tif err != nil {\n\t\tlog.Errorf(err.Error())\n\t}\n\tfor _, idc := range idcs {\n\t\tidcMap[strconv.Itoa(idc.Pop_id)] = idc\n\t}\n\treturn idcMap\n}\n\nfunc updateHostsTable(hostnames []string, hostsMap map[string]map[string]string) {\n\tlog.Debugf(\"func updateHostsTable()\")\n\tvar hosts []Hosts\n\to := orm.NewOrm()\n\to.Using(\"boss\")\n\t_, err := o.QueryTable(\"hosts\").Limit(10000).All(&hosts)\n\tif err != nil {\n\t\tlog.Errorf(err.Error())\n\t} else {\n\t\tformat := \"2006-01-02 
15:04:05\"\n\t\tfor _, host := range hosts {\n\t\t\tupdatedTime, _ := time.Parse(format, host.Updated)\n\t\t\tcurrentTime, _ := time.Parse(format, getNow())\n\t\t\tdiff := currentTime.Unix() - updatedTime.Unix()\n\t\t\tif diff > 600 {\n\t\t\t\thost.Exist = 0\n\t\t\t\t_, err := o.Update(&host)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\thosts = []Hosts{}\n\tidcMap := getIDCMap()\n\tvar host Hosts\n\tfor _, hostname := range hostnames {\n\t\titem := hostsMap[hostname]\n\t\tactivate, _ := strconv.Atoi(item[\"activate\"])\n\t\thost.Hostname = item[\"hostname\"]\n\t\thost.Exist = 1\n\t\thost.Activate = activate\n\t\thost.Platform = item[\"platform\"]\n\t\thost.Ip = item[\"ip\"]\n\t\thost.Isp = strings.Split(item[\"hostname\"], \"-\")[0]\n\t\thost.Updated = getNow()\n\t\tidcID := item[\"idcID\"]\n\t\tif _, ok := idcMap[idcID]; ok {\n\t\t\tidc := idcMap[idcID]\n\t\t\thost.Idc = idc.(Idc).Name\n\t\t\thost.Province = idc.(Idc).Province\n\t\t\thost.City = idc.(Idc).City\n\t\t}\n\t\thosts = append(hosts, host)\n\t}\n\tfor _, item := range hosts {\n\t\terr := o.QueryTable(\"hosts\").Limit(10000).Filter(\"hostname\", item.Hostname).One(&host)\n\t\tif err == orm.ErrNoRows {\n\t\t\tsql := \"INSERT INTO boss.hosts(\"\n\t\t\tsql += \"hostname, exist, activate, platform, idc, ip, \"\n\t\t\tsql += \"isp, province, city, updated) \"\n\t\t\tsql += \"VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\"\n\t\t\t_, err := o.Raw(sql, item.Hostname, item.Exist, item.Activate, item.Platform, item.Idc, item.Ip, item.Isp, item.Province, item.City, item.Updated).Exec()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(err.Error())\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\tlog.Errorf(err.Error())\n\t\t} else {\n\t\t\titem.Id = host.Id\n\t\t\t_, err := o.Update(&item)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(err.Error())\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc updatePlatformsTable(platformNames []string, platformsMap map[string]map[string]interface{}) {\n\tlog.Debugf(\"func updatePlatformsTable()\")\n\to := orm.NewOrm()\n\to.Using(\"boss\")\n\tvar platform Platforms\n\tfor _, platformName := range platformNames {\n\t\tgroup := platformsMap[platformName]\n\t\terr := o.QueryTable(\"platforms\").Filter(\"platform\", group[\"platformName\"]).One(&platform)\n\t\tif err == orm.ErrNoRows {\n\t\t\tsql := \"INSERT INTO boss.platforms(platform, contacts, count, updated) VALUES(?, ?, ?, ?)\"\n\t\t\t_, err := o.Raw(sql, group[\"platformName\"], group[\"contacts\"], group[\"count\"], getNow()).Exec()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(err.Error())\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\tlog.Errorf(err.Error())\n\t\t} else {\n\t\t\tplatform.Platform = group[\"platformName\"].(string)\n\t\t\tplatform.Count = group[\"count\"].(int)\n\t\t\tplatform.Updated = getNow()\n\t\t\t_, err := o.Update(&platform)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(err.Error())\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc updateContactsTable(contactNames []string, contactsMap map[string]map[string]interface{}) {\n\tlog.Debugf(\"func updateContactsTable()\")\n\to := orm.NewOrm()\n\to.Using(\"boss\")\n\tvar contact Contacts\n\tfor _, contactName := range contactNames {\n\t\tuser := contactsMap[contactName]\n\t\terr := o.QueryTable(\"contacts\").Filter(\"name\", user[\"name\"]).One(&contact)\n\t\tif err == orm.ErrNoRows {\n\t\t\tsql := \"INSERT INTO boss.contacts(name, phone, email, updated) VALUES(?, ?, ?, ?)\"\n\t\t\t_, err := o.Raw(sql, user[\"name\"], user[\"phone\"], user[\"email\"], getNow()).Exec()\n\t\t\tif err != nil 
{\n\t\t\t\tlog.Errorf(err.Error())\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\tlog.Errorf(err.Error())\n\t\t} else {\n\t\t\tcontact.Email = user[\"email\"].(string)\n\t\t\tcontact.Phone = user[\"phone\"].(string)\n\t\t\tcontact.Updated = getNow()\n\t\t\t_, err := o.Update(&contact)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(err.Error())\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc addContactsToPlatformsTable(contacts map[string]interface{}) {\n\tlog.Debugf(\"func addContactsToPlatformsTable()\")\n\to := orm.NewOrm()\n\to.Using(\"boss\")\n\tvar platforms []Platforms\n\t_, err := o.QueryTable(\"platforms\").All(&platforms)\n\tif err != nil {\n\t\tlog.Errorf(err.Error())\n\t} else {\n\t\tfor _, platform := range platforms {\n\t\t\tcontactsOfPlatform := []string{}\n\t\t\tplatformName := platform.Platform\n\t\t\tif users, ok := contacts[platformName]; ok {\n\t\t\t\tfor _, user := range users.([]interface{}) {\n\t\t\t\t\tcontactName := user.(map[string]interface{})[\"name\"].(string)\n\t\t\t\t\tcontactsOfPlatform = appendUniqueString(contactsOfPlatform, contactName)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(contactsOfPlatform) > 0 {\n\t\t\t\tplatform.Contacts = strings.Join(contactsOfPlatform, \",\")\n\t\t\t\tplatform.Updated = getNow()\n\t\t\t\t_, err := o.Update(&platform)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc syncHostsTable() {\n\to := orm.NewOrm()\n\to.Using(\"boss\")\n\tvar rows []orm.Params\n\tsql := \"SELECT updated FROM boss.hosts WHERE exist = 1 ORDER BY updated DESC LIMIT 1\"\n\tnum, err := o.Raw(sql).Values(&rows)\n\tif err != nil {\n\t\tlog.Errorf(err.Error())\n\t\treturn\n\t} else if num > 0 {\n\t\tformat := \"2006-01-02 15:04:05\"\n\t\tupdatedTime, _ := time.Parse(format, rows[0][\"updated\"].(string))\n\t\tcurrentTime, _ := time.Parse(format, getNow())\n\t\tdiff := currentTime.Unix() - updatedTime.Unix()\n\t\tif int(diff) < g.Config().Hosts.Interval {\n\t\t\treturn\n\t\t}\n\t}\n\n\tvar nodes = make(map[string]interface{})\n\terrors := []string{}\n\tvar result = make(map[string]interface{})\n\tresult[\"error\"] = errors\n\tgetPlatformJSON(nodes, result)\n\tif nodes[\"status\"] == nil {\n\t\treturn\n\t} else if int(nodes[\"status\"].(float64)) != 1 {\n\t\treturn\n\t}\n\tplatformNames := []string{}\n\tplatformsMap := map[string]map[string]interface{}{}\n\thostnames := []string{}\n\thostsMap := map[string]map[string]string{}\n\thostnamesMap := map[string]int{}\n\tidcIDs := []string{}\n\thostname := \"\"\n\tfor _, platform := range nodes[\"result\"].([]interface{}) {\n\t\tcountOfHosts := 0\n\t\tplatformName := platform.(map[string]interface{})[\"platform\"].(string)\n\t\tplatformNames = appendUniqueString(platformNames, platformName)\n\t\tfor _, device := range platform.(map[string]interface{})[\"ip_list\"].([]interface{}) {\n\t\t\thostname = device.(map[string]interface{})[\"hostname\"].(string)\n\t\t\tip := device.(map[string]interface{})[\"ip\"].(string)\n\t\t\tif len(ip) > 0 && ip == getIPFromHostname(hostname, result) {\n\t\t\t\tif _, ok := hostnamesMap[hostname]; !ok {\n\t\t\t\t\thostnames = append(hostnames, hostname)\n\t\t\t\t\tidcID := device.(map[string]interface{})[\"pop_id\"].(string)\n\t\t\t\t\thost := map[string]string{\n\t\t\t\t\t\t\"hostname\": hostname,\n\t\t\t\t\t\t\"activate\": device.(map[string]interface{})[\"ip_status\"].(string),\n\t\t\t\t\t\t\"platform\": platformName,\n\t\t\t\t\t\t\"idcID\": idcID,\n\t\t\t\t\t\t\"ip\": ip,\n\t\t\t\t\t}\n\t\t\t\t\thostsMap[hostname] = host\n\t\t\t\t\tidcIDs = appendUniqueString(idcIDs, 
idcID)\n\t\t\t\t\thostnamesMap[hostname] = 1\n\t\t\t\t\tcountOfHosts++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tplatformsMap[platformName] = map[string]interface{}{\n\t\t\t\"platformName\": platformName,\n\t\t\t\"count\": countOfHosts,\n\t\t\t\"contacts\": \"\",\n\t\t}\n\t}\n\tsort.Strings(hostnames)\n\tsort.Strings(platformNames)\n\tlog.Debugf(\"platformNames =\", platformNames)\n\tupdateHostsTable(hostnames, hostsMap)\n\tupdatePlatformsTable(platformNames, platformsMap)\n}\n\nfunc syncContactsTable() {\n\tlog.Debugf(\"func syncContactsTable()\")\n\to := orm.NewOrm()\n\to.Using(\"boss\")\n\tvar rows []orm.Params\n\tsql := \"SELECT updated FROM boss.contacts ORDER BY updated DESC LIMIT 1\"\n\tnum, err := o.Raw(sql).Values(&rows)\n\tif err != nil {\n\t\tlog.Errorf(err.Error())\n\t\treturn\n\t} else if num > 0 {\n\t\tformat := \"2006-01-02 15:04:05\"\n\t\tupdatedTime, _ := time.Parse(format, rows[0][\"updated\"].(string))\n\t\tcurrentTime, _ := time.Parse(format, getNow())\n\t\tdiff := currentTime.Unix() - updatedTime.Unix()\n\t\tif int(diff) < g.Config().Contacts.Interval {\n\t\t\treturn\n\t\t}\n\t}\n\n\tplatformNames := []string{}\n\tsql = \"SELECT DISTINCT platform FROM boss.platforms ORDER BY platform ASC\"\n\tnum, err = o.Raw(sql).Values(&rows)\n\tif err != nil {\n\t\tlog.Errorf(err.Error())\n\t\treturn\n\t} else if num > 0 {\n\t\tfor _, row := range rows {\n\t\t\tplatformNames = append(platformNames, row[\"platform\"].(string))\n\t\t}\n\t}\n\n\tvar nodes = make(map[string]interface{})\n\terrors := []string{}\n\tvar result = make(map[string]interface{})\n\tresult[\"error\"] = errors\n\tgetPlatformContact(strings.Join(platformNames, \",\"), nodes)\n\tcontactNames := []string{}\n\tcontactsMap := map[string]map[string]interface{}{}\n\tcontacts := nodes[\"result\"].(map[string]interface{})[\"items\"].(map[string]interface{})\n\tfor _, platformName := range platformNames {\n\t\tif items, ok := contacts[platformName]; ok {\n\t\t\tfor _, user := range items.([]interface{}) {\n\t\t\t\tcontactName := user.(map[string]interface{})[\"name\"].(string)\n\t\t\t\tif _, ok := contactsMap[contactName]; !ok {\n\t\t\t\t\tcontactsMap[contactName] = user.(map[string]interface{})\n\t\t\t\t\tcontactNames = append(contactNames, contactName)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tsort.Strings(contactNames)\n\tupdateContactsTable(contactNames, contactsMap)\n\taddContactsToPlatformsTable(contacts)\n}\n<commit_msg>[OWL-1165][query] add Ips struct<commit_after>package http\n\nimport (\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Cepave\/open-falcon-backend\/modules\/query\/g\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/astaxie\/beego\/orm\"\n\t\"github.com\/jasonlvhit\/gocron\"\n)\n\ntype Contacts struct {\n\tId int\n\tName string\n\tPhone string\n\tEmail string\n\tUpdated string\n}\n\ntype Hosts struct {\n\tId int\n\tHostname string\n\tExist int\n\tActivate int\n\tPlatform string\n\tPlatforms string\n\tIdc string\n\tIp string\n\tIsp string\n\tProvince string\n\tCity string\n\tStatus string\n\tUpdated string\n}\n\ntype Idcs struct {\n\tId int\n\tPopid int\n\tIdc string\n\tBandwidth int\n\tCount int\n\tArea string\n\tProvince string\n\tCity string\n\tUpdated string\n}\n\ntype Ips struct {\n\tId int\n\tIp string\n\tExist int\n\tStatus int\n\tHostname string\n\tPlatform string\n\tUpdated string\n}\n\ntype Platforms struct {\n\tId int\n\tPlatform string\n\tContacts string\n\tPrincipal string\n\tDeputy string\n\tUpgrader string\n\tCount int\n\tUpdated string\n}\n\nfunc 
SyncHostsAndContactsTable() {\n\tif g.Config().Hosts.Enabled || g.Config().Contacts.Enabled {\n\t\tif g.Config().Hosts.Enabled {\n\t\t\tupdateMapData()\n\t\t\tsyncHostsTable()\n\t\t\tintervalToSyncHostsTable := uint64(g.Config().Hosts.Interval)\n\t\t\tgocron.Every(intervalToSyncHostsTable).Seconds().Do(syncHostsTable)\n\t\t}\n\t\tif g.Config().Contacts.Enabled {\n\t\t\tsyncContactsTable()\n\t\t\tintervalToSyncContactsTable := uint64(g.Config().Contacts.Interval)\n\t\t\tgocron.Every(intervalToSyncContactsTable).Seconds().Do(syncContactsTable)\n\t\t}\n\t\t<-gocron.Start()\n\t}\n}\n\nfunc getIDCMap() map[string]interface{} {\n\tidcMap := map[string]interface{}{}\n\to := orm.NewOrm()\n\tvar idcs []Idc\n\tsqlcommand := \"SELECT pop_id, name, province, city FROM grafana.idc ORDER BY pop_id ASC\"\n\t_, err := o.Raw(sqlcommand).QueryRows(&idcs)\n\tif err != nil {\n\t\tlog.Errorf(err.Error())\n\t}\n\tfor _, idc := range idcs {\n\t\tidcMap[strconv.Itoa(idc.Pop_id)] = idc\n\t}\n\treturn idcMap\n}\n\nfunc updateHostsTable(hostnames []string, hostsMap map[string]map[string]string) {\n\tlog.Debugf(\"func updateHostsTable()\")\n\tvar hosts []Hosts\n\to := orm.NewOrm()\n\to.Using(\"boss\")\n\t_, err := o.QueryTable(\"hosts\").Limit(10000).All(&hosts)\n\tif err != nil {\n\t\tlog.Errorf(err.Error())\n\t} else {\n\t\tformat := \"2006-01-02 15:04:05\"\n\t\tfor _, host := range hosts {\n\t\t\tupdatedTime, _ := time.Parse(format, host.Updated)\n\t\t\tcurrentTime, _ := time.Parse(format, getNow())\n\t\t\tdiff := currentTime.Unix() - updatedTime.Unix()\n\t\t\tif diff > 600 {\n\t\t\t\thost.Exist = 0\n\t\t\t\t_, err := o.Update(&host)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\thosts = []Hosts{}\n\tidcMap := getIDCMap()\n\tvar host Hosts\n\tfor _, hostname := range hostnames {\n\t\titem := hostsMap[hostname]\n\t\tactivate, _ := strconv.Atoi(item[\"activate\"])\n\t\thost.Hostname = item[\"hostname\"]\n\t\thost.Exist = 1\n\t\thost.Activate = activate\n\t\thost.Platform = item[\"platform\"]\n\t\thost.Ip = item[\"ip\"]\n\t\thost.Isp = strings.Split(item[\"hostname\"], \"-\")[0]\n\t\thost.Updated = getNow()\n\t\tidcID := item[\"idcID\"]\n\t\tif _, ok := idcMap[idcID]; ok {\n\t\t\tidc := idcMap[idcID]\n\t\t\thost.Idc = idc.(Idc).Name\n\t\t\thost.Province = idc.(Idc).Province\n\t\t\thost.City = idc.(Idc).City\n\t\t}\n\t\thosts = append(hosts, host)\n\t}\n\tfor _, item := range hosts {\n\t\terr := o.QueryTable(\"hosts\").Limit(10000).Filter(\"hostname\", item.Hostname).One(&host)\n\t\tif err == orm.ErrNoRows {\n\t\t\tsql := \"INSERT INTO boss.hosts(\"\n\t\t\tsql += \"hostname, exist, activate, platform, idc, ip, \"\n\t\t\tsql += \"isp, province, city, updated) \"\n\t\t\tsql += \"VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\"\n\t\t\t_, err := o.Raw(sql, item.Hostname, item.Exist, item.Activate, item.Platform, item.Idc, item.Ip, item.Isp, item.Province, item.City, item.Updated).Exec()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(err.Error())\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\tlog.Errorf(err.Error())\n\t\t} else {\n\t\t\titem.Id = host.Id\n\t\t\t_, err := o.Update(&item)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(err.Error())\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc updatePlatformsTable(platformNames []string, platformsMap map[string]map[string]interface{}) {\n\tlog.Debugf(\"func updatePlatformsTable()\")\n\to := orm.NewOrm()\n\to.Using(\"boss\")\n\tvar platform Platforms\n\tfor _, platformName := range platformNames {\n\t\tgroup := platformsMap[platformName]\n\t\terr := 
o.QueryTable(\"platforms\").Filter(\"platform\", group[\"platformName\"]).One(&platform)\n\t\tif err == orm.ErrNoRows {\n\t\t\tsql := \"INSERT INTO boss.platforms(platform, contacts, count, updated) VALUES(?, ?, ?, ?)\"\n\t\t\t_, err := o.Raw(sql, group[\"platformName\"], group[\"contacts\"], group[\"count\"], getNow()).Exec()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(err.Error())\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\tlog.Errorf(err.Error())\n\t\t} else {\n\t\t\tplatform.Platform = group[\"platformName\"].(string)\n\t\t\tplatform.Count = group[\"count\"].(int)\n\t\t\tplatform.Updated = getNow()\n\t\t\t_, err := o.Update(&platform)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(err.Error())\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc updateContactsTable(contactNames []string, contactsMap map[string]map[string]interface{}) {\n\tlog.Debugf(\"func updateContactsTable()\")\n\to := orm.NewOrm()\n\to.Using(\"boss\")\n\tvar contact Contacts\n\tfor _, contactName := range contactNames {\n\t\tuser := contactsMap[contactName]\n\t\terr := o.QueryTable(\"contacts\").Filter(\"name\", user[\"name\"]).One(&contact)\n\t\tif err == orm.ErrNoRows {\n\t\t\tsql := \"INSERT INTO boss.contacts(name, phone, email, updated) VALUES(?, ?, ?, ?)\"\n\t\t\t_, err := o.Raw(sql, user[\"name\"], user[\"phone\"], user[\"email\"], getNow()).Exec()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(err.Error())\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\tlog.Errorf(err.Error())\n\t\t} else {\n\t\t\tcontact.Email = user[\"email\"].(string)\n\t\t\tcontact.Phone = user[\"phone\"].(string)\n\t\t\tcontact.Updated = getNow()\n\t\t\t_, err := o.Update(&contact)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(err.Error())\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc addContactsToPlatformsTable(contacts map[string]interface{}) {\n\tlog.Debugf(\"func addContactsToPlatformsTable()\")\n\to := orm.NewOrm()\n\to.Using(\"boss\")\n\tvar platforms []Platforms\n\t_, err := o.QueryTable(\"platforms\").All(&platforms)\n\tif err != nil {\n\t\tlog.Errorf(err.Error())\n\t} else {\n\t\tfor _, platform := range platforms {\n\t\t\tcontactsOfPlatform := []string{}\n\t\t\tplatformName := platform.Platform\n\t\t\tif users, ok := contacts[platformName]; ok {\n\t\t\t\tfor _, user := range users.([]interface{}) {\n\t\t\t\t\tcontactName := user.(map[string]interface{})[\"name\"].(string)\n\t\t\t\t\tcontactsOfPlatform = appendUniqueString(contactsOfPlatform, contactName)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(contactsOfPlatform) > 0 {\n\t\t\t\tplatform.Contacts = strings.Join(contactsOfPlatform, \",\")\n\t\t\t\tplatform.Updated = getNow()\n\t\t\t\t_, err := o.Update(&platform)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc syncHostsTable() {\n\to := orm.NewOrm()\n\to.Using(\"boss\")\n\tvar rows []orm.Params\n\tsql := \"SELECT updated FROM boss.hosts WHERE exist = 1 ORDER BY updated DESC LIMIT 1\"\n\tnum, err := o.Raw(sql).Values(&rows)\n\tif err != nil {\n\t\tlog.Errorf(err.Error())\n\t\treturn\n\t} else if num > 0 {\n\t\tformat := \"2006-01-02 15:04:05\"\n\t\tupdatedTime, _ := time.Parse(format, rows[0][\"updated\"].(string))\n\t\tcurrentTime, _ := time.Parse(format, getNow())\n\t\tdiff := currentTime.Unix() - updatedTime.Unix()\n\t\tif int(diff) < g.Config().Hosts.Interval {\n\t\t\treturn\n\t\t}\n\t}\n\n\tvar nodes = make(map[string]interface{})\n\terrors := []string{}\n\tvar result = make(map[string]interface{})\n\tresult[\"error\"] = errors\n\tgetPlatformJSON(nodes, result)\n\tif nodes[\"status\"] == nil {\n\t\treturn\n\t} else if 
int(nodes[\"status\"].(float64)) != 1 {\n\t\treturn\n\t}\n\tplatformNames := []string{}\n\tplatformsMap := map[string]map[string]interface{}{}\n\thostnames := []string{}\n\thostsMap := map[string]map[string]string{}\n\thostnamesMap := map[string]int{}\n\tidcIDs := []string{}\n\thostname := \"\"\n\tfor _, platform := range nodes[\"result\"].([]interface{}) {\n\t\tcountOfHosts := 0\n\t\tplatformName := platform.(map[string]interface{})[\"platform\"].(string)\n\t\tplatformNames = appendUniqueString(platformNames, platformName)\n\t\tfor _, device := range platform.(map[string]interface{})[\"ip_list\"].([]interface{}) {\n\t\t\thostname = device.(map[string]interface{})[\"hostname\"].(string)\n\t\t\tip := device.(map[string]interface{})[\"ip\"].(string)\n\t\t\tif len(ip) > 0 && ip == getIPFromHostname(hostname, result) {\n\t\t\t\tif _, ok := hostnamesMap[hostname]; !ok {\n\t\t\t\t\thostnames = append(hostnames, hostname)\n\t\t\t\t\tidcID := device.(map[string]interface{})[\"pop_id\"].(string)\n\t\t\t\t\thost := map[string]string{\n\t\t\t\t\t\t\"hostname\": hostname,\n\t\t\t\t\t\t\"activate\": device.(map[string]interface{})[\"ip_status\"].(string),\n\t\t\t\t\t\t\"platform\": platformName,\n\t\t\t\t\t\t\"idcID\": idcID,\n\t\t\t\t\t\t\"ip\": ip,\n\t\t\t\t\t}\n\t\t\t\t\thostsMap[hostname] = host\n\t\t\t\t\tidcIDs = appendUniqueString(idcIDs, idcID)\n\t\t\t\t\thostnamesMap[hostname] = 1\n\t\t\t\t\tcountOfHosts++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tplatformsMap[platformName] = map[string]interface{}{\n\t\t\t\"platformName\": platformName,\n\t\t\t\"count\": countOfHosts,\n\t\t\t\"contacts\": \"\",\n\t\t}\n\t}\n\tsort.Strings(hostnames)\n\tsort.Strings(platformNames)\n\tlog.Debugf(\"platformNames =\", platformNames)\n\tupdateHostsTable(hostnames, hostsMap)\n\tupdatePlatformsTable(platformNames, platformsMap)\n}\n\nfunc syncContactsTable() {\n\tlog.Debugf(\"func syncContactsTable()\")\n\to := orm.NewOrm()\n\to.Using(\"boss\")\n\tvar rows []orm.Params\n\tsql := \"SELECT updated FROM boss.contacts ORDER BY updated DESC LIMIT 1\"\n\tnum, err := o.Raw(sql).Values(&rows)\n\tif err != nil {\n\t\tlog.Errorf(err.Error())\n\t\treturn\n\t} else if num > 0 {\n\t\tformat := \"2006-01-02 15:04:05\"\n\t\tupdatedTime, _ := time.Parse(format, rows[0][\"updated\"].(string))\n\t\tcurrentTime, _ := time.Parse(format, getNow())\n\t\tdiff := currentTime.Unix() - updatedTime.Unix()\n\t\tif int(diff) < g.Config().Contacts.Interval {\n\t\t\treturn\n\t\t}\n\t}\n\n\tplatformNames := []string{}\n\tsql = \"SELECT DISTINCT platform FROM boss.platforms ORDER BY platform ASC\"\n\tnum, err = o.Raw(sql).Values(&rows)\n\tif err != nil {\n\t\tlog.Errorf(err.Error())\n\t\treturn\n\t} else if num > 0 {\n\t\tfor _, row := range rows {\n\t\t\tplatformNames = append(platformNames, row[\"platform\"].(string))\n\t\t}\n\t}\n\n\tvar nodes = make(map[string]interface{})\n\terrors := []string{}\n\tvar result = make(map[string]interface{})\n\tresult[\"error\"] = errors\n\tgetPlatformContact(strings.Join(platformNames, \",\"), nodes)\n\tcontactNames := []string{}\n\tcontactsMap := map[string]map[string]interface{}{}\n\tcontacts := nodes[\"result\"].(map[string]interface{})[\"items\"].(map[string]interface{})\n\tfor _, platformName := range platformNames {\n\t\tif items, ok := contacts[platformName]; ok {\n\t\t\tfor _, user := range items.([]interface{}) {\n\t\t\t\tcontactName := user.(map[string]interface{})[\"name\"].(string)\n\t\t\t\tif _, ok := contactsMap[contactName]; !ok {\n\t\t\t\t\tcontactsMap[contactName] = 
user.(map[string]interface{})\n\t\t\t\t\tcontactNames = append(contactNames, contactName)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tsort.Strings(contactNames)\n\tupdateContactsTable(contactNames, contactsMap)\n\taddContactsToPlatformsTable(contacts)\n}\n<|endoftext|>"} {"text":"<commit_before>package datastore\n\nimport (\n\t\"common\"\n\t. \"launchpad.net\/gocheck\"\n\t\"parser\"\n)\n\ntype FilteringSuite struct{}\n\nvar _ = Suite(&FilteringSuite{})\n\nfunc (self *FilteringSuite) TestEqualityFiltering(c *C) {\n\tqueryStr := \"select * from t where column_one == 100 and column_two != 6;\"\n\tquery, err := parser.ParseQuery(queryStr)\n\tc.Assert(err, IsNil)\n\tseries, err := common.StringToSeriesArray(`\n[\n {\n \"points\": [\n {\"values\": [{\"int_value\": 100},{\"int_value\": 5 }], \"timestamp\": 1381346631, \"sequence_number\": 1},\n {\"values\": [{\"int_value\": 100},{\"int_value\": 6 }], \"timestamp\": 1381346631, \"sequence_number\": 1},\n {\"values\": [{\"int_value\": 90 },{\"int_value\": 15}], \"timestamp\": 1381346632, \"sequence_number\": 1}\n ],\n \"name\": \"t\",\n \"fields\": [\n {\"type\": \"INT32\", \"name\": \"column_one\"},\n {\"type\": \"INT32\", \"name\": \"column_two\"}\n ]\n }\n]\n`)\n\tc.Assert(err, IsNil)\n\tresult, err := Filter(query, series[0])\n\tc.Assert(err, IsNil)\n\tc.Assert(result, NotNil)\n\tc.Assert(result.Points, HasLen, 1)\n\tc.Assert(*result.Points[0].Values[0].IntValue, Equals, int32(100))\n\tc.Assert(*result.Points[0].Values[1].IntValue, Equals, int32(5))\n}\n\nfunc (self *FilteringSuite) TestInequalityFiltering(c *C) {\n\tqueryStr := \"select * from t where column_one >= 100 and column_two > 6;\"\n\tquery, err := parser.ParseQuery(queryStr)\n\tc.Assert(err, IsNil)\n\tseries, err := common.StringToSeriesArray(`\n[\n {\n \"points\": [\n {\"values\": [{\"int_value\": 100},{\"int_value\": 7 }], \"timestamp\": 1381346631, \"sequence_number\": 1},\n {\"values\": [{\"int_value\": 100},{\"int_value\": 6 }], \"timestamp\": 1381346631, \"sequence_number\": 1},\n {\"values\": [{\"int_value\": 90 },{\"int_value\": 15}], \"timestamp\": 1381346632, \"sequence_number\": 1}\n ],\n \"name\": \"t\",\n \"fields\": [\n {\"type\": \"INT32\", \"name\": \"column_one\"},\n {\"type\": \"INT32\", \"name\": \"column_two\"}\n ]\n }\n]\n`)\n\tc.Assert(err, IsNil)\n\tresult, err := Filter(query, series[0])\n\tc.Assert(err, IsNil)\n\tc.Assert(result, NotNil)\n\tc.Assert(result.Points, HasLen, 1)\n\tc.Assert(*result.Points[0].Values[0].IntValue, Equals, int32(100))\n\tc.Assert(*result.Points[0].Values[1].IntValue, Equals, int32(7))\n}\n<commit_msg>add an explicit test for filtering when there's time in the where condition.<commit_after>package datastore\n\nimport (\n\t\"common\"\n\t. 
\"launchpad.net\/gocheck\"\n\t\"parser\"\n)\n\ntype FilteringSuite struct{}\n\nvar _ = Suite(&FilteringSuite{})\n\nfunc (self *FilteringSuite) TestEqualityFiltering(c *C) {\n\tqueryStr := \"select * from t where column_one == 100 and column_two != 6;\"\n\tquery, err := parser.ParseQuery(queryStr)\n\tc.Assert(err, IsNil)\n\tseries, err := common.StringToSeriesArray(`\n[\n {\n \"points\": [\n {\"values\": [{\"int_value\": 100},{\"int_value\": 5 }], \"timestamp\": 1381346631, \"sequence_number\": 1},\n {\"values\": [{\"int_value\": 100},{\"int_value\": 6 }], \"timestamp\": 1381346631, \"sequence_number\": 1},\n {\"values\": [{\"int_value\": 90 },{\"int_value\": 15}], \"timestamp\": 1381346632, \"sequence_number\": 1}\n ],\n \"name\": \"t\",\n \"fields\": [\n {\"type\": \"INT32\", \"name\": \"column_one\"},\n {\"type\": \"INT32\", \"name\": \"column_two\"}\n ]\n }\n]\n`)\n\tc.Assert(err, IsNil)\n\tresult, err := Filter(query, series[0])\n\tc.Assert(err, IsNil)\n\tc.Assert(result, NotNil)\n\tc.Assert(result.Points, HasLen, 1)\n\tc.Assert(*result.Points[0].Values[0].IntValue, Equals, int32(100))\n\tc.Assert(*result.Points[0].Values[1].IntValue, Equals, int32(5))\n}\n\nfunc (self *FilteringSuite) TestInequalityFiltering(c *C) {\n\tqueryStr := \"select * from t where column_one >= 100 and column_two > 6 and time > now() - 1d;\"\n\tquery, err := parser.ParseQuery(queryStr)\n\tc.Assert(err, IsNil)\n\tseries, err := common.StringToSeriesArray(`\n[\n {\n \"points\": [\n {\"values\": [{\"int_value\": 100},{\"int_value\": 7 }], \"timestamp\": 1381346631, \"sequence_number\": 1},\n {\"values\": [{\"int_value\": 100},{\"int_value\": 6 }], \"timestamp\": 1381346631, \"sequence_number\": 1},\n {\"values\": [{\"int_value\": 90 },{\"int_value\": 15}], \"timestamp\": 1381346632, \"sequence_number\": 1}\n ],\n \"name\": \"t\",\n \"fields\": [\n {\"type\": \"INT32\", \"name\": \"column_one\"},\n {\"type\": \"INT32\", \"name\": \"column_two\"}\n ]\n }\n]\n`)\n\tc.Assert(err, IsNil)\n\tresult, err := Filter(query, series[0])\n\tc.Assert(err, IsNil)\n\tc.Assert(result, NotNil)\n\tc.Assert(result.Points, HasLen, 1)\n\tc.Assert(*result.Points[0].Values[0].IntValue, Equals, int32(100))\n\tc.Assert(*result.Points[0].Values[1].IntValue, Equals, int32(7))\n}\n<|endoftext|>"} {"text":"<commit_before>package integration\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\tdockerregistry \"github.com\/openshift\/origin\/pkg\/image\/importer\/dockerv1client\"\n)\n\nconst (\n\tpulpRegistryName = \"registry.access.redhat.com\"\n\tdockerHubV2RegistryName = \"index.docker.io\"\n\tdockerHubV1RegistryName = \"registry.hub.docker.com\"\n\tquayRegistryName = \"quay.io\"\n\n\tmaxRetryCount = 4\n\tretryAfter = time.Millisecond * 500\n)\n\nvar (\n\t\/\/ Below are lists of error patterns for use with `retryOnErrors` utility.\n\n\t\/\/ unreachableErrorPatterns will match following error examples:\n\t\/\/ Get https:\/\/registry.com\/v2\/: dial tcp registry.com:443: i\/o timeout\n\t\/\/ Get https:\/\/registry.com\/v2\/: dial tcp: lookup registry.com: no such host\n\t\/\/ Get https:\/\/registry.com\/v2\/: dial tcp registry.com:443: getsockopt: connection refused\n\t\/\/ Get https:\/\/registry.com\/v2\/: read tcp 127.0.0.1:39849->registry.com:443: read: connection reset by peer\n\t\/\/ Get https:\/\/registry.com\/v2\/: net\/http: request cancelled while waiting for connection\n\t\/\/ Get https:\/\/registry.com\/v2\/: net\/http: TLS handshake timeout\n\t\/\/ the registry \"https:\/\/registry.com\/v2\/\" could not be 
reached\n\tunreachableErrorPatterns = []string{\n\t\t\"dial tcp\",\n\t\t\"read tcp\",\n\t\t\"net\/http\",\n\t\t\"could not be reached\",\n\t}\n\n\t\/\/ imageNotFoundErrorPatterns will match following error examples:\n\t\/\/ the image \"...\" in repository \"...\" was not found and may have been deleted\n\t\/\/ tag \"...\" has not been set on repository \"...\"\n\t\/\/ use only with non-internal registry\n\timageNotFoundErrorPatterns = []string{\n\t\t\"was not found and may have been deleted\",\n\t\t\"has not been set on repository\",\n\t}\n)\n\n\/\/ retryOnErrors invokes given function several times until it succeeds,\n\/\/ returns unexpected error or a maximum number of attempts is reached. It\n\/\/ should be used to wrap calls to remote registry to prevent test failures\n\/\/ because of short-term outages or image updates.\nfunc retryOnErrors(t *testing.T, errorPatterns []string, f func() error) error {\n\ttimeout := retryAfter\n\tattempt := 0\n\tfor err := f(); err != nil; err = f() {\n\t\tmatch := false\n\t\tfor _, pattern := range errorPatterns {\n\t\t\tif strings.Contains(err.Error(), pattern) {\n\t\t\t\tmatch = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !match || attempt >= maxRetryCount {\n\t\t\treturn err\n\t\t}\n\n\t\tt.Logf(\"caught error \\\"%v\\\", retrying in %s\", err, timeout.String())\n\t\ttime.Sleep(timeout)\n\t\ttimeout = timeout * 2\n\t\tattempt += 1\n\t}\n\treturn nil\n}\n\n\/\/ retryWhenUnreachable is a convenient wrapper for retryOnErrors that makes it\n\/\/ retry when the registry is not reachable. Additional error patterns may\n\/\/ follow.\nfunc retryWhenUnreachable(t *testing.T, f func() error, errorPatterns ...string) error {\n\treturn retryOnErrors(t, append(errorPatterns, unreachableErrorPatterns...), f)\n}\n\nfunc TestRegistryClientConnect(t *testing.T) {\n\tc := dockerregistry.NewClient(10*time.Second, true)\n\tconn, err := c.Connect(\"docker.io\", false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor _, s := range []string{\"index.docker.io\", \"https:\/\/docker.io\", \"https:\/\/index.docker.io\"} {\n\t\totherConn, err := c.Connect(s, false)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: can't connect: %v\", s, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(otherConn, conn) {\n\t\t\tt.Errorf(\"%s: did not reuse connection: %#v %#v\", s, conn, otherConn)\n\t\t}\n\t}\n\n\totherConn, err := c.Connect(\"index.docker.io:443\", false)\n\tif err != nil || reflect.DeepEqual(otherConn, conn) {\n\t\tt.Errorf(\"should not have reused index.docker.io:443: %v\", err)\n\t}\n\n\tif _, err := c.Connect(\"http:\/\/ba%3\/\", false); err == nil {\n\t\tt.Error(\"Unexpected non-error\")\n\t}\n}\n\nfunc TestRegistryClientConnectPulpRegistry(t *testing.T) {\n\tc := dockerregistry.NewClient(10*time.Second, true)\n\tconn, err := c.Connect(pulpRegistryName, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar image *dockerregistry.Image\n\terr = retryWhenUnreachable(t, func() error {\n\t\timage, err = conn.ImageByTag(\"library\", \"rhel\", \"latest\")\n\t\treturn err\n\t}, imageNotFoundErrorPatterns...)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"x509: certificate has expired or is not yet valid\") {\n\t\t\tt.Skipf(\"SKIPPING: due to expired certificate of %s: %v\", pulpRegistryName, err)\n\t\t}\n\t\tt.Skip(\"pulp is failing\")\n\t\t\/\/t.Fatal(err)\n\t}\n\tif len(image.Image.ID) == 0 {\n\t\tt.Fatalf(\"image had no ID: %#v\", image)\n\t}\n}\n\nfunc TestRegistryClientDockerHubV2(t *testing.T) {\n\tc := dockerregistry.NewClient(10*time.Second, 
true)\n\tconn, err := c.Connect(dockerHubV2RegistryName, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar image *dockerregistry.Image\n\terr = retryWhenUnreachable(t, func() error {\n\t\timage, err = conn.ImageByTag(\"openshift\", \"hello-openshift\", \"latest\")\n\t\treturn err\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(image.Image.ID) == 0 {\n\t\tt.Fatalf(\"image had no ID: %#v\", image)\n\t}\n}\n\nfunc TestRegistryClientDockerHubV1(t *testing.T) {\n\tc := dockerregistry.NewClient(10*time.Second, true)\n\t\/\/ a v1 only path\n\tconn, err := c.Connect(dockerHubV1RegistryName, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar image *dockerregistry.Image\n\terr = retryWhenUnreachable(t, func() error {\n\t\timage, err = conn.ImageByTag(\"openshift\", \"hello-openshift\", \"latest\")\n\t\treturn err\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(image.Image.ID) == 0 {\n\t\tt.Fatalf(\"image had no ID: %#v\", image)\n\t}\n}\n\nfunc TestRegistryClientRegistryNotFound(t *testing.T) {\n\tconn, err := dockerregistry.NewClient(10*time.Second, true).Connect(\"localhost:65000\", false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := conn.ImageByID(\"foo\", \"bar\", \"baz\"); !dockerregistry.IsRegistryNotFound(err) {\n\t\tt.Error(err)\n\t}\n}\n\nfunc doTestRegistryClientImage(t *testing.T, registry, reponame, version string) {\n\tconn, err := dockerregistry.NewClient(10*time.Second, version == \"v2\").Connect(registry, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = retryWhenUnreachable(t, func() error {\n\t\t_, err := conn.ImageByTag(\"openshift\", \"origin-not-found\", \"latest\")\n\t\treturn err\n\t})\n\tif err == nil || (!dockerregistry.IsRepositoryNotFound(err) && !dockerregistry.IsTagNotFound(err)) {\n\t\tt.Errorf(\"%s: unexpected error: %v\", version, err)\n\t}\n\n\tvar image *dockerregistry.Image\n\terr = retryWhenUnreachable(t, func() error {\n\t\timage, err = conn.ImageByTag(\"openshift\", reponame, \"latest\")\n\t\treturn err\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif image.Image.Comment != \"Imported from -\" {\n\t\tt.Errorf(\"%s: unexpected image comment\", version)\n\t}\n\n\tif image.Image.Architecture != \"amd64\" {\n\t\tt.Errorf(\"%s: unexpected image architecture\", version)\n\t}\n\n\tif version == \"v2\" && !image.PullByID {\n\t\tt.Errorf(\"%s: should be able to pull by ID %s\", version, image.Image.ID)\n\t}\n\n\tvar other *dockerregistry.Image\n\terr = retryWhenUnreachable(t, func() error {\n\t\tother, err = conn.ImageByID(\"openshift\", reponame, image.Image.ID)\n\t\treturn err\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !reflect.DeepEqual(other.Image.ContainerConfig.Entrypoint, image.Image.ContainerConfig.Entrypoint) {\n\t\tt.Errorf(\"%s: unexpected image: %#v\", version, other)\n\t}\n}\n\nfunc TestRegistryClientAPIv2ManifestV2Schema2(t *testing.T) {\n\tt.Log(\"openshift\/schema-v2-test-repo was pushed by Docker 1.11.1\")\n\tdoTestRegistryClientImage(t, dockerHubV2RegistryName, \"schema-v2-test-repo\", \"v2\")\n}\n\nfunc TestRegistryClientAPIv2ManifestV2Schema1(t *testing.T) {\n\tt.Log(\"openshift\/schema-v1-test-repo was pushed by Docker 1.8.2\")\n\tdoTestRegistryClientImage(t, dockerHubV2RegistryName, \"schema-v1-test-repo\", \"v2\")\n}\n\nfunc TestRegistryClientAPIv1(t *testing.T) {\n\tt.Log(\"openshift\/schema-v1-test-repo was pushed by Docker 1.8.2\")\n\tdoTestRegistryClientImage(t, dockerHubV1RegistryName, \"schema-v1-test-repo\", \"v1\")\n}\n\nfunc TestRegistryClientQuayIOImage(t 
*testing.T) {\n\tconn, err := dockerregistry.NewClient(10*time.Second, true).Connect(\"quay.io\", false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = retryWhenUnreachable(t, func() error {\n\t\t_, err := conn.ImageByTag(\"coreos\", \"etcd\", \"latest\")\n\t\treturn err\n\t}, imageNotFoundErrorPatterns...)\n\tif err != nil {\n\t\tt.Skipf(\"SKIPPING: unexpected error from quay.io: %v\", err)\n\t}\n}\n<commit_msg>disable broken test<commit_after>package integration\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\tdockerregistry \"github.com\/openshift\/origin\/pkg\/image\/importer\/dockerv1client\"\n)\n\nconst (\n\tpulpRegistryName = \"registry.access.redhat.com\"\n\tdockerHubV2RegistryName = \"index.docker.io\"\n\tdockerHubV1RegistryName = \"registry.hub.docker.com\"\n\tquayRegistryName = \"quay.io\"\n\n\tmaxRetryCount = 4\n\tretryAfter = time.Millisecond * 500\n)\n\nvar (\n\t\/\/ Below are lists of error patterns for use with `retryOnErrors` utility.\n\n\t\/\/ unreachableErrorPatterns will match following error examples:\n\t\/\/ Get https:\/\/registry.com\/v2\/: dial tcp registry.com:443: i\/o timeout\n\t\/\/ Get https:\/\/registry.com\/v2\/: dial tcp: lookup registry.com: no such host\n\t\/\/ Get https:\/\/registry.com\/v2\/: dial tcp registry.com:443: getsockopt: connection refused\n\t\/\/ Get https:\/\/registry.com\/v2\/: read tcp 127.0.0.1:39849->registry.com:443: read: connection reset by peer\n\t\/\/ Get https:\/\/registry.com\/v2\/: net\/http: request cancelled while waiting for connection\n\t\/\/ Get https:\/\/registry.com\/v2\/: net\/http: TLS handshake timeout\n\t\/\/ the registry \"https:\/\/registry.com\/v2\/\" could not be reached\n\tunreachableErrorPatterns = []string{\n\t\t\"dial tcp\",\n\t\t\"read tcp\",\n\t\t\"net\/http\",\n\t\t\"could not be reached\",\n\t}\n\n\t\/\/ imageNotFoundErrorPatterns will match following error examples:\n\t\/\/ the image \"...\" in repository \"...\" was not found and may have been deleted\n\t\/\/ tag \"...\" has not been set on repository \"...\"\n\t\/\/ use only with non-internal registry\n\timageNotFoundErrorPatterns = []string{\n\t\t\"was not found and may have been deleted\",\n\t\t\"has not been set on repository\",\n\t}\n)\n\n\/\/ retryOnErrors invokes given function several times until it succeeds,\n\/\/ returns unexpected error or a maximum number of attempts is reached. It\n\/\/ should be used to wrap calls to remote registry to prevent test failures\n\/\/ because of short-term outages or image updates.\nfunc retryOnErrors(t *testing.T, errorPatterns []string, f func() error) error {\n\ttimeout := retryAfter\n\tattempt := 0\n\tfor err := f(); err != nil; err = f() {\n\t\tmatch := false\n\t\tfor _, pattern := range errorPatterns {\n\t\t\tif strings.Contains(err.Error(), pattern) {\n\t\t\t\tmatch = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !match || attempt >= maxRetryCount {\n\t\t\treturn err\n\t\t}\n\n\t\tt.Logf(\"caught error \\\"%v\\\", retrying in %s\", err, timeout.String())\n\t\ttime.Sleep(timeout)\n\t\ttimeout = timeout * 2\n\t\tattempt += 1\n\t}\n\treturn nil\n}\n\n\/\/ retryWhenUnreachable is a convenient wrapper for retryOnErrors that makes it\n\/\/ retry when the registry is not reachable. 
Additional error patterns may\n\/\/ follow.\nfunc retryWhenUnreachable(t *testing.T, f func() error, errorPatterns ...string) error {\n\treturn retryOnErrors(t, append(errorPatterns, unreachableErrorPatterns...), f)\n}\n\nfunc TestRegistryClientConnect(t *testing.T) {\n\tc := dockerregistry.NewClient(10*time.Second, true)\n\tconn, err := c.Connect(\"docker.io\", false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor _, s := range []string{\"index.docker.io\", \"https:\/\/docker.io\", \"https:\/\/index.docker.io\"} {\n\t\totherConn, err := c.Connect(s, false)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: can't connect: %v\", s, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(otherConn, conn) {\n\t\t\tt.Errorf(\"%s: did not reuse connection: %#v %#v\", s, conn, otherConn)\n\t\t}\n\t}\n\n\totherConn, err := c.Connect(\"index.docker.io:443\", false)\n\tif err != nil || reflect.DeepEqual(otherConn, conn) {\n\t\tt.Errorf(\"should not have reused index.docker.io:443: %v\", err)\n\t}\n\n\tif _, err := c.Connect(\"http:\/\/ba%3\/\", false); err == nil {\n\t\tt.Error(\"Unexpected non-error\")\n\t}\n}\n\nfunc TestRegistryClientConnectPulpRegistry(t *testing.T) {\n\tc := dockerregistry.NewClient(10*time.Second, true)\n\tconn, err := c.Connect(pulpRegistryName, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar image *dockerregistry.Image\n\terr = retryWhenUnreachable(t, func() error {\n\t\timage, err = conn.ImageByTag(\"library\", \"rhel\", \"latest\")\n\t\treturn err\n\t}, imageNotFoundErrorPatterns...)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"x509: certificate has expired or is not yet valid\") {\n\t\t\tt.Skipf(\"SKIPPING: due to expired certificate of %s: %v\", pulpRegistryName, err)\n\t\t}\n\t\tt.Skip(\"pulp is failing\")\n\t\t\/\/t.Fatal(err)\n\t}\n\tif len(image.Image.ID) == 0 {\n\t\tt.Fatalf(\"image had no ID: %#v\", image)\n\t}\n}\n\nfunc TestRegistryClientDockerHubV2(t *testing.T) {\n\tc := dockerregistry.NewClient(10*time.Second, true)\n\tconn, err := c.Connect(dockerHubV2RegistryName, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar image *dockerregistry.Image\n\terr = retryWhenUnreachable(t, func() error {\n\t\timage, err = conn.ImageByTag(\"openshift\", \"hello-openshift\", \"latest\")\n\t\treturn err\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(image.Image.ID) == 0 {\n\t\tt.Fatalf(\"image had no ID: %#v\", image)\n\t}\n}\n\nfunc TestRegistryClientDockerHubV1(t *testing.T) {\n\tc := dockerregistry.NewClient(10*time.Second, true)\n\t\/\/ a v1 only path\n\tconn, err := c.Connect(dockerHubV1RegistryName, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar image *dockerregistry.Image\n\terr = retryWhenUnreachable(t, func() error {\n\t\timage, err = conn.ImageByTag(\"openshift\", \"hello-openshift\", \"latest\")\n\t\treturn err\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(image.Image.ID) == 0 {\n\t\tt.Fatalf(\"image had no ID: %#v\", image)\n\t}\n}\n\nfunc TestRegistryClientRegistryNotFound(t *testing.T) {\n\tconn, err := dockerregistry.NewClient(10*time.Second, true).Connect(\"localhost:65000\", false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := conn.ImageByID(\"foo\", \"bar\", \"baz\"); !dockerregistry.IsRegistryNotFound(err) {\n\t\tt.Error(err)\n\t}\n}\n\nfunc doTestRegistryClientImage(t *testing.T, registry, reponame, version string) {\n\tconn, err := dockerregistry.NewClient(10*time.Second, version == \"v2\").Connect(registry, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = 
retryWhenUnreachable(t, func() error {\n\t\t_, err := conn.ImageByTag(\"openshift\", \"origin-not-found\", \"latest\")\n\t\treturn err\n\t})\n\tif err == nil || (!dockerregistry.IsRepositoryNotFound(err) && !dockerregistry.IsTagNotFound(err)) {\n\t\tt.Errorf(\"%s: unexpected error: %v\", version, err)\n\t}\n\n\tvar image *dockerregistry.Image\n\terr = retryWhenUnreachable(t, func() error {\n\t\timage, err = conn.ImageByTag(\"openshift\", reponame, \"latest\")\n\t\treturn err\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif image.Image.Comment != \"Imported from -\" {\n\t\tt.Errorf(\"%s: unexpected image comment\", version)\n\t}\n\n\tif image.Image.Architecture != \"amd64\" {\n\t\tt.Errorf(\"%s: unexpected image architecture\", version)\n\t}\n\n\tif version == \"v2\" && !image.PullByID {\n\t\tt.Errorf(\"%s: should be able to pull by ID %s\", version, image.Image.ID)\n\t}\n\n\tvar other *dockerregistry.Image\n\terr = retryWhenUnreachable(t, func() error {\n\t\tother, err = conn.ImageByID(\"openshift\", reponame, image.Image.ID)\n\t\treturn err\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !reflect.DeepEqual(other.Image.ContainerConfig.Entrypoint, image.Image.ContainerConfig.Entrypoint) {\n\t\tt.Errorf(\"%s: unexpected image: %#v\", version, other)\n\t}\n}\n\nfunc TestRegistryClientAPIv2ManifestV2Schema2(t *testing.T) {\n\tt.Log(\"openshift\/schema-v2-test-repo was pushed by Docker 1.11.1\")\n\tdoTestRegistryClientImage(t, dockerHubV2RegistryName, \"schema-v2-test-repo\", \"v2\")\n}\n\nfunc TestRegistryClientAPIv2ManifestV2Schema1(t *testing.T) {\n\tt.Log(\"openshift\/schema-v1-test-repo was pushed by Docker 1.8.2\")\n\tdoTestRegistryClientImage(t, dockerHubV2RegistryName, \"schema-v1-test-repo\", \"v2\")\n}\n\nfunc DISABLEDTestRegistryClientAPIv1(t *testing.T) {\n\tt.Log(\"openshift\/schema-v1-test-repo was pushed by Docker 1.8.2\")\n\tdoTestRegistryClientImage(t, dockerHubV1RegistryName, \"schema-v1-test-repo\", \"v1\")\n}\n\nfunc TestRegistryClientQuayIOImage(t *testing.T) {\n\tconn, err := dockerregistry.NewClient(10*time.Second, true).Connect(\"quay.io\", false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = retryWhenUnreachable(t, func() error {\n\t\t_, err := conn.ImageByTag(\"coreos\", \"etcd\", \"latest\")\n\t\treturn err\n\t}, imageNotFoundErrorPatterns...)\n\tif err != nil {\n\t\tt.Skipf(\"SKIPPING: unexpected error from quay.io: %v\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Matthew Holt and The Caddy Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage acmeserver\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/caddyserver\/caddy\/v2\"\n\t\"github.com\/caddyserver\/caddy\/v2\/modules\/caddyhttp\"\n\t\"github.com\/caddyserver\/caddy\/v2\/modules\/caddypki\"\n\t\"github.com\/go-chi\/chi\"\n\t\"github.com\/smallstep\/certificates\/acme\"\n\tacmeAPI 
\"github.com\/smallstep\/certificates\/acme\/api\"\n\tacmeNoSQL \"github.com\/smallstep\/certificates\/acme\/db\/nosql\"\n\t\"github.com\/smallstep\/certificates\/authority\"\n\t\"github.com\/smallstep\/certificates\/authority\/provisioner\"\n\t\"github.com\/smallstep\/certificates\/db\"\n\t\"github.com\/smallstep\/nosql\"\n\t\"go.uber.org\/zap\"\n)\n\nfunc init() {\n\tcaddy.RegisterModule(Handler{})\n}\n\n\/\/ Handler is an ACME server handler.\ntype Handler struct {\n\t\/\/ The ID of the CA to use for signing. This refers to\n\t\/\/ the ID given to the CA in the `pki` app. If omitted,\n\t\/\/ the default ID is \"local\".\n\tCA string `json:\"ca,omitempty\"`\n\n\t\/\/ The hostname or IP address by which ACME clients\n\t\/\/ will access the server. This is used to populate\n\t\/\/ the ACME directory endpoint. Default: localhost.\n\t\/\/ COMPATIBILITY NOTE \/ TODO: This property may go away in the\n\t\/\/ future, as it is currently only required due to\n\t\/\/ limitations in the underlying library. Do not rely\n\t\/\/ on this property long-term; check release notes.\n\tHost string `json:\"host,omitempty\"`\n\n\t\/\/ The path prefix under which to serve all ACME\n\t\/\/ endpoints. All other requests will not be served\n\t\/\/ by this handler and will be passed through to\n\t\/\/ the next one. Default: \"\/acme\/\"\n\t\/\/ COMPATIBILITY NOTE \/ TODO: This property may go away in the\n\t\/\/ future, as it is currently only required due to\n\t\/\/ limitations in the underlying library. Do not rely\n\t\/\/ on this property long-term; check release notes.\n\tPathPrefix string `json:\"path_prefix,omitempty\"`\n\n\t\/\/ If true, the CA's root will be the issuer instead of\n\t\/\/ the intermediate. This is NOT recommended and should\n\t\/\/ only be used when devices\/clients do not properly\n\t\/\/ validate certificate chains. 
EXPERIMENTAL: Might be\n\t\/\/ changed or removed in the future.\n\tSignWithRoot bool `json:\"sign_with_root,omitempty\"`\n\n\tacmeEndpoints http.Handler\n\tlogger *zap.Logger\n}\n\n\/\/ CaddyModule returns the Caddy module information.\nfunc (Handler) CaddyModule() caddy.ModuleInfo {\n\treturn caddy.ModuleInfo{\n\t\tID: \"http.handlers.acme_server\",\n\t\tNew: func() caddy.Module { return new(Handler) },\n\t}\n}\n\n\/\/ Provision sets up the ACME server handler.\nfunc (ash *Handler) Provision(ctx caddy.Context) error {\n\tash.logger = ctx.Logger(ash)\n\t\/\/ set some defaults\n\tif ash.CA == \"\" {\n\t\tash.CA = caddypki.DefaultCAID\n\t}\n\tif ash.Host == \"\" {\n\t\tash.Host = defaultHost\n\t}\n\tif ash.PathPrefix == \"\" {\n\t\tash.PathPrefix = defaultPathPrefix\n\t}\n\n\t\/\/ get a reference to the configured CA\n\tappModule, err := ctx.App(\"pki\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tpkiApp := appModule.(*caddypki.PKI)\n\tca, ok := pkiApp.CAs[ash.CA]\n\tif !ok {\n\t\treturn fmt.Errorf(\"no certificate authority configured with id: %s\", ash.CA)\n\t}\n\n\tdatabase, err := ash.openDatabase()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tauthorityConfig := caddypki.AuthorityConfig{\n\t\tSignWithRoot: ash.SignWithRoot,\n\t\tAuthConfig: &authority.AuthConfig{\n\t\t\tProvisioners: provisioner.List{\n\t\t\t\t&provisioner.ACME{\n\t\t\t\t\tName: ash.CA,\n\t\t\t\t\tType: provisioner.TypeACME.String(),\n\t\t\t\t\tClaims: &provisioner.Claims{\n\t\t\t\t\t\tMinTLSDur: &provisioner.Duration{Duration: 5 * time.Minute},\n\t\t\t\t\t\tMaxTLSDur: &provisioner.Duration{Duration: 24 * time.Hour * 365},\n\t\t\t\t\t\tDefaultTLSDur: &provisioner.Duration{Duration: 12 * time.Hour},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tDB: database,\n\t}\n\n\tauth, err := ca.NewAuthority(authorityConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar acmeDB acme.DB\n\tif authorityConfig.DB != nil {\n\t\tacmeDB, err = acmeNoSQL.New(auth.GetDatabase().(nosql.DB))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"configuring ACME DB: %v\", err)\n\t\t}\n\t}\n\n\t\/\/ create the router for the ACME endpoints\n\tacmeRouterHandler := acmeAPI.NewHandler(acmeAPI.HandlerOptions{\n\t\tCA: auth,\n\t\tDB: acmeDB, \/\/ stores all the server state\n\t\tDNS: ash.Host, \/\/ used for directory links; TODO: not needed (follow-up upstream with step-ca)\n\t\tPrefix: strings.Trim(ash.PathPrefix, \"\/\"), \/\/ used for directory links\n\t})\n\n\t\/\/ extract its http.Handler so we can use it directly\n\tr := chi.NewRouter()\n\tr.Route(ash.PathPrefix, func(r chi.Router) {\n\t\tacmeRouterHandler.Route(r)\n\t})\n\tash.acmeEndpoints = r\n\n\treturn nil\n}\n\nfunc (ash Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {\n\tif strings.HasPrefix(r.URL.Path, ash.PathPrefix) {\n\t\tash.acmeEndpoints.ServeHTTP(w, r)\n\t\treturn nil\n\t}\n\treturn next.ServeHTTP(w, r)\n}\n\nfunc (ash Handler) getDatabaseKey() string {\n\tkey := ash.CA\n\tkey = strings.ToLower(key)\n\tkey = strings.TrimSpace(key)\n\treturn keyCleaner.ReplaceAllLiteralString(key, \"\")\n}\n\n\/\/ Cleanup implements caddy.CleanerUpper and closes any idle databases.\nfunc (ash Handler) Cleanup() error {\n\tkey := ash.getDatabaseKey()\n\tdeleted, err := databasePool.Delete(key)\n\tif deleted {\n\t\tash.logger.Debug(\"unloading unused CA database\", zap.String(\"db_key\", key))\n\t}\n\tif err != nil {\n\t\tash.logger.Error(\"closing CA database\", zap.String(\"db_key\", key), zap.Error(err))\n\t}\n\treturn err\n}\n\nfunc (ash Handler) 
openDatabase() (*db.AuthDB, error) {\n\tkey := ash.getDatabaseKey()\n\tdatabase, loaded, err := databasePool.LoadOrNew(key, func() (caddy.Destructor, error) {\n\t\tdbFolder := filepath.Join(caddy.AppDataDir(), \"acme_server\", key)\n\t\tdbPath := filepath.Join(dbFolder, \"db\")\n\n\t\terr := os.MkdirAll(dbFolder, 0755)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"making folder for CA database: %v\", err)\n\t\t}\n\n\t\tdbConfig := &db.Config{\n\t\t\tType: \"bbolt\",\n\t\t\tDataSource: dbPath,\n\t\t}\n\t\tdatabase, err := db.New(dbConfig)\n\t\treturn databaseCloser{&database}, err\n\t})\n\n\tif loaded {\n\t\tash.logger.Debug(\"loaded preexisting CA database\", zap.String(\"db_key\", key))\n\t}\n\n\treturn database.(databaseCloser).DB, err\n}\n\nconst (\n\tdefaultHost = \"localhost\"\n\tdefaultPathPrefix = \"\/acme\/\"\n)\n\nvar keyCleaner = regexp.MustCompile(`[^\\w.-_]`)\nvar databasePool = caddy.NewUsagePool()\n\ntype databaseCloser struct {\n\tDB *db.AuthDB\n}\n\nfunc (closer databaseCloser) Destruct() error {\n\treturn (*closer.DB).Shutdown()\n}\n\n\/\/ Interface guards\nvar (\n\t_ caddyhttp.MiddlewareHandler = (*Handler)(nil)\n\t_ caddy.Provisioner = (*Handler)(nil)\n)\n<commit_msg>acmeserver: Don't set host for directory links by default<commit_after>\/\/ Copyright 2015 Matthew Holt and The Caddy Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage acmeserver\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/caddyserver\/caddy\/v2\"\n\t\"github.com\/caddyserver\/caddy\/v2\/modules\/caddyhttp\"\n\t\"github.com\/caddyserver\/caddy\/v2\/modules\/caddypki\"\n\t\"github.com\/go-chi\/chi\"\n\t\"github.com\/smallstep\/certificates\/acme\"\n\tacmeAPI \"github.com\/smallstep\/certificates\/acme\/api\"\n\tacmeNoSQL \"github.com\/smallstep\/certificates\/acme\/db\/nosql\"\n\t\"github.com\/smallstep\/certificates\/authority\"\n\t\"github.com\/smallstep\/certificates\/authority\/provisioner\"\n\t\"github.com\/smallstep\/certificates\/db\"\n\t\"github.com\/smallstep\/nosql\"\n\t\"go.uber.org\/zap\"\n)\n\nfunc init() {\n\tcaddy.RegisterModule(Handler{})\n}\n\n\/\/ Handler is an ACME server handler.\ntype Handler struct {\n\t\/\/ The ID of the CA to use for signing. This refers to\n\t\/\/ the ID given to the CA in the `pki` app. If omitted,\n\t\/\/ the default ID is \"local\".\n\tCA string `json:\"ca,omitempty\"`\n\n\t\/\/ The hostname or IP address by which ACME clients\n\t\/\/ will access the server. This is used to populate\n\t\/\/ the ACME directory endpoint. If not set, the Host\n\t\/\/ header of the request will be used.\n\t\/\/ COMPATIBILITY NOTE \/ TODO: This property may go away in the\n\t\/\/ future. Do not rely on this property long-term; check release notes.\n\tHost string `json:\"host,omitempty\"`\n\n\t\/\/ The path prefix under which to serve all ACME\n\t\/\/ endpoints. 
All other requests will not be served\n\t\/\/ by this handler and will be passed through to\n\t\/\/ the next one. Default: \"\/acme\/\".\n\t\/\/ COMPATIBILITY NOTE \/ TODO: This property may go away in the\n\t\/\/ future, as it is currently only required due to\n\t\/\/ limitations in the underlying library. Do not rely\n\t\/\/ on this property long-term; check release notes.\n\tPathPrefix string `json:\"path_prefix,omitempty\"`\n\n\t\/\/ If true, the CA's root will be the issuer instead of\n\t\/\/ the intermediate. This is NOT recommended and should\n\t\/\/ only be used when devices\/clients do not properly\n\t\/\/ validate certificate chains. EXPERIMENTAL: Might be\n\t\/\/ changed or removed in the future.\n\tSignWithRoot bool `json:\"sign_with_root,omitempty\"`\n\n\tacmeEndpoints http.Handler\n\tlogger *zap.Logger\n}\n\n\/\/ CaddyModule returns the Caddy module information.\nfunc (Handler) CaddyModule() caddy.ModuleInfo {\n\treturn caddy.ModuleInfo{\n\t\tID: \"http.handlers.acme_server\",\n\t\tNew: func() caddy.Module { return new(Handler) },\n\t}\n}\n\n\/\/ Provision sets up the ACME server handler.\nfunc (ash *Handler) Provision(ctx caddy.Context) error {\n\tash.logger = ctx.Logger(ash)\n\t\/\/ set some defaults\n\tif ash.CA == \"\" {\n\t\tash.CA = caddypki.DefaultCAID\n\t}\n\tif ash.PathPrefix == \"\" {\n\t\tash.PathPrefix = defaultPathPrefix\n\t}\n\n\t\/\/ get a reference to the configured CA\n\tappModule, err := ctx.App(\"pki\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tpkiApp := appModule.(*caddypki.PKI)\n\tca, ok := pkiApp.CAs[ash.CA]\n\tif !ok {\n\t\treturn fmt.Errorf(\"no certificate authority configured with id: %s\", ash.CA)\n\t}\n\n\tdatabase, err := ash.openDatabase()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tauthorityConfig := caddypki.AuthorityConfig{\n\t\tSignWithRoot: ash.SignWithRoot,\n\t\tAuthConfig: &authority.AuthConfig{\n\t\t\tProvisioners: provisioner.List{\n\t\t\t\t&provisioner.ACME{\n\t\t\t\t\tName: ash.CA,\n\t\t\t\t\tType: provisioner.TypeACME.String(),\n\t\t\t\t\tClaims: &provisioner.Claims{\n\t\t\t\t\t\tMinTLSDur: &provisioner.Duration{Duration: 5 * time.Minute},\n\t\t\t\t\t\tMaxTLSDur: &provisioner.Duration{Duration: 24 * time.Hour * 365},\n\t\t\t\t\t\tDefaultTLSDur: &provisioner.Duration{Duration: 12 * time.Hour},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tDB: database,\n\t}\n\n\tauth, err := ca.NewAuthority(authorityConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar acmeDB acme.DB\n\tif authorityConfig.DB != nil {\n\t\tacmeDB, err = acmeNoSQL.New(auth.GetDatabase().(nosql.DB))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"configuring ACME DB: %v\", err)\n\t\t}\n\t}\n\n\t\/\/ create the router for the ACME endpoints\n\tacmeRouterHandler := acmeAPI.NewHandler(acmeAPI.HandlerOptions{\n\t\tCA: auth,\n\t\tDB: acmeDB, \/\/ stores all the server state\n\t\tDNS: ash.Host, \/\/ used for directory links\n\t\tPrefix: strings.Trim(ash.PathPrefix, \"\/\"), \/\/ used for directory links\n\t})\n\n\t\/\/ extract its http.Handler so we can use it directly\n\tr := chi.NewRouter()\n\tr.Route(ash.PathPrefix, func(r chi.Router) {\n\t\tacmeRouterHandler.Route(r)\n\t})\n\tash.acmeEndpoints = r\n\n\treturn nil\n}\n\nfunc (ash Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {\n\tif strings.HasPrefix(r.URL.Path, ash.PathPrefix) {\n\t\tash.acmeEndpoints.ServeHTTP(w, r)\n\t\treturn nil\n\t}\n\treturn next.ServeHTTP(w, r)\n}\n\nfunc (ash Handler) getDatabaseKey() string {\n\tkey := ash.CA\n\tkey = strings.ToLower(key)\n\tkey = 
strings.TrimSpace(key)\n\treturn keyCleaner.ReplaceAllLiteralString(key, \"\")\n}\n\n\/\/ Cleanup implements caddy.CleanerUpper and closes any idle databases.\nfunc (ash Handler) Cleanup() error {\n\tkey := ash.getDatabaseKey()\n\tdeleted, err := databasePool.Delete(key)\n\tif deleted {\n\t\tash.logger.Debug(\"unloading unused CA database\", zap.String(\"db_key\", key))\n\t}\n\tif err != nil {\n\t\tash.logger.Error(\"closing CA database\", zap.String(\"db_key\", key), zap.Error(err))\n\t}\n\treturn err\n}\n\nfunc (ash Handler) openDatabase() (*db.AuthDB, error) {\n\tkey := ash.getDatabaseKey()\n\tdatabase, loaded, err := databasePool.LoadOrNew(key, func() (caddy.Destructor, error) {\n\t\tdbFolder := filepath.Join(caddy.AppDataDir(), \"acme_server\", key)\n\t\tdbPath := filepath.Join(dbFolder, \"db\")\n\n\t\terr := os.MkdirAll(dbFolder, 0755)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"making folder for CA database: %v\", err)\n\t\t}\n\n\t\tdbConfig := &db.Config{\n\t\t\tType: \"bbolt\",\n\t\t\tDataSource: dbPath,\n\t\t}\n\t\tdatabase, err := db.New(dbConfig)\n\t\treturn databaseCloser{&database}, err\n\t})\n\n\tif loaded {\n\t\tash.logger.Debug(\"loaded preexisting CA database\", zap.String(\"db_key\", key))\n\t}\n\n\treturn database.(databaseCloser).DB, err\n}\n\nconst defaultPathPrefix = \"\/acme\/\"\n\nvar keyCleaner = regexp.MustCompile(`[^\\w.-_]`)\nvar databasePool = caddy.NewUsagePool()\n\ntype databaseCloser struct {\n\tDB *db.AuthDB\n}\n\nfunc (closer databaseCloser) Destruct() error {\n\treturn (*closer.DB).Shutdown()\n}\n\n\/\/ Interface guards\nvar (\n\t_ caddyhttp.MiddlewareHandler = (*Handler)(nil)\n\t_ caddy.Provisioner = (*Handler)(nil)\n)\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"net\"\n\n\tlog \"github.com\/cihub\/seelog\"\n\t\"github.com\/stampzilla\/gocast\"\n\t\"github.com\/stampzilla\/gocast\/events\"\n\t\"github.com\/stampzilla\/gocast\/handlers\"\n)\n\ntype Chromecast struct {\n\tId string\n\tName_ string `json:\"Name\"`\n\n\tPrimaryApp string\n\tPrimaryEndpoint string\n\t\/\/PlaybackActive bool\n\t\/\/Paused bool\n\n\tIsStandBy bool\n\tIsActiveInput bool\n\n\tVolume float64\n\tMuted bool\n\n\tAddr net.IP\n\tPort int\n\n\tpublish func()\n\n\tmediaHandler *handlers.Media\n\tmediaConnectionHandler *handlers.Connection\n\n\t*gocast.Device\n}\n\nfunc NewChromecast(d *gocast.Device) *Chromecast {\n\tc := &Chromecast{\n\t\tDevice: d,\n\t}\n\n\td.OnEvent(c.Event)\n\terr := d.Connect()\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn c\n\t}\n\n\tc.mediaHandler = &handlers.Media{}\n\tc.mediaConnectionHandler = &handlers.Connection{}\n\n\treturn c\n}\n\nfunc (c *Chromecast) Play() {\n\tc.mediaHandler.Play()\n}\nfunc (c *Chromecast) Pause() {\n\tc.mediaHandler.Pause()\n}\nfunc (c *Chromecast) Stop() {\n\tc.mediaHandler.Stop()\n}\n\nfunc (c *Chromecast) PlayUrl(url string, contentType string) {\n\tc.Device.ReceiverHandler.LaunchApp(gocast.AppMedia)\n\tif contentType == \"\" {\n\t\tcontentType = \"audio\/mpeg\"\n\t}\n\titem := handlers.MediaItem{\n\t\tContentId: url,\n\t\tStreamType: \"BUFFERED\",\n\t\tContentType: contentType,\n\t}\n\tc.mediaHandler.LoadMedia(item, 0, true, map[string]interface{}{})\n}\n\nfunc (c *Chromecast) Listen() {\n}\n\nfunc (c *Chromecast) Event(event events.Event) {\n\tswitch data := event.(type) {\n\tcase events.Connected:\n\t\tlog.Info(c.Name(), \"- Connected, weeihoo\")\n\n\t\tc.Addr = c.Ip()\n\t\tc.Port = c.Device.Port()\n\t\tc.Id = c.Uuid()\n\t\tc.Name_ = c.Name()\n\n\t\tstate.Add(c)\n\tcase 
events.Disconnected:\n\t\tlog.Warn(c.Name(), \"- Disconnected, bah :\/\")\n\n\t\tstate.Remove(c)\n\n\t\tc.Device.Connect()\n\tcase events.AppStarted:\n\t\tlog.Info(c.Name(), \"- App started:\", data.DisplayName, \"(\", data.AppID, \")\")\n\t\t\/\/spew.Dump(\"Data:\", data)\n\n\t\tc.PrimaryApp = data.DisplayName\n\t\tc.PrimaryEndpoint = data.TransportId\n\n\t\t\/\/If the app supports media controls lets subscribe to it\n\t\tif data.HasNamespace(\"urn:x-cast:com.google.cast.media\") {\n\t\t\tc.Subscribe(\"urn:x-cast:com.google.cast.tp.connection\", data.TransportId, c.mediaConnectionHandler)\n\t\t\tc.Subscribe(\"urn:x-cast:com.google.cast.media\", data.TransportId, c.mediaHandler)\n\t\t}\n\n\tcase events.AppStopped:\n\t\tlog.Info(c.Name(), \"- App stopped:\", data.DisplayName, \"(\", data.AppID, \")\")\n\t\t\/\/spew.Dump(\"Data:\", data)\n\n\t\t\/\/unsubscribe from old channels\n\t\tfor _, v := range data.Namespaces {\n\t\t\tif v.Name == \"urn:x-cast:com.google.cast.media\" {\n\t\t\t\tc.mediaConnectionHandler.Disconnect()\n\t\t\t}\n\t\t\tc.UnsubscribeByUrnAndDestinationId(v.Name, data.TransportId)\n\t\t}\n\t\tc.PrimaryApp = \"\"\n\t\tc.PrimaryEndpoint = \"\"\n\n\tcase events.ReceiverStatus:\n\t\tc.IsStandBy = data.Status.IsStandBy\n\t\tc.IsActiveInput = data.Status.IsActiveInput\n\t\tc.Volume = data.Status.Volume.Level\n\t\tc.Muted = data.Status.Volume.Muted\n\n\t\/\/gocast.MediaEvent:\n\tdefault:\n\t\tlog.Warn(\"unexpected event %T: %#v\\n\", data, data)\n\t}\n\n\tc.publish()\n}\n<commit_msg>Added some error handling in chromecast node<commit_after>package main\n\nimport (\n\t\"net\"\n\n\tlog \"github.com\/cihub\/seelog\"\n\t\"github.com\/stampzilla\/gocast\"\n\t\"github.com\/stampzilla\/gocast\/events\"\n\t\"github.com\/stampzilla\/gocast\/handlers\"\n)\n\ntype Chromecast struct {\n\tId string\n\tName_ string `json:\"Name\"`\n\n\tPrimaryApp string\n\tPrimaryEndpoint string\n\t\/\/PlaybackActive bool\n\t\/\/Paused bool\n\n\tIsStandBy bool\n\tIsActiveInput bool\n\n\tVolume float64\n\tMuted bool\n\n\tAddr net.IP\n\tPort int\n\n\tpublish func()\n\n\tmediaHandler *handlers.Media\n\tmediaConnectionHandler *handlers.Connection\n\n\t*gocast.Device\n}\n\nfunc NewChromecast(d *gocast.Device) *Chromecast {\n\tc := &Chromecast{\n\t\tDevice: d,\n\t}\n\n\td.OnEvent(c.Event)\n\terr := d.Connect()\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn c\n\t}\n\n\tc.mediaHandler = &handlers.Media{}\n\tc.mediaConnectionHandler = &handlers.Connection{}\n\n\treturn c\n}\n\nfunc (c *Chromecast) Play() {\n\tc.mediaHandler.Play()\n}\nfunc (c *Chromecast) Pause() {\n\tc.mediaHandler.Pause()\n}\nfunc (c *Chromecast) Stop() {\n\tc.mediaHandler.Stop()\n}\n\nfunc (c *Chromecast) PlayUrl(url string, contentType string) {\n\terr := c.Device.ReceiverHandler.LaunchApp(gocast.AppMedia)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\n\tif contentType == \"\" {\n\t\tcontentType = \"audio\/mpeg\"\n\t}\n\titem := handlers.MediaItem{\n\t\tContentId: url,\n\t\tStreamType: \"BUFFERED\",\n\t\tContentType: contentType,\n\t}\n\t\/\/ assign the error so the check below actually sees LoadMedia's result\n\terr = c.mediaHandler.LoadMedia(item, 0, true, map[string]interface{}{})\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n}\n
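\n\/\/ Example (sketch, hypothetical address): stream an MP3 to the device,\n\/\/ letting contentType default to audio\/mpeg:\n\/\/\n\/\/\tc.PlayUrl(\"http:\/\/192.168.1.10:8080\/song.mp3\", \"\")\n\nfunc (c *Chromecast) Listen() {\n}\n\nfunc (c *Chromecast) Event(event events.Event) {\n\tswitch data := event.(type) {\n\tcase events.Connected:\n\t\tlog.Info(c.Name(), \"- Connected, weeihoo\")\n\n\t\tc.Addr = c.Ip()\n\t\tc.Port = c.Device.Port()\n\t\tc.Id = c.Uuid()\n\t\tc.Name_ = c.Name()\n\n\t\tstate.Add(c)\n\tcase events.Disconnected:\n\t\tlog.Warn(c.Name(), \"- 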
Disconnected, bah :\/\")\n\n\t\tstate.Remove(c)\n\n\t\tc.Device.Connect()\n\tcase events.AppStarted:\n\t\tlog.Info(c.Name(), \"- App started:\", data.DisplayName, \"(\", data.AppID, \")\")\n\t\t\/\/spew.Dump(\"Data:\", data)\n\n\t\tc.PrimaryApp = data.DisplayName\n\t\tc.PrimaryEndpoint = data.TransportId\n\n\t\t\/\/If the app supports media controls let's subscribe to it\n\t\tif data.HasNamespace(\"urn:x-cast:com.google.cast.media\") {\n\t\t\tc.Subscribe(\"urn:x-cast:com.google.cast.tp.connection\", data.TransportId, c.mediaConnectionHandler)\n\t\t\tc.Subscribe(\"urn:x-cast:com.google.cast.media\", data.TransportId, c.mediaHandler)\n\t\t}\n\n\tcase events.AppStopped:\n\t\tlog.Info(c.Name(), \"- App stopped:\", data.DisplayName, \"(\", data.AppID, \")\")\n\t\t\/\/spew.Dump(\"Data:\", data)\n\n\t\t\/\/unsubscribe from old channels\n\t\tfor _, v := range data.Namespaces {\n\t\t\tif v.Name == \"urn:x-cast:com.google.cast.media\" {\n\t\t\t\tc.mediaConnectionHandler.Disconnect()\n\t\t\t}\n\t\t\tc.UnsubscribeByUrnAndDestinationId(v.Name, data.TransportId)\n\t\t}\n\t\tc.PrimaryApp = \"\"\n\t\tc.PrimaryEndpoint = \"\"\n\n\tcase events.ReceiverStatus:\n\t\tc.IsStandBy = data.Status.IsStandBy\n\t\tc.IsActiveInput = data.Status.IsActiveInput\n\t\tc.Volume = data.Status.Volume.Level\n\t\tc.Muted = data.Status.Volume.Muted\n\n\t\/\/gocast.MediaEvent:\n\tdefault:\n\t\t\/\/ use Warnf so the format verbs are actually interpolated\n\t\tlog.Warnf(\"unexpected event %T: %#v\", data, data)\n\t}\n\n\tc.publish()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build darwin\n\n\/*\n * Copyright (C) 2014-2017 Christian Muehlhaeuser\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as published\n * by the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\n * Authors:\n * Christian Muehlhaeuser <muesli@gmail.com>\n *\/\n\n\/\/ Package notificationbee is a Bee that can trigger desktop notifications.\npackage notificationbee\n\n\/\/ Run executes the Bee's event loop.\nfunc (mod *NotificationBee) execAction(text string, urgency uint32) {\n\t\/\/FIXME: implement\n}\n<commit_msg>added notification wrapper for os x<commit_after>\/\/ +build darwin\n\n\/*\n * Copyright (C) 2014-2017 Christian Muehlhaeuser\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as published\n * by the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with this program. 
If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\n * Authors:\n * Christian Muehlhaeuser <muesli@gmail.com>\n *\/\n\n\/\/ Package notificationbee is a Bee that can trigger desktop notifications.\npackage notificationbee\n\nimport \"github.com\/deckarep\/gosx-notifier\"\n\n\/\/ execAction displays a desktop notification for the given text via\n\/\/ gosx-notifier; the urgency value is currently unused on darwin.\nfunc (mod *NotificationBee) execAction(text string, urgency uint32) {\n\tnote := gosxnotifier.NewNotification(text)\n\tnote.Subtitle = \"this is subtitle\"\n\tnote.Push()\n}\n
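\n\/\/ Example (sketch): the bee's action dispatcher might invoke this as\n\/\/\n\/\/\tmod.execAction(\"Backup finished\", 0)\n<|endoftext|>"} {"text":"<commit_before>package epictest\n\nimport (\n\tcontext \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/code.google.com\/p\/go.net\/context\"\n\tdatastore \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-datastore\"\n\tsync \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-datastore\/sync\"\n\n\tblockstore \"github.com\/jbenet\/go-ipfs\/blocks\/blockstore\"\n\tblockservice \"github.com\/jbenet\/go-ipfs\/blockservice\"\n\tcore \"github.com\/jbenet\/go-ipfs\/core\"\n\texchange \"github.com\/jbenet\/go-ipfs\/exchange\"\n\tbitswap \"github.com\/jbenet\/go-ipfs\/exchange\/bitswap\"\n\tbsnet \"github.com\/jbenet\/go-ipfs\/exchange\/bitswap\/network\"\n\tmerkledag \"github.com\/jbenet\/go-ipfs\/merkledag\"\n\thost \"github.com\/jbenet\/go-ipfs\/p2p\/host\"\n\tpeer \"github.com\/jbenet\/go-ipfs\/p2p\/peer\"\n\tdht \"github.com\/jbenet\/go-ipfs\/routing\/dht\"\n\t\"github.com\/jbenet\/go-ipfs\/util\/datastore2\"\n\tdelay \"github.com\/jbenet\/go-ipfs\/util\/delay\"\n\teventlog \"github.com\/jbenet\/go-ipfs\/util\/eventlog\"\n\ttestutil \"github.com\/jbenet\/go-ipfs\/util\/testutil\"\n)\n\nvar log = eventlog.Logger(\"epictest\")\n\n\/\/ TODO merge with core.IpfsNode\ntype Core struct {\n\t*core.IpfsNode\n}\n\nfunc (c *Core) ID() peer.ID {\n\treturn c.IpfsNode.Identity\n}\n\nfunc (c *Core) Bootstrap(ctx context.Context, p peer.PeerInfo) error {\n\treturn c.IpfsNode.Bootstrap(ctx, []peer.PeerInfo{p})\n}\n\nfunc makeCore(ctx context.Context, rf RepoFactory) (*Core, error) {\n\tnode, err := rf(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnode.Blocks, err = blockservice.New(node.Blockstore, node.Exchange)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnode.DAG = merkledag.NewDAGService(node.Blocks)\n\t\/\/ to make sure nothing is omitted, init each individual field and assign\n\t\/\/ all at once at the bottom.\n\treturn &Core{\n\t\tIpfsNode: node,\n\t}, nil\n}\n\ntype RepoFactory func(ctx context.Context) (*core.IpfsNode, error)\n\ntype Repo interface {\n\tID() peer.ID\n\tBlockstore() blockstore.Blockstore\n\tExchange() exchange.Interface\n\n\tBootstrap(ctx context.Context, peer peer.ID) error\n}\n\ntype repo struct {\n\t\/\/ DHT, Exchange, Network,Datastore\n\tbitSwapNetwork bsnet.BitSwapNetwork\n\tblockstore blockstore.Blockstore\n\texchange exchange.Interface\n\tdatastore datastore.ThreadSafeDatastore\n\thost host.Host\n\tdht *dht.IpfsDHT\n\tid peer.ID\n}\n\nfunc (r *repo) ID() peer.ID {\n\treturn r.id\n}\n\nfunc (c *repo) Bootstrap(ctx context.Context, p peer.ID) error {\n\treturn c.dht.Connect(ctx, p)\n}\n\nfunc (r *repo) Datastore() datastore.ThreadSafeDatastore {\n\treturn r.datastore\n}\n\nfunc (r *repo) Blockstore() blockstore.Blockstore {\n\treturn r.blockstore\n}\n\nfunc (r *repo) Exchange() exchange.Interface {\n\treturn r.exchange\n}\n\nfunc MocknetTestRepo(p peer.ID, h host.Host, conf testutil.LatencyConfig) RepoFactory {\n\treturn func(ctx context.Context) (*core.IpfsNode, error) {\n\t\tconst 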
kWriteCacheElems = 100\n\t\tconst alwaysSendToPeer = true\n\t\tdsDelay := delay.Fixed(conf.BlockstoreLatency)\n\t\tds := datastore2.CloserWrap(sync.MutexWrap(datastore2.WithDelay(datastore.NewMapDatastore(), dsDelay)))\n\n\t\tlog.Debugf(\"MocknetTestRepo: %s %s %s\", p, h.ID(), h)\n\t\tdhtt := dht.NewDHT(ctx, h, ds)\n\t\tbsn := bsnet.NewFromIpfsHost(h, dhtt)\n\t\tbstore, err := blockstore.WriteCached(blockstore.NewBlockstore(ds), kWriteCacheElems)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\texch := bitswap.New(ctx, p, bsn, bstore, alwaysSendToPeer)\n\t\treturn &core.IpfsNode{\n\t\t\tPeerstore: h.Peerstore(),\n\t\t\tBlockstore: bstore,\n\t\t\tExchange: exch,\n\t\t\tDatastore: ds,\n\t\t\tPeerHost: h,\n\t\t\tRouting: dhtt,\n\t\t\tIdentity: p,\n\t\t\tDHT: dhtt,\n\t\t}, nil\n\t}\n}\n<commit_msg>misc: move initialization squash<commit_after>package epictest\n\nimport (\n\tcontext \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/code.google.com\/p\/go.net\/context\"\n\tdatastore \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-datastore\"\n\tsync \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-datastore\/sync\"\n\n\tblockstore \"github.com\/jbenet\/go-ipfs\/blocks\/blockstore\"\n\tblockservice \"github.com\/jbenet\/go-ipfs\/blockservice\"\n\tcore \"github.com\/jbenet\/go-ipfs\/core\"\n\texchange \"github.com\/jbenet\/go-ipfs\/exchange\"\n\tbitswap \"github.com\/jbenet\/go-ipfs\/exchange\/bitswap\"\n\tbsnet \"github.com\/jbenet\/go-ipfs\/exchange\/bitswap\/network\"\n\tmerkledag \"github.com\/jbenet\/go-ipfs\/merkledag\"\n\thost \"github.com\/jbenet\/go-ipfs\/p2p\/host\"\n\tpeer \"github.com\/jbenet\/go-ipfs\/p2p\/peer\"\n\tdht \"github.com\/jbenet\/go-ipfs\/routing\/dht\"\n\t\"github.com\/jbenet\/go-ipfs\/util\/datastore2\"\n\tdelay \"github.com\/jbenet\/go-ipfs\/util\/delay\"\n\teventlog \"github.com\/jbenet\/go-ipfs\/util\/eventlog\"\n\ttestutil \"github.com\/jbenet\/go-ipfs\/util\/testutil\"\n)\n\nvar log = eventlog.Logger(\"epictest\")\n\n\/\/ TODO merge with core.IpfsNode\ntype Core struct {\n\t*core.IpfsNode\n}\n\nfunc (c *Core) ID() peer.ID {\n\treturn c.IpfsNode.Identity\n}\n\nfunc (c *Core) Bootstrap(ctx context.Context, p peer.PeerInfo) error {\n\treturn c.IpfsNode.Bootstrap(ctx, []peer.PeerInfo{p})\n}\n\nfunc makeCore(ctx context.Context, rf RepoFactory) (*Core, error) {\n\tnode, err := rf(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ to make sure nothing is omitted, init each individual field and assign\n\t\/\/ all at once at the bottom.\n\treturn &Core{\n\t\tIpfsNode: node,\n\t}, nil\n}\n\ntype RepoFactory func(ctx context.Context) (*core.IpfsNode, error)\n\ntype Repo interface {\n\tID() peer.ID\n\tBlockstore() blockstore.Blockstore\n\tExchange() exchange.Interface\n\n\tBootstrap(ctx context.Context, peer peer.ID) error\n}\n\ntype repo struct {\n\t\/\/ DHT, Exchange, Network,Datastore\n\tbitSwapNetwork bsnet.BitSwapNetwork\n\tblockstore blockstore.Blockstore\n\texchange exchange.Interface\n\tdatastore datastore.ThreadSafeDatastore\n\thost host.Host\n\tdht *dht.IpfsDHT\n\tid peer.ID\n}\n\nfunc (r *repo) ID() peer.ID {\n\treturn r.id\n}\n\nfunc (c *repo) Bootstrap(ctx context.Context, p peer.ID) error {\n\treturn c.dht.Connect(ctx, p)\n}\n\nfunc (r *repo) Datastore() datastore.ThreadSafeDatastore {\n\treturn r.datastore\n}\n\nfunc (r *repo) Blockstore() blockstore.Blockstore {\n\treturn r.blockstore\n}\n\nfunc (r *repo) Exchange() exchange.Interface {\n\treturn r.exchange\n}\n
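\n\/\/ MocknetTestRepo returns a RepoFactory for tests. Example usage (sketch;\n\/\/ p and h would come from a mocknet, and the zero-value LatencyConfig adds\n\/\/ no artificial latency):\n\/\/\n\/\/\tc, err := makeCore(ctx, MocknetTestRepo(p, h, testutil.LatencyConfig{}))\nfunc 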
MocknetTestRepo(p peer.ID, h host.Host, conf testutil.LatencyConfig) RepoFactory {\n\treturn func(ctx context.Context) (*core.IpfsNode, error) {\n\t\tconst kWriteCacheElems = 100\n\t\tconst alwaysSendToPeer = true\n\t\tdsDelay := delay.Fixed(conf.BlockstoreLatency)\n\t\tds := datastore2.CloserWrap(sync.MutexWrap(datastore2.WithDelay(datastore.NewMapDatastore(), dsDelay)))\n\n\t\tlog.Debugf(\"MocknetTestRepo: %s %s %s\", p, h.ID(), h)\n\t\tdhtt := dht.NewDHT(ctx, h, ds)\n\t\tbsn := bsnet.NewFromIpfsHost(h, dhtt)\n\t\tbstore, err := blockstore.WriteCached(blockstore.NewBlockstore(ds), kWriteCacheElems)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\texch := bitswap.New(ctx, p, bsn, bstore, alwaysSendToPeer)\n\t\tblockservice, err := blockservice.New(bstore, exch)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn &core.IpfsNode{\n\t\t\tPeerstore: h.Peerstore(),\n\t\t\tBlockstore: bstore,\n\t\t\tExchange: exch,\n\t\t\tDatastore: ds,\n\t\t\tPeerHost: h,\n\t\t\tDAG: merkledag.NewDAGService(blockservice),\n\t\t\tBlocks: blockservice,\n\t\t\tRouting: dhtt,\n\t\t\tIdentity: p,\n\t\t\tDHT: dhtt,\n\t\t}, nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage framework\n\nimport (\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com\/go-openapi\/spec\"\n\t\"github.com\/pborman\/uuid\"\n\t\"k8s.io\/klog\"\n\n\tapps \"k8s.io\/api\/apps\/v1beta1\"\n\tauditreg \"k8s.io\/api\/auditregistration\/v1alpha1\"\n\tautoscaling \"k8s.io\/api\/autoscaling\/v1\"\n\tcertificates \"k8s.io\/api\/certificates\/v1beta1\"\n\t\"k8s.io\/api\/core\/v1\"\n\textensions \"k8s.io\/api\/extensions\/v1beta1\"\n\trbac \"k8s.io\/api\/rbac\/v1alpha1\"\n\tstorage \"k8s.io\/api\/storage\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\tauthauthenticator \"k8s.io\/apiserver\/pkg\/authentication\/authenticator\"\n\t\"k8s.io\/apiserver\/pkg\/authentication\/authenticatorfactory\"\n\tauthenticatorunion \"k8s.io\/apiserver\/pkg\/authentication\/request\/union\"\n\t\"k8s.io\/apiserver\/pkg\/authentication\/user\"\n\t\"k8s.io\/apiserver\/pkg\/authorization\/authorizer\"\n\t\"k8s.io\/apiserver\/pkg\/authorization\/authorizerfactory\"\n\tauthorizerunion \"k8s.io\/apiserver\/pkg\/authorization\/union\"\n\topenapinamer \"k8s.io\/apiserver\/pkg\/endpoints\/openapi\"\n\tgenericapiserver \"k8s.io\/apiserver\/pkg\/server\"\n\t\"k8s.io\/apiserver\/pkg\/server\/options\"\n\tserverstorage \"k8s.io\/apiserver\/pkg\/server\/storage\"\n\t\"k8s.io\/apiserver\/pkg\/storage\/storagebackend\"\n\t\"k8s.io\/client-go\/informers\"\n\tclientset \"k8s.io\/client-go\/kubernetes\"\n\trestclient \"k8s.io\/client-go\/rest\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/legacyscheme\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/testapi\"\n\t\"k8s.io\/kubernetes\/pkg\/apis\/batch\"\n\tpolicy 
\"k8s.io\/kubernetes\/pkg\/apis\/policy\/v1beta1\"\n\t\"k8s.io\/kubernetes\/pkg\/generated\/openapi\"\n\tkubeletclient \"k8s.io\/kubernetes\/pkg\/kubelet\/client\"\n\t\"k8s.io\/kubernetes\/pkg\/master\"\n\t\"k8s.io\/kubernetes\/pkg\/version\"\n)\n\n\/\/ Config is a struct of configuration directives for NewMasterComponents.\ntype Config struct {\n\t\/\/ If nil, a default is used, partially filled configs will not get populated.\n\tMasterConfig *master.Config\n\tStartReplicationManager bool\n\t\/\/ Client throttling qps\n\tQPS float32\n\t\/\/ Client burst qps, also burst replicas allowed in rc manager\n\tBurst int\n\t\/\/ TODO: Add configs for endpoints controller, scheduler etc\n}\n\n\/\/ alwaysAllow always allows an action\ntype alwaysAllow struct{}\n\nfunc (alwaysAllow) Authorize(requestAttributes authorizer.Attributes) (authorizer.Decision, string, error) {\n\treturn authorizer.DecisionAllow, \"always allow\", nil\n}\n\n\/\/ alwaysEmpty simulates \"no authentication\" for old tests\nfunc alwaysEmpty(req *http.Request) (*authauthenticator.Response, bool, error) {\n\treturn &authauthenticator.Response{\n\t\tUser: &user.DefaultInfo{\n\t\t\tName: \"\",\n\t\t},\n\t}, true, nil\n}\n\n\/\/ MasterReceiver can be used to provide the master to a custom incoming server function\ntype MasterReceiver interface {\n\tSetMaster(m *master.Master)\n}\n\n\/\/ MasterHolder implements\ntype MasterHolder struct {\n\tInitialized chan struct{}\n\tM *master.Master\n}\n\nfunc (h *MasterHolder) SetMaster(m *master.Master) {\n\th.M = m\n\tclose(h.Initialized)\n}\n\n\/\/ startMasterOrDie starts a kubernetes master and an httpserver to handle api requests\nfunc startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) {\n\tvar m *master.Master\n\tvar s *httptest.Server\n\n\tif incomingServer != nil {\n\t\ts = incomingServer\n\t} else {\n\t\ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\tm.GenericAPIServer.Handler.ServeHTTP(w, req)\n\t\t}))\n\t}\n\n\tstopCh := make(chan struct{})\n\tcloseFn := func() {\n\t\tm.GenericAPIServer.RunPreShutdownHooks()\n\t\tclose(stopCh)\n\t\ts.Close()\n\t}\n\n\tif masterConfig == nil {\n\t\tmasterConfig = NewMasterConfig()\n\t\tmasterConfig.GenericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(openapi.GetOpenAPIDefinitions, openapinamer.NewDefinitionNamer(legacyscheme.Scheme))\n\t\tmasterConfig.GenericConfig.OpenAPIConfig.Info = &spec.Info{\n\t\t\tInfoProps: spec.InfoProps{\n\t\t\t\tTitle: \"Kubernetes\",\n\t\t\t\tVersion: \"unversioned\",\n\t\t\t},\n\t\t}\n\t\tmasterConfig.GenericConfig.OpenAPIConfig.DefaultResponse = &spec.Response{\n\t\t\tResponseProps: spec.ResponseProps{\n\t\t\t\tDescription: \"Default Response.\",\n\t\t\t},\n\t\t}\n\t\tmasterConfig.GenericConfig.OpenAPIConfig.GetDefinitions = openapi.GetOpenAPIDefinitions\n\t\tmasterConfig.GenericConfig.SwaggerConfig = genericapiserver.DefaultSwaggerConfig()\n\t}\n\n\t\/\/ set the loopback client config\n\tif masterConfig.GenericConfig.LoopbackClientConfig == nil {\n\t\tmasterConfig.GenericConfig.LoopbackClientConfig = &restclient.Config{QPS: 50, Burst: 100, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}\n\t}\n\tmasterConfig.GenericConfig.LoopbackClientConfig.Host = s.URL\n\n\tprivilegedLoopbackToken := uuid.NewRandom().String()\n\t\/\/ wrap any available authorizer\n\ttokens := 
make(map[string]*user.DefaultInfo)\n\ttokens[privilegedLoopbackToken] = &user.DefaultInfo{\n\t\tName: user.APIServerUser,\n\t\tUID: uuid.NewRandom().String(),\n\t\tGroups: []string{user.SystemPrivilegedGroup},\n\t}\n\n\ttokenAuthenticator := authenticatorfactory.NewFromTokens(tokens)\n\tif masterConfig.GenericConfig.Authentication.Authenticator == nil {\n\t\tmasterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, authauthenticator.RequestFunc(alwaysEmpty))\n\t} else {\n\t\tmasterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, masterConfig.GenericConfig.Authentication.Authenticator)\n\t}\n\n\tif masterConfig.GenericConfig.Authorization.Authorizer != nil {\n\t\ttokenAuthorizer := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup)\n\t\tmasterConfig.GenericConfig.Authorization.Authorizer = authorizerunion.New(tokenAuthorizer, masterConfig.GenericConfig.Authorization.Authorizer)\n\t} else {\n\t\tmasterConfig.GenericConfig.Authorization.Authorizer = alwaysAllow{}\n\t}\n\n\tmasterConfig.GenericConfig.LoopbackClientConfig.BearerToken = privilegedLoopbackToken\n\n\tclientset, err := clientset.NewForConfig(masterConfig.GenericConfig.LoopbackClientConfig)\n\tif err != nil {\n\t\tklog.Fatal(err)\n\t}\n\n\tmasterConfig.ExtraConfig.VersionedInformers = informers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout)\n\tm, err = masterConfig.Complete().New(genericapiserver.NewEmptyDelegate())\n\tif err != nil {\n\t\tcloseFn()\n\t\tklog.Fatalf(\"error in bringing up the master: %v\", err)\n\t}\n\tif masterReceiver != nil {\n\t\tmasterReceiver.SetMaster(m)\n\t}\n\n\t\/\/ TODO have this start method actually use the normal start sequence for the API server\n\t\/\/ this method never actually calls the `Run` method for the API server\n\t\/\/ fire the post hooks ourselves\n\tm.GenericAPIServer.PrepareRun()\n\tm.GenericAPIServer.RunPostStartHooks(stopCh)\n\n\tcfg := *masterConfig.GenericConfig.LoopbackClientConfig\n\tcfg.ContentConfig.GroupVersion = &schema.GroupVersion{}\n\tprivilegedClient, err := restclient.RESTClientFor(&cfg)\n\tif err != nil {\n\t\tcloseFn()\n\t\tklog.Fatal(err)\n\t}\n\tvar lastHealthContent []byte\n\terr = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {\n\t\tresult := privilegedClient.Get().AbsPath(\"\/healthz\").Do()\n\t\tstatus := 0\n\t\tresult.StatusCode(&status)\n\t\tif status == 200 {\n\t\t\treturn true, nil\n\t\t}\n\t\tlastHealthContent, _ = result.Raw()\n\t\treturn false, nil\n\t})\n\tif err != nil {\n\t\tcloseFn()\n\t\tklog.Errorf(\"last health content: %q\", string(lastHealthContent))\n\t\tklog.Fatal(err)\n\t}\n\n\treturn m, s, closeFn\n}\n\n\/\/ Returns the master config appropriate for most integration tests.\nfunc NewIntegrationTestMasterConfig() *master.Config {\n\tmasterConfig := NewMasterConfig()\n\tmasterConfig.GenericConfig.PublicAddress = net.ParseIP(\"192.168.10.4\")\n\tmasterConfig.ExtraConfig.APIResourceConfigSource = master.DefaultAPIResourceConfigSource()\n\n\t\/\/ TODO: get rid of these tests or port them to secure serving\n\tmasterConfig.GenericConfig.SecureServing = &genericapiserver.SecureServingInfo{Listener: fakeLocalhost443Listener{}}\n\n\treturn masterConfig\n}\n\n\/\/ Returns a basic master config.\nfunc NewMasterConfig() *master.Config {\n\t\/\/ This causes the integration tests to exercise the etcd\n\t\/\/ prefix code, so please don't change without ensuring\n\t\/\/ sufficient 
coverage in other ways.\n\tetcdOptions := options.NewEtcdOptions(storagebackend.NewDefaultConfig(uuid.New(), nil))\n\tetcdOptions.StorageConfig.ServerList = []string{GetEtcdURL()}\n\n\tinfo, _ := runtime.SerializerInfoForMediaType(legacyscheme.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)\n\tns := NewSingleContentTypeSerializer(legacyscheme.Scheme, info)\n\n\tresourceEncoding := serverstorage.NewDefaultResourceEncodingConfig(legacyscheme.Scheme)\n\t\/\/ FIXME (soltysh): this GroupVersionResource override should be configurable\n\t\/\/ we need to set both for the whole group and for cronjobs, separately\n\tresourceEncoding.SetVersionEncoding(batch.GroupName, *testapi.Batch.GroupVersion(), schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})\n\tresourceEncoding.SetResourceEncoding(schema.GroupResource{Group: batch.GroupName, Resource: \"cronjobs\"}, schema.GroupVersion{Group: batch.GroupName, Version: \"v1beta1\"}, schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})\n\t\/\/ we also need to set both for the storage group and for volumeattachments, separately\n\tresourceEncoding.SetVersionEncoding(storage.GroupName, *testapi.Storage.GroupVersion(), schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal})\n\tresourceEncoding.SetResourceEncoding(schema.GroupResource{Group: storage.GroupName, Resource: \"volumeattachments\"}, schema.GroupVersion{Group: storage.GroupName, Version: \"v1beta1\"}, schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal})\n\n\tstorageFactory := serverstorage.NewDefaultStorageFactory(etcdOptions.StorageConfig, runtime.ContentTypeJSON, ns, resourceEncoding, master.DefaultAPIResourceConfigSource(), nil)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: v1.GroupName, Resource: serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: autoscaling.GroupName, Resource: serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: batch.GroupName, Resource: serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: apps.GroupName, Resource: serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: extensions.GroupName, Resource: serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: policy.GroupName, Resource: serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: rbac.GroupName, Resource: serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: certificates.GroupName, Resource: serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: storage.GroupName, Resource: serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: auditreg.GroupName, Resource: serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\n\tgenericConfig := genericapiserver.NewConfig(legacyscheme.Codecs)\n\tkubeVersion := version.Get()\n\tgenericConfig.Version = &kubeVersion\n\tgenericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer()\n\n\t\/\/ TODO: get rid of these tests or port them to secure serving\n\tgenericConfig.SecureServing = 
&genericapiserver.SecureServingInfo{Listener: fakeLocalhost443Listener{}}\n\n\terr := etcdOptions.ApplyWithStorageFactoryTo(storageFactory, genericConfig)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn &master.Config{\n\t\tGenericConfig: genericConfig,\n\t\tExtraConfig: master.ExtraConfig{\n\t\t\tAPIResourceConfigSource: master.DefaultAPIResourceConfigSource(),\n\t\t\tStorageFactory: storageFactory,\n\t\t\tKubeletClientConfig: kubeletclient.KubeletClientConfig{Port: 10250},\n\t\t\tAPIServerServicePort: 443,\n\t\t\tMasterCount: 1,\n\t\t},\n\t}\n}\n\n\/\/ CloseFunc can be called to cleanup the master\ntype CloseFunc func()\n\nfunc RunAMaster(masterConfig *master.Config) (*master.Master, *httptest.Server, CloseFunc) {\n\tif masterConfig == nil {\n\t\tmasterConfig = NewMasterConfig()\n\t\tmasterConfig.GenericConfig.EnableProfiling = true\n\t}\n\treturn startMasterOrDie(masterConfig, nil, nil)\n}\n\nfunc RunAMasterUsingServer(masterConfig *master.Config, s *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) {\n\treturn startMasterOrDie(masterConfig, s, masterReceiver)\n}\n\n\/\/ SharedEtcd creates a storage config for a shared etcd instance, with a unique prefix.\nfunc SharedEtcd() *storagebackend.Config {\n\tcfg := storagebackend.NewDefaultConfig(path.Join(uuid.New(), \"registry\"), nil)\n\tcfg.ServerList = []string{GetEtcdURL()}\n\treturn cfg\n}\n\ntype fakeLocalhost443Listener struct{}\n\nfunc (fakeLocalhost443Listener) Accept() (net.Conn, error) {\n\treturn nil, nil\n}\n\nfunc (fakeLocalhost443Listener) Close() error {\n\treturn nil\n}\n\nfunc (fakeLocalhost443Listener) Addr() net.Addr {\n\treturn &net.TCPAddr{\n\t\tIP: net.IPv4(127, 0, 0, 1),\n\t\tPort: 443,\n\t}\n}\n<commit_msg>Ensure we capture http and trace related logs<commit_after>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage framework\n\nimport (\n\t\"flag\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"path\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/go-openapi\/spec\"\n\t\"github.com\/pborman\/uuid\"\n\t\"k8s.io\/klog\"\n\n\tapps \"k8s.io\/api\/apps\/v1beta1\"\n\tauditreg \"k8s.io\/api\/auditregistration\/v1alpha1\"\n\tautoscaling \"k8s.io\/api\/autoscaling\/v1\"\n\tcertificates \"k8s.io\/api\/certificates\/v1beta1\"\n\t\"k8s.io\/api\/core\/v1\"\n\textensions \"k8s.io\/api\/extensions\/v1beta1\"\n\trbac \"k8s.io\/api\/rbac\/v1alpha1\"\n\tstorage \"k8s.io\/api\/storage\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\tauthauthenticator \"k8s.io\/apiserver\/pkg\/authentication\/authenticator\"\n\t\"k8s.io\/apiserver\/pkg\/authentication\/authenticatorfactory\"\n\tauthenticatorunion \"k8s.io\/apiserver\/pkg\/authentication\/request\/union\"\n\t\"k8s.io\/apiserver\/pkg\/authentication\/user\"\n\t\"k8s.io\/apiserver\/pkg\/authorization\/authorizer\"\n\t\"k8s.io\/apiserver\/pkg\/authorization\/authorizerfactory\"\n\tauthorizerunion 
\"k8s.io\/apiserver\/pkg\/authorization\/union\"\n\topenapinamer \"k8s.io\/apiserver\/pkg\/endpoints\/openapi\"\n\tgenericapiserver \"k8s.io\/apiserver\/pkg\/server\"\n\t\"k8s.io\/apiserver\/pkg\/server\/options\"\n\tserverstorage \"k8s.io\/apiserver\/pkg\/server\/storage\"\n\t\"k8s.io\/apiserver\/pkg\/storage\/storagebackend\"\n\t\"k8s.io\/client-go\/informers\"\n\tclientset \"k8s.io\/client-go\/kubernetes\"\n\trestclient \"k8s.io\/client-go\/rest\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/legacyscheme\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/testapi\"\n\t\"k8s.io\/kubernetes\/pkg\/apis\/batch\"\n\tpolicy \"k8s.io\/kubernetes\/pkg\/apis\/policy\/v1beta1\"\n\t\"k8s.io\/kubernetes\/pkg\/generated\/openapi\"\n\tkubeletclient \"k8s.io\/kubernetes\/pkg\/kubelet\/client\"\n\t\"k8s.io\/kubernetes\/pkg\/master\"\n\t\"k8s.io\/kubernetes\/pkg\/version\"\n)\n\n\/\/ Config is a struct of configuration directives for NewMasterComponents.\ntype Config struct {\n\t\/\/ If nil, a default is used, partially filled configs will not get populated.\n\tMasterConfig *master.Config\n\tStartReplicationManager bool\n\t\/\/ Client throttling qps\n\tQPS float32\n\t\/\/ Client burst qps, also burst replicas allowed in rc manager\n\tBurst int\n\t\/\/ TODO: Add configs for endpoints controller, scheduler etc\n}\n\n\/\/ alwaysAllow always allows an action\ntype alwaysAllow struct{}\n\nfunc (alwaysAllow) Authorize(requestAttributes authorizer.Attributes) (authorizer.Decision, string, error) {\n\treturn authorizer.DecisionAllow, \"always allow\", nil\n}\n\n\/\/ alwaysEmpty simulates \"no authentication\" for old tests\nfunc alwaysEmpty(req *http.Request) (*authauthenticator.Response, bool, error) {\n\treturn &authauthenticator.Response{\n\t\tUser: &user.DefaultInfo{\n\t\t\tName: \"\",\n\t\t},\n\t}, true, nil\n}\n\n\/\/ MasterReceiver can be used to provide the master to a custom incoming server function\ntype MasterReceiver interface {\n\tSetMaster(m *master.Master)\n}\n\n\/\/ MasterHolder implements\ntype MasterHolder struct {\n\tInitialized chan struct{}\n\tM *master.Master\n}\n\nfunc (h *MasterHolder) SetMaster(m *master.Master) {\n\th.M = m\n\tclose(h.Initialized)\n}\n\n\/\/ startMasterOrDie starts a kubernetes master and an httpserver to handle api requests\nfunc startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) {\n\tvar m *master.Master\n\tvar s *httptest.Server\n\n\t\/\/ Ensure we log at least level 4\n\tv := flag.Lookup(\"v\").Value\n\tlevel, _ := strconv.Atoi(v.String())\n\tif level < 4 {\n\t\tv.Set(\"4\")\n\t}\n\n\tif incomingServer != nil {\n\t\ts = incomingServer\n\t} else {\n\t\ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\tm.GenericAPIServer.Handler.ServeHTTP(w, req)\n\t\t}))\n\t}\n\n\tstopCh := make(chan struct{})\n\tcloseFn := func() {\n\t\tm.GenericAPIServer.RunPreShutdownHooks()\n\t\tclose(stopCh)\n\t\ts.Close()\n\t}\n\n\tif masterConfig == nil {\n\t\tmasterConfig = NewMasterConfig()\n\t\tmasterConfig.GenericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(openapi.GetOpenAPIDefinitions, openapinamer.NewDefinitionNamer(legacyscheme.Scheme))\n\t\tmasterConfig.GenericConfig.OpenAPIConfig.Info = &spec.Info{\n\t\t\tInfoProps: spec.InfoProps{\n\t\t\t\tTitle: \"Kubernetes\",\n\t\t\t\tVersion: \"unversioned\",\n\t\t\t},\n\t\t}\n\t\tmasterConfig.GenericConfig.OpenAPIConfig.DefaultResponse = &spec.Response{\n\t\t\tResponseProps: 
spec.ResponseProps{\n\t\t\t\tDescription: \"Default Response.\",\n\t\t\t},\n\t\t}\n\t\tmasterConfig.GenericConfig.OpenAPIConfig.GetDefinitions = openapi.GetOpenAPIDefinitions\n\t\tmasterConfig.GenericConfig.SwaggerConfig = genericapiserver.DefaultSwaggerConfig()\n\t}\n\n\t\/\/ set the loopback client config\n\tif masterConfig.GenericConfig.LoopbackClientConfig == nil {\n\t\tmasterConfig.GenericConfig.LoopbackClientConfig = &restclient.Config{QPS: 50, Burst: 100, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}\n\t}\n\tmasterConfig.GenericConfig.LoopbackClientConfig.Host = s.URL\n\n\tprivilegedLoopbackToken := uuid.NewRandom().String()\n\t\/\/ wrap any available authorizer\n\ttokens := make(map[string]*user.DefaultInfo)\n\ttokens[privilegedLoopbackToken] = &user.DefaultInfo{\n\t\tName: user.APIServerUser,\n\t\tUID: uuid.NewRandom().String(),\n\t\tGroups: []string{user.SystemPrivilegedGroup},\n\t}\n\n\ttokenAuthenticator := authenticatorfactory.NewFromTokens(tokens)\n\tif masterConfig.GenericConfig.Authentication.Authenticator == nil {\n\t\tmasterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, authauthenticator.RequestFunc(alwaysEmpty))\n\t} else {\n\t\tmasterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, masterConfig.GenericConfig.Authentication.Authenticator)\n\t}\n\n\tif masterConfig.GenericConfig.Authorization.Authorizer != nil {\n\t\ttokenAuthorizer := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup)\n\t\tmasterConfig.GenericConfig.Authorization.Authorizer = authorizerunion.New(tokenAuthorizer, masterConfig.GenericConfig.Authorization.Authorizer)\n\t} else {\n\t\tmasterConfig.GenericConfig.Authorization.Authorizer = alwaysAllow{}\n\t}\n\n\tmasterConfig.GenericConfig.LoopbackClientConfig.BearerToken = privilegedLoopbackToken\n\n\tclientset, err := clientset.NewForConfig(masterConfig.GenericConfig.LoopbackClientConfig)\n\tif err != nil {\n\t\tklog.Fatal(err)\n\t}\n\n\tmasterConfig.ExtraConfig.VersionedInformers = informers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout)\n\tm, err = masterConfig.Complete().New(genericapiserver.NewEmptyDelegate())\n\tif err != nil {\n\t\tcloseFn()\n\t\tklog.Fatalf(\"error in bringing up the master: %v\", err)\n\t}\n\tif masterReceiver != nil {\n\t\tmasterReceiver.SetMaster(m)\n\t}\n\n\t\/\/ TODO have this start method actually use the normal start sequence for the API server\n\t\/\/ this method never actually calls the `Run` method for the API server\n\t\/\/ fire the post hooks ourselves\n\tm.GenericAPIServer.PrepareRun()\n\tm.GenericAPIServer.RunPostStartHooks(stopCh)\n\n\tcfg := *masterConfig.GenericConfig.LoopbackClientConfig\n\tcfg.ContentConfig.GroupVersion = &schema.GroupVersion{}\n\tprivilegedClient, err := restclient.RESTClientFor(&cfg)\n\tif err != nil {\n\t\tcloseFn()\n\t\tklog.Fatal(err)\n\t}\n\tvar lastHealthContent []byte\n\terr = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {\n\t\tresult := privilegedClient.Get().AbsPath(\"\/healthz\").Do()\n\t\tstatus := 0\n\t\tresult.StatusCode(&status)\n\t\tif status == 200 {\n\t\t\treturn true, nil\n\t\t}\n\t\tlastHealthContent, _ = result.Raw()\n\t\treturn false, nil\n\t})\n\tif err != nil {\n\t\tcloseFn()\n\t\tklog.Errorf(\"last health content: %q\", string(lastHealthContent))\n\t\tklog.Fatal(err)\n\t}\n\n\treturn m, s, closeFn\n}\n\n\/\/ Returns the master config appropriate for 
most integration tests.\nfunc NewIntegrationTestMasterConfig() *master.Config {\n\tmasterConfig := NewMasterConfig()\n\tmasterConfig.GenericConfig.PublicAddress = net.ParseIP(\"192.168.10.4\")\n\tmasterConfig.ExtraConfig.APIResourceConfigSource = master.DefaultAPIResourceConfigSource()\n\n\t\/\/ TODO: get rid of these tests or port them to secure serving\n\tmasterConfig.GenericConfig.SecureServing = &genericapiserver.SecureServingInfo{Listener: fakeLocalhost443Listener{}}\n\n\treturn masterConfig\n}\n\n\/\/ Returns a basic master config.\nfunc NewMasterConfig() *master.Config {\n\t\/\/ This causes the integration tests to exercise the etcd\n\t\/\/ prefix code, so please don't change without ensuring\n\t\/\/ sufficient coverage in other ways.\n\tetcdOptions := options.NewEtcdOptions(storagebackend.NewDefaultConfig(uuid.New(), nil))\n\tetcdOptions.StorageConfig.ServerList = []string{GetEtcdURL()}\n\n\tinfo, _ := runtime.SerializerInfoForMediaType(legacyscheme.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)\n\tns := NewSingleContentTypeSerializer(legacyscheme.Scheme, info)\n\n\tresourceEncoding := serverstorage.NewDefaultResourceEncodingConfig(legacyscheme.Scheme)\n\t\/\/ FIXME (soltysh): this GroupVersionResource override should be configurable\n\t\/\/ we need to set both for the whole group and for cronjobs, separately\n\tresourceEncoding.SetVersionEncoding(batch.GroupName, *testapi.Batch.GroupVersion(), schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})\n\tresourceEncoding.SetResourceEncoding(schema.GroupResource{Group: batch.GroupName, Resource: \"cronjobs\"}, schema.GroupVersion{Group: batch.GroupName, Version: \"v1beta1\"}, schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})\n\t\/\/ we also need to set both for the storage group and for volumeattachments, separately\n\tresourceEncoding.SetVersionEncoding(storage.GroupName, *testapi.Storage.GroupVersion(), schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal})\n\tresourceEncoding.SetResourceEncoding(schema.GroupResource{Group: storage.GroupName, Resource: \"volumeattachments\"}, schema.GroupVersion{Group: storage.GroupName, Version: \"v1beta1\"}, schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal})\n\n\tstorageFactory := serverstorage.NewDefaultStorageFactory(etcdOptions.StorageConfig, runtime.ContentTypeJSON, ns, resourceEncoding, master.DefaultAPIResourceConfigSource(), nil)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: v1.GroupName, Resource: serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: autoscaling.GroupName, Resource: serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: batch.GroupName, Resource: serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: apps.GroupName, Resource: serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: extensions.GroupName, Resource: serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: policy.GroupName, Resource: serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: rbac.GroupName, Resource: 
serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: certificates.GroupName, Resource: serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: storage.GroupName, Resource: serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\tstorageFactory.SetSerializer(\n\t\tschema.GroupResource{Group: auditreg.GroupName, Resource: serverstorage.AllResources},\n\t\t\"\",\n\t\tns)\n\n\tgenericConfig := genericapiserver.NewConfig(legacyscheme.Codecs)\n\tkubeVersion := version.Get()\n\tgenericConfig.Version = &kubeVersion\n\tgenericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer()\n\n\t\/\/ TODO: get rid of these tests or port them to secure serving\n\tgenericConfig.SecureServing = &genericapiserver.SecureServingInfo{Listener: fakeLocalhost443Listener{}}\n\n\terr := etcdOptions.ApplyWithStorageFactoryTo(storageFactory, genericConfig)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn &master.Config{\n\t\tGenericConfig: genericConfig,\n\t\tExtraConfig: master.ExtraConfig{\n\t\t\tAPIResourceConfigSource: master.DefaultAPIResourceConfigSource(),\n\t\t\tStorageFactory: storageFactory,\n\t\t\tKubeletClientConfig: kubeletclient.KubeletClientConfig{Port: 10250},\n\t\t\tAPIServerServicePort: 443,\n\t\t\tMasterCount: 1,\n\t\t},\n\t}\n}\n\n\/\/ CloseFunc can be called to cleanup the master\ntype CloseFunc func()\n\nfunc RunAMaster(masterConfig *master.Config) (*master.Master, *httptest.Server, CloseFunc) {\n\tif masterConfig == nil {\n\t\tmasterConfig = NewMasterConfig()\n\t\tmasterConfig.GenericConfig.EnableProfiling = true\n\t}\n\treturn startMasterOrDie(masterConfig, nil, nil)\n}\n\nfunc RunAMasterUsingServer(masterConfig *master.Config, s *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) {\n\treturn startMasterOrDie(masterConfig, s, masterReceiver)\n}\n\n\/\/ SharedEtcd creates a storage config for a shared etcd instance, with a unique prefix.\nfunc SharedEtcd() *storagebackend.Config {\n\tcfg := storagebackend.NewDefaultConfig(path.Join(uuid.New(), \"registry\"), nil)\n\tcfg.ServerList = []string{GetEtcdURL()}\n\treturn cfg\n}\n\ntype fakeLocalhost443Listener struct{}\n\nfunc (fakeLocalhost443Listener) Accept() (net.Conn, error) {\n\treturn nil, nil\n}\n\nfunc (fakeLocalhost443Listener) Close() error {\n\treturn nil\n}\n\nfunc (fakeLocalhost443Listener) Addr() net.Addr {\n\treturn &net.TCPAddr{\n\t\tIP: net.IPv4(127, 0, 0, 1),\n\t\tPort: 443,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package consul\n\nimport (\n\t\"net\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/simia-tech\/netx\"\n)\n\ntype listener struct {\n\tlistener net.Listener\n\tconsul *consul\n\tid string\n\taddress string\n}\n\nfunc init() {\n\tnetx.RegisterListen(\"consul\", Listen)\n}\n\n\/\/ Listen starts a local tcp listener and registers its address and port under\n\/\/ the provided address to the consul instance that is specified in the provided options.\nfunc Listen(address string, options *netx.Options) (net.Listener, error) {\n\tconsul, err := newConsulFrom(options.Nodes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tl, err := net.Listen(\"tcp\", options.PublicAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tid, err := consul.register(address, l.Addr())\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"register local listener address [%s] at consul failed\", l.Addr())\n\t}\n\n\treturn &listener{\n\t\tlistener: 
l,\n\t\tconsul: consul,\n\t\tid: id,\n\t\taddress: address,\n\t}, nil\n}\n\nfunc (l *listener) Accept() (net.Conn, error) {\n\treturn l.listener.Accept()\n}\n\nfunc (l *listener) Close() error {\n\tif err := l.consul.deregister(l.id); err != nil {\n\t\treturn err\n\t}\n\treturn l.listener.Close()\n}\n\nfunc (l *listener) Addr() net.Addr {\n\treturn &addr{address: l.address}\n}\n<commit_msg>added missing code<commit_after>package consul\n\nimport (\n\t\"net\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/simia-tech\/netx\"\n)\n\ntype listener struct {\n\tlistener net.Listener\n\tconsul *consul\n\tid string\n\taddress string\n}\n\nfunc init() {\n\tnetx.RegisterListen(\"consul\", Listen)\n}\n\n\/\/ Listen starts a local tcp listener and registers its address and port under\n\/\/ the provided address to the consul instance that is specified in the provided options.\nfunc Listen(address string, options *netx.Options) (net.Listener, error) {\n\tconsul, err := newConsulFrom(options.Nodes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tl := options.PublicListener\n\tif l == nil {\n\t\tl, err = net.Listen(\"tcp\", options.PublicAddress)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tid, err := consul.register(address, l.Addr())\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"register local listener address [%s] at consul failed\", l.Addr())\n\t}\n\n\treturn &listener{\n\t\tlistener: l,\n\t\tconsul: consul,\n\t\tid: id,\n\t\taddress: address,\n\t}, nil\n}\n\nfunc (l *listener) Accept() (net.Conn, error) {\n\treturn l.listener.Accept()\n}\n\nfunc (l *listener) Close() error {\n\tif err := l.consul.deregister(l.id); err != nil {\n\t\treturn err\n\t}\n\treturn l.listener.Close()\n}\n\nfunc (l *listener) Addr() net.Addr {\n\treturn &addr{address: l.address}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"sync\"\n\t\"time\"\n)\n\ntype tester struct {\n\tfailures []failure\n\tcluster *cluster\n\tlimit int\n\n\tstatus Status\n}\n\nfunc (tt *tester) runLoop() {\n\ttt.status.Since = time.Now()\n\ttt.status.RoundLimit = tt.limit\n\ttt.status.cluster = tt.cluster\n\tfor _, f := range tt.failures {\n\t\ttt.status.Failures = append(tt.status.Failures, f.Desc())\n\t}\n\tfor i := 0; i < tt.limit; i++ {\n\t\ttt.status.setRound(i)\n\t\troundTotalCounter.Inc()\n\n\t\tvar (\n\t\t\tcurrentRevision int64\n\t\t\tsuccess bool\n\t\t)\n\t\tfor j, f := range tt.failures {\n\t\t\tcaseTotalCounter.WithLabelValues(f.Desc()).Inc()\n\n\t\t\ttt.status.setCase(j)\n\n\t\t\tif err := tt.cluster.WaitHealth(); err != nil {\n\t\t\t\tplog.Printf(\"[round#%d case#%d] wait full health error: %v\", i, j, err)\n\t\t\t\tif err := tt.cleanup(i, j); err != nil {\n\t\t\t\t\tplog.Printf(\"[round#%d case#%d] cleanup error: %v\", i, j, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tplog.Printf(\"[round#%d case#%d] start failure %s\", i, j, 
f.Desc())\n\n\t\t\tplog.Printf(\"[round#%d case#%d] start injecting failure...\", i, j)\n\t\t\tif err := f.Inject(tt.cluster, i); err != nil {\n\t\t\t\tplog.Printf(\"[round#%d case#%d] injection error: %v\", i, j, err)\n\t\t\t\tif err := tt.cleanup(i, j); err != nil {\n\t\t\t\t\tplog.Printf(\"[round#%d case#%d] cleanup error: %v\", i, j, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tplog.Printf(\"[round#%d case#%d] injected failure\", i, j)\n\n\t\t\tplog.Printf(\"[round#%d case#%d] start recovering failure...\", i, j)\n\t\t\tif err := f.Recover(tt.cluster, i); err != nil {\n\t\t\t\tplog.Printf(\"[round#%d case#%d] recovery error: %v\", i, j, err)\n\t\t\t\tif err := tt.cleanup(i, j); err != nil {\n\t\t\t\t\tplog.Printf(\"[round#%d case#%d] cleanup error: %v\", i, j, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tplog.Printf(\"[round#%d case#%d] recovered failure\", i, j)\n\n\t\t\tif tt.cluster.v2Only {\n\t\t\t\tplog.Printf(\"[round#%d case#%d] succeed!\", i, j)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tplog.Printf(\"[round#%d case#%d] canceling the stressers...\", i, j)\n\t\t\tfor _, s := range tt.cluster.Stressers {\n\t\t\t\ts.Cancel()\n\t\t\t}\n\t\t\tplog.Printf(\"[round#%d case#%d] canceled stressers\", i, j)\n\n\t\t\tplog.Printf(\"[round#%d case#%d] checking current revisions...\", i, j)\n\t\t\tvar (\n\t\t\t\trevs map[string]int64\n\t\t\t\thashes map[string]int64\n\t\t\t\trerr error\n\t\t\t\tok bool\n\t\t\t)\n\t\t\tfor k := 0; k < 5; k++ {\n\t\t\t\ttime.Sleep(time.Second)\n\n\t\t\t\trevs, hashes, rerr = tt.cluster.getRevisionHash()\n\t\t\t\tif rerr != nil {\n\t\t\t\t\tplog.Printf(\"[round#%d case#%d.%d] failed to get current revisions (%v)\", i, j, k, rerr)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif currentRevision, ok = getSameValue(revs); ok {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tplog.Printf(\"[round#%d case#%d.%d] inconsistent current revisions %+v\", i, j, k, revs)\n\t\t\t}\n\t\t\tif !ok || rerr != nil {\n\t\t\t\tplog.Printf(\"[round#%d case#%d] checking current revisions failed [revisions: %v]\", i, j, revs)\n\t\t\t\tif err := tt.cleanup(i, j); err != nil {\n\t\t\t\t\tplog.Printf(\"[round#%d case#%d] cleanup error: %v\", i, j, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tplog.Printf(\"[round#%d case#%d] all members are consistent with current revisions [revisions: %v]\", i, j, revs)\n\n\t\t\tplog.Printf(\"[round#%d case#%d] checking current storage hashes...\", i, j)\n\t\t\tif _, ok = getSameValue(hashes); !ok {\n\t\t\t\tplog.Printf(\"[round#%d case#%d] checking current storage hashes failed [hashes: %v]\", i, j, hashes)\n\t\t\t\tif err := tt.cleanup(i, j); err != nil {\n\t\t\t\t\tplog.Printf(\"[round#%d case#%d] cleanup error: %v\", i, j, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tplog.Printf(\"[round#%d case#%d] all members are consistent with storage hashes\", i, j)\n\n\t\t\tplog.Printf(\"[round#%d case#%d] restarting the stressers...\", i, j)\n\t\t\tfor _, s := range tt.cluster.Stressers {\n\t\t\t\tgo s.Stress()\n\t\t\t}\n\n\t\t\tplog.Printf(\"[round#%d case#%d] succeed!\", i, j)\n\t\t\tsuccess = true\n\t\t}\n\n\t\tif !success {\n\t\t\tcontinue\n\t\t}\n\t\trevToCompact := max(0, currentRevision-10000)\n\t\tplog.Printf(\"[round#%d] compacting storage at %d (current revision %d)\", i, revToCompact, currentRevision)\n\t\tif err := tt.cluster.compactKV(revToCompact); err != nil {\n\t\t\tplog.Printf(\"[round#%d] compactKV error (%v)\", i, err)\n\t\t\tif err := tt.cleanup(i, 0); err != nil 
{\n\t\t\t\tplog.Printf(\"[round#%d] cleanup error: %v\", i, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tplog.Printf(\"[round#%d] compacted storage\", i)\n\n\t\tplog.Printf(\"[round#%d] check compaction at %d\", i, revToCompact)\n\t\tif err := tt.cluster.checkCompact(revToCompact); err != nil {\n\t\t\tplog.Printf(\"[round#%d] checkCompact error (%v)\", i, err)\n\t\t\tif err := tt.cleanup(i, 0); err != nil {\n\t\t\t\tplog.Printf(\"[round#%d] cleanup error: %v\", i, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tplog.Printf(\"[round#%d] confirmed compaction at %d\", i, revToCompact)\n\n\t\tif i > 0 && i%500 == 0 { \/\/ every 500 rounds\n\t\t\tplog.Printf(\"[round#%d] canceling the stressers...\", i)\n\t\t\tfor _, s := range tt.cluster.Stressers {\n\t\t\t\ts.Cancel()\n\t\t\t}\n\t\t\tplog.Printf(\"[round#%d] canceled stressers\", i)\n\n\t\t\tplog.Printf(\"[round#%d] defragmenting...\", i)\n\t\t\tif err := tt.cluster.defrag(); err != nil {\n\t\t\t\tplog.Printf(\"[round#%d] defrag error (%v)\", i, err)\n\t\t\t\tif err := tt.cleanup(i, 0); err != nil {\n\t\t\t\t\tplog.Printf(\"[round#%d] cleanup error: %v\", i, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tplog.Printf(\"[round#%d] defragmented\", i)\n\n\t\t\tplog.Printf(\"[round#%d] restarting the stressers...\", i)\n\t\t\tfor _, s := range tt.cluster.Stressers {\n\t\t\t\tgo s.Stress()\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (tt *tester) cleanup(i, j int) error {\n\troundFailedTotalCounter.Inc()\n\tcaseFailedTotalCounter.WithLabelValues(tt.failures[j].Desc()).Inc()\n\n\tplog.Printf(\"[round#%d case#%d] cleaning up...\", i, j)\n\tif err := tt.cluster.Cleanup(); err != nil {\n\t\treturn err\n\t}\n\treturn tt.cluster.Bootstrap()\n}\n\ntype Status struct {\n\tSince time.Time\n\tFailures []string\n\tRoundLimit int\n\n\tCluster ClusterStatus\n\tcluster *cluster\n\n\tmu sync.Mutex \/\/ guards Round and Case\n\tRound int\n\tCase int\n}\n\nfunc (s *Status) setRound(r int) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\ts.Round = r\n}\n\nfunc (s *Status) setCase(c int) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\ts.Case = c\n}\n<commit_msg>etcd-tester: change var success->failed<commit_after>\/\/ Copyright 2015 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"sync\"\n\t\"time\"\n)\n\ntype tester struct {\n\tfailures []failure\n\tcluster *cluster\n\tlimit int\n\n\tstatus Status\n}\n\nfunc (tt *tester) runLoop() {\n\ttt.status.Since = time.Now()\n\ttt.status.RoundLimit = tt.limit\n\ttt.status.cluster = tt.cluster\n\tfor _, f := range tt.failures {\n\t\ttt.status.Failures = append(tt.status.Failures, f.Desc())\n\t}\n\tfor i := 0; i < tt.limit; i++ {\n\t\ttt.status.setRound(i)\n\t\troundTotalCounter.Inc()\n\n\t\tvar (\n\t\t\tcurrentRevision int64\n\t\t\tfailed bool\n\t\t)\n\t\tfor j, f := range tt.failures {\n\t\t\tcaseTotalCounter.WithLabelValues(f.Desc()).Inc()\n\t\t\ttt.status.setCase(j)\n\n\t\t\tif err := tt.cluster.WaitHealth(); err != nil {\n\t\t\t\tplog.Printf(\"[round#%d 
case#%d] wait full health error: %v\", i, j, err)\n\t\t\t\tif err := tt.cleanup(i, j); err != nil {\n\t\t\t\t\tplog.Printf(\"[round#%d case#%d] cleanup error: %v\", i, j, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tfailed = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tplog.Printf(\"[round#%d case#%d] start failure %s\", i, j, f.Desc())\n\n\t\t\tplog.Printf(\"[round#%d case#%d] start injecting failure...\", i, j)\n\t\t\tif err := f.Inject(tt.cluster, i); err != nil {\n\t\t\t\tplog.Printf(\"[round#%d case#%d] injection error: %v\", i, j, err)\n\t\t\t\tif err := tt.cleanup(i, j); err != nil {\n\t\t\t\t\tplog.Printf(\"[round#%d case#%d] cleanup error: %v\", i, j, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tfailed = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tplog.Printf(\"[round#%d case#%d] injected failure\", i, j)\n\n\t\t\tplog.Printf(\"[round#%d case#%d] start recovering failure...\", i, j)\n\t\t\tif err := f.Recover(tt.cluster, i); err != nil {\n\t\t\t\tplog.Printf(\"[round#%d case#%d] recovery error: %v\", i, j, err)\n\t\t\t\tif err := tt.cleanup(i, j); err != nil {\n\t\t\t\t\tplog.Printf(\"[round#%d case#%d] cleanup error: %v\", i, j, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tfailed = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tplog.Printf(\"[round#%d case#%d] recovered failure\", i, j)\n\n\t\t\tif tt.cluster.v2Only {\n\t\t\t\tplog.Printf(\"[round#%d case#%d] succeed!\", i, j)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tplog.Printf(\"[round#%d case#%d] canceling the stressers...\", i, j)\n\t\t\tfor _, s := range tt.cluster.Stressers {\n\t\t\t\ts.Cancel()\n\t\t\t}\n\t\t\tplog.Printf(\"[round#%d case#%d] canceled stressers\", i, j)\n\n\t\t\tplog.Printf(\"[round#%d case#%d] checking current revisions...\", i, j)\n\t\t\tvar (\n\t\t\t\trevs map[string]int64\n\t\t\t\thashes map[string]int64\n\t\t\t\trerr error\n\t\t\t\tok bool\n\t\t\t)\n\t\t\tfor k := 0; k < 5; k++ {\n\t\t\t\ttime.Sleep(time.Second)\n\n\t\t\t\trevs, hashes, rerr = tt.cluster.getRevisionHash()\n\t\t\t\tif rerr != nil {\n\t\t\t\t\tplog.Printf(\"[round#%d case#%d.%d] failed to get current revisions (%v)\", i, j, k, rerr)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif currentRevision, ok = getSameValue(revs); ok {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tplog.Printf(\"[round#%d case#%d.%d] inconsistent current revisions %+v\", i, j, k, revs)\n\t\t\t}\n\t\t\tif !ok || rerr != nil {\n\t\t\t\tplog.Printf(\"[round#%d case#%d] checking current revisions failed [revisions: %v]\", i, j, revs)\n\t\t\t\tif err := tt.cleanup(i, j); err != nil {\n\t\t\t\t\tplog.Printf(\"[round#%d case#%d] cleanup error: %v\", i, j, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tfailed = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tplog.Printf(\"[round#%d case#%d] all members are consistent with current revisions [revisions: %v]\", i, j, revs)\n\n\t\t\tplog.Printf(\"[round#%d case#%d] checking current storage hashes...\", i, j)\n\t\t\tif _, ok = getSameValue(hashes); !ok {\n\t\t\t\tplog.Printf(\"[round#%d case#%d] checking current storage hashes failed [hashes: %v]\", i, j, hashes)\n\t\t\t\tif err := tt.cleanup(i, j); err != nil {\n\t\t\t\t\tplog.Printf(\"[round#%d case#%d] cleanup error: %v\", i, j, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tfailed = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tplog.Printf(\"[round#%d case#%d] all members are consistent with storage hashes\", i, j)\n\n\t\t\tplog.Printf(\"[round#%d case#%d] restarting the stressers...\", i, j)\n\t\t\tfor _, s := range tt.cluster.Stressers {\n\t\t\t\tgo s.Stress()\n\t\t\t}\n\n\t\t\tplog.Printf(\"[round#%d case#%d] succeed!\", i, j)\n\t\t}\n\n\t\tif 
failed {\n\t\t\tcontinue\n\t\t}\n\t\trevToCompact := max(0, currentRevision-10000)\n\t\tplog.Printf(\"[round#%d] compacting storage at %d (current revision %d)\", i, revToCompact, currentRevision)\n\t\tif err := tt.cluster.compactKV(revToCompact); err != nil {\n\t\t\tplog.Printf(\"[round#%d] compactKV error (%v)\", i, err)\n\t\t\tif err := tt.cleanup(i, 0); err != nil {\n\t\t\t\tplog.Printf(\"[round#%d] cleanup error: %v\", i, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tplog.Printf(\"[round#%d] compacted storage\", i)\n\n\t\tplog.Printf(\"[round#%d] check compaction at %d\", i, revToCompact)\n\t\tif err := tt.cluster.checkCompact(revToCompact); err != nil {\n\t\t\tplog.Printf(\"[round#%d] checkCompact error (%v)\", i, err)\n\t\t\tif err := tt.cleanup(i, 0); err != nil {\n\t\t\t\tplog.Printf(\"[round#%d] cleanup error: %v\", i, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tplog.Printf(\"[round#%d] confirmed compaction at %d\", i, revToCompact)\n\n\t\tif i > 0 && i%500 == 0 { \/\/ every 500 rounds\n\t\t\tplog.Printf(\"[round#%d] canceling the stressers...\", i)\n\t\t\tfor _, s := range tt.cluster.Stressers {\n\t\t\t\ts.Cancel()\n\t\t\t}\n\t\t\tplog.Printf(\"[round#%d] canceled stressers\", i)\n\n\t\t\tplog.Printf(\"[round#%d] defragmenting...\", i)\n\t\t\tif err := tt.cluster.defrag(); err != nil {\n\t\t\t\tplog.Printf(\"[round#%d] defrag error (%v)\", i, err)\n\t\t\t\tif err := tt.cleanup(i, 0); err != nil {\n\t\t\t\t\tplog.Printf(\"[round#%d] cleanup error: %v\", i, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tplog.Printf(\"[round#%d] defragmented\", i)\n\n\t\t\tplog.Printf(\"[round#%d] restarting the stressers...\", i)\n\t\t\tfor _, s := range tt.cluster.Stressers {\n\t\t\t\tgo s.Stress()\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (tt *tester) cleanup(i, j int) error {\n\troundFailedTotalCounter.Inc()\n\tcaseFailedTotalCounter.WithLabelValues(tt.failures[j].Desc()).Inc()\n\n\tplog.Printf(\"[round#%d case#%d] cleaning up...\", i, j)\n\tif err := tt.cluster.Cleanup(); err != nil {\n\t\treturn err\n\t}\n\treturn tt.cluster.Bootstrap()\n}\n\ntype Status struct {\n\tSince time.Time\n\tFailures []string\n\tRoundLimit int\n\n\tCluster ClusterStatus\n\tcluster *cluster\n\n\tmu sync.Mutex \/\/ guards Round and Case\n\tRound int\n\tCase int\n}\n\nfunc (s *Status) setRound(r int) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\ts.Round = r\n}\n\nfunc (s *Status) setCase(c int) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\ts.Case = c\n}\n
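\/\/ NOTE (editorial): getSameValue and max are helpers defined elsewhere in this\n\/\/ package and are not shown in this excerpt. A minimal sketch of the assumed\n\/\/ behavior, for reference only:\n\/\/\n\/\/\tfunc getSameValue(vals map[string]int64) (int64, bool) {\n\/\/\t\tvar rv int64\n\/\/\t\tfor _, v := range vals {\n\/\/\t\t\tif rv == 0 {\n\/\/\t\t\t\trv = v\n\/\/\t\t\t}\n\/\/\t\t\tif rv != v {\n\/\/\t\t\t\treturn 0, false\n\/\/\t\t\t}\n\/\/\t\t}\n\/\/\t\treturn rv, true\n\/\/\t}\n\/\/\n\/\/\tfunc max(n1, n2 int64) int64 {\n\/\/\t\tif n1 > n2 {\n\/\/\t\t\treturn n1\n\/\/\t\t}\n\/\/\t\treturn n2\n\/\/\t}\n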
<|endoftext|>"} {"text":"<commit_before>\/\/ +build integration,etcd\n\npackage integration\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"testing\"\n\t\"time\"\n\n\tkapi \"k8s.io\/kubernetes\/pkg\/api\"\n\tkclient \"k8s.io\/kubernetes\/pkg\/client\"\n\t\"k8s.io\/kubernetes\/pkg\/fields\"\n\t\"k8s.io\/kubernetes\/pkg\/labels\"\n\n\tbuildapi \"github.com\/openshift\/origin\/pkg\/build\/api\"\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/server\/bootstrappolicy\"\n\timageapi \"github.com\/openshift\/origin\/pkg\/image\/api\"\n\ttestutil \"github.com\/openshift\/origin\/test\/util\"\n)\n\nfunc init() {\n\ttestutil.RequireEtcd()\n}\n\nfunc TestWebhookGitHubPushWithImage(t *testing.T) {\n\t_, clusterAdminKubeConfig, err := testutil.StartTestMaster()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tclusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\n\tclusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\n\terr = testutil.CreateNamespace(clusterAdminKubeConfig, testutil.Namespace())\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\n\tclusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)\n\tcheckErr(t, err)\n\n\tif err := testutil.WaitForServiceAccounts(clusterAdminKubeClient, testutil.Namespace(), []string{bootstrappolicy.BuilderServiceAccountName, bootstrappolicy.DefaultServiceAccountName}); err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\n\t\/\/ create imagerepo\n\timageStream := &imageapi.ImageStream{\n\t\tObjectMeta: kapi.ObjectMeta{Name: \"image-stream\"},\n\t\tSpec: imageapi.ImageStreamSpec{\n\t\t\tDockerImageRepository: \"registry:3000\/integration\/imageStream\",\n\t\t\tTags: map[string]imageapi.TagReference{\n\t\t\t\t\"validTag\": {\n\t\t\t\t\tFrom: &kapi.ObjectReference{\n\t\t\t\t\t\tKind: \"DockerImage\",\n\t\t\t\t\t\tName: \"registry:3000\/integration\/imageStream:success\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tif _, err := clusterAdminClient.ImageStreams(testutil.Namespace()).Create(imageStream); err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\tism := &imageapi.ImageStreamMapping{\n\t\tObjectMeta: kapi.ObjectMeta{Name: \"image-stream\"},\n\t\tTag: \"validTag\",\n\t\tImage: imageapi.Image{\n\t\t\tObjectMeta: kapi.ObjectMeta{\n\t\t\t\tName: \"myimage\",\n\t\t\t},\n\t\t\tDockerImageReference: \"registry:3000\/integration\/imageStream:success\",\n\t\t},\n\t}\n\tif err := clusterAdminClient.ImageStreamMappings(testutil.Namespace()).Create(ism); err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\t\/\/ create buildconfig\n\tbuildConfig := mockBuildConfigImageParms(\"originalImage\", \"imageStream\", \"validTag\")\n\n\tif _, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Create(buildConfig); err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\twatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), \"0\")\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't subscribe to builds: %v\", err)\n\t}\n\tdefer watch.Stop()\n\n\tfor _, s := range []string{\n\t\t\"\/oapi\/v1\/namespaces\/\" + testutil.Namespace() + \"\/buildconfigs\/pushbuild\/webhooks\/secret101\/github\",\n\t} {\n\n\t\t\/\/ trigger build event sending push notification\n\t\tpostFile(clusterAdminClient.RESTClient.Client, \"push\", \"pushevent.json\", clusterAdminClientConfig.Host+s, http.StatusOK, t)\n\n\t\tevent := <-watch.ResultChan()\n\t\tactual := event.Object.(*buildapi.Build)\n\n\t\t\/\/ FIXME: I think the build creation is fast and in some situations we miss\n\t\t\/\/ the BuildPhaseNew here. 
Note that this is not a bug, and in the future we should\n\t\t\/\/ move this to use a goroutine to capture all events.\n\t\tif actual.Status.Phase != buildapi.BuildPhaseNew && actual.Status.Phase != buildapi.BuildPhasePending {\n\t\t\tt.Errorf(\"Expected %s or %s, got %s\", buildapi.BuildPhaseNew, buildapi.BuildPhasePending, actual.Status.Phase)\n\t\t}\n\n\t\tif actual.Spec.Strategy.DockerStrategy.From.Name != \"originalImage\" {\n\t\t\tt.Errorf(\"Expected %s, got %s\", \"originalImage\", actual.Spec.Strategy.DockerStrategy.From.Name)\n\t\t}\n\t}\n}\n\nfunc TestWebhookGitHubPushWithImageStream(t *testing.T) {\n\t_, clusterAdminKubeConfig, err := testutil.StartTestMaster()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tclusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\n\tclusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\n\tclusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)\n\tcheckErr(t, err)\n\n\terr = testutil.CreateNamespace(clusterAdminKubeConfig, testutil.Namespace())\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\n\tif err := testutil.WaitForServiceAccounts(clusterAdminKubeClient, testutil.Namespace(), []string{bootstrappolicy.BuilderServiceAccountName, bootstrappolicy.DefaultServiceAccountName}); err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\n\t\/\/ create imagerepo\n\timageStream := &imageapi.ImageStream{\n\t\tObjectMeta: kapi.ObjectMeta{Name: \"image-stream\"},\n\t\tSpec: imageapi.ImageStreamSpec{\n\t\t\tDockerImageRepository: \"registry:3000\/integration\/imageStream\",\n\t\t\tTags: map[string]imageapi.TagReference{\n\t\t\t\t\"validTag\": {\n\t\t\t\t\tFrom: &kapi.ObjectReference{\n\t\t\t\t\t\tKind: \"DockerImage\",\n\t\t\t\t\t\tName: \"registry:3000\/integration\/imageStream:success\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tif _, err := clusterAdminClient.ImageStreams(testutil.Namespace()).Create(imageStream); err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\tism := &imageapi.ImageStreamMapping{\n\t\tObjectMeta: kapi.ObjectMeta{Name: \"image-stream\"},\n\t\tTag: \"validTag\",\n\t\tImage: imageapi.Image{\n\t\t\tObjectMeta: kapi.ObjectMeta{\n\t\t\t\tName: \"myimage\",\n\t\t\t},\n\t\t\tDockerImageReference: \"registry:3000\/integration\/imageStream:success\",\n\t\t},\n\t}\n\tif err := clusterAdminClient.ImageStreamMappings(testutil.Namespace()).Create(ism); err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\t\/\/ create buildconfig\n\tbuildConfig := mockBuildConfigImageStreamParms(\"originalImage\", \"image-stream\", \"validTag\")\n\n\tif _, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Create(buildConfig); err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\twatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), \"0\")\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't subscribe to builds: %v\", err)\n\t}\n\tdefer watch.Stop()\n\n\tfor _, s := range []string{\n\t\t\"\/oapi\/v1\/namespaces\/\" + testutil.Namespace() + \"\/buildconfigs\/pushbuild\/webhooks\/secret101\/github\",\n\t} {\n\n\t\t\/\/ trigger build event sending push notification\n\t\tpostFile(clusterAdminClient.RESTClient.Client, \"push\", \"pushevent.json\", 
clusterAdminClientConfig.Host+s, http.StatusOK, t)\n\n\t\tevent := <-watch.ResultChan()\n\t\tactual := event.Object.(*buildapi.Build)\n\n\t\t\/\/ FIXME: I think the build creation is fast and in some situations we miss\n\t\t\/\/ the BuildPhaseNew here. Note that this is not a bug, and in the future we should\n\t\t\/\/ move this to use a goroutine to capture all events.\n\t\tif actual.Status.Phase != buildapi.BuildPhaseNew && actual.Status.Phase != buildapi.BuildPhasePending {\n\t\t\tt.Errorf(\"Expected %s or %s, got %s\", buildapi.BuildPhaseNew, buildapi.BuildPhasePending, actual.Status.Phase)\n\t\t}\n\n\t\tif actual.Spec.Strategy.SourceStrategy.From.Name != \"registry:3000\/integration\/imageStream:success\" {\n\t\t\tt.Errorf(\"Expected %s, got %s\", \"registry:3000\/integration\/imageStream:success\", actual.Spec.Strategy.SourceStrategy.From.Name)\n\t\t}\n\t}\n}\n\nfunc TestWebhookGitHubPing(t *testing.T) {\n\ttestutil.DeleteAllEtcdKeys()\n\topenshift := NewTestBuildOpenshift(t)\n\tdefer openshift.Close()\n\n\topenshift.KubeClient.Namespaces().Create(&kapi.Namespace{\n\t\tObjectMeta: kapi.ObjectMeta{Name: testutil.Namespace()},\n\t})\n\n\t\/\/ create buildconfig\n\tbuildConfig := mockBuildConfigImageParms(\"originalImage\", \"imageStream\", \"validTag\")\n\tif _, err := openshift.Client.BuildConfigs(testutil.Namespace()).Create(buildConfig); err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\twatch, err := openshift.Client.Builds(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), \"0\")\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't subscribe to builds: %v\", err)\n\t}\n\tdefer watch.Stop()\n\n\tfor _, s := range []string{\n\t\t\"\/oapi\/v1\/namespaces\/\" + testutil.Namespace() + \"\/buildconfigs\/pushbuild\/webhooks\/secret101\/github\",\n\t} {\n\t\t\/\/ trigger build event sending push notification\n\t\tpostFile(&http.Client{}, \"ping\", \"pingevent.json\", openshift.server.URL+s, http.StatusOK, t)\n\n\t\t\/\/ TODO: improve negative testing\n\t\ttimer := time.NewTimer(time.Second \/ 2)\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\t\/\/ nothing should happen\n\t\tcase event := <-watch.ResultChan():\n\t\t\tbuild := event.Object.(*buildapi.Build)\n\t\t\tt.Fatalf(\"Unexpected build created: %#v\", build)\n\t\t}\n\t}\n}\n\nfunc postFile(client kclient.HTTPClient, event, filename, url string, expStatusCode int, t *testing.T) {\n\tdata, err := ioutil.ReadFile(\"..\/..\/pkg\/build\/webhook\/github\/fixtures\/\" + filename)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to open %s: %v\", filename, err)\n\t}\n\treq, err := http.NewRequest(\"POST\", url, bytes.NewReader(data))\n\tif err != nil {\n\t\tt.Fatalf(\"Error creating POST request: %v\", err)\n\t}\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\treq.Header.Add(\"User-Agent\", \"GitHub-Hookshot\/github\")\n\treq.Header.Add(\"X-Github-Event\", event)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed posting webhook: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\tif resp.StatusCode != expStatusCode {\n\t\tt.Errorf(\"Wrong response code, expecting %d, got %d: %s!\", expStatusCode, resp.StatusCode, string(body))\n\t}\n}\n\nfunc mockBuildConfigImageParms(imageName, imageStream, imageTag string) *buildapi.BuildConfig {\n\treturn &buildapi.BuildConfig{\n\t\tObjectMeta: kapi.ObjectMeta{\n\t\t\tName: \"pushbuild\",\n\t\t},\n\t\tSpec: buildapi.BuildConfigSpec{\n\t\t\tTriggers: []buildapi.BuildTriggerPolicy{\n\t\t\t\t{\n\t\t\t\t\tType: 
buildapi.GitHubWebHookBuildTriggerType,\n\t\t\t\t\tGitHubWebHook: &buildapi.WebHookTrigger{\n\t\t\t\t\t\tSecret: \"secret101\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: buildapi.ImageChangeBuildTriggerType,\n\t\t\t\t\tImageChange: &buildapi.ImageChangeTrigger{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuildSpec: buildapi.BuildSpec{\n\t\t\t\tSource: buildapi.BuildSource{\n\t\t\t\t\tType: buildapi.BuildSourceGit,\n\t\t\t\t\tGit: &buildapi.GitBuildSource{\n\t\t\t\t\t\tURI: \"http:\/\/my.docker\/build\",\n\t\t\t\t\t},\n\t\t\t\t\tContextDir: \"context\",\n\t\t\t\t},\n\t\t\t\tStrategy: buildapi.BuildStrategy{\n\t\t\t\t\tType: buildapi.DockerBuildStrategyType,\n\t\t\t\t\tDockerStrategy: &buildapi.DockerBuildStrategy{\n\t\t\t\t\t\tFrom: &kapi.ObjectReference{\n\t\t\t\t\t\t\tKind: \"DockerImage\",\n\t\t\t\t\t\t\tName: imageName,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tOutput: buildapi.BuildOutput{\n\t\t\t\t\tTo: &kapi.ObjectReference{\n\t\t\t\t\t\tKind: \"DockerImage\",\n\t\t\t\t\t\tName: \"namespace\/builtimage\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc mockBuildConfigImageStreamParms(imageName, imageStream, imageTag string) *buildapi.BuildConfig {\n\treturn &buildapi.BuildConfig{\n\t\tObjectMeta: kapi.ObjectMeta{\n\t\t\tName: \"pushbuild\",\n\t\t},\n\t\tSpec: buildapi.BuildConfigSpec{\n\t\t\tTriggers: []buildapi.BuildTriggerPolicy{\n\t\t\t\t{\n\t\t\t\t\tType: buildapi.GitHubWebHookBuildTriggerType,\n\t\t\t\t\tGitHubWebHook: &buildapi.WebHookTrigger{\n\t\t\t\t\t\tSecret: \"secret101\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: buildapi.ImageChangeBuildTriggerType,\n\t\t\t\t\tImageChange: &buildapi.ImageChangeTrigger{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuildSpec: buildapi.BuildSpec{\n\t\t\t\tSource: buildapi.BuildSource{\n\t\t\t\t\tType: buildapi.BuildSourceGit,\n\t\t\t\t\tGit: &buildapi.GitBuildSource{\n\t\t\t\t\t\tURI: \"http:\/\/my.docker\/build\",\n\t\t\t\t\t},\n\t\t\t\t\tContextDir: \"context\",\n\t\t\t\t},\n\t\t\t\tStrategy: buildapi.BuildStrategy{\n\t\t\t\t\tType: buildapi.SourceBuildStrategyType,\n\t\t\t\t\tSourceStrategy: &buildapi.SourceBuildStrategy{\n\t\t\t\t\t\tFrom: kapi.ObjectReference{\n\t\t\t\t\t\t\tKind: \"ImageStreamTag\",\n\t\t\t\t\t\t\tName: imageStream + \":\" + imageTag,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tOutput: buildapi.BuildOutput{\n\t\t\t\t\tTo: &kapi.ObjectReference{\n\t\t\t\t\t\tKind: \"DockerImage\",\n\t\t\t\t\t\tName: \"namespace\/builtimage\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n<commit_msg>Remove flakiness in webhook test<commit_after>\/\/ +build integration,etcd\n\npackage integration\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"testing\"\n\t\"time\"\n\n\tkapi \"k8s.io\/kubernetes\/pkg\/api\"\n\tkclient \"k8s.io\/kubernetes\/pkg\/client\"\n\t\"k8s.io\/kubernetes\/pkg\/fields\"\n\t\"k8s.io\/kubernetes\/pkg\/labels\"\n\n\tbuildapi \"github.com\/openshift\/origin\/pkg\/build\/api\"\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/server\/bootstrappolicy\"\n\timageapi \"github.com\/openshift\/origin\/pkg\/image\/api\"\n\ttestutil \"github.com\/openshift\/origin\/test\/util\"\n)\n\nfunc init() {\n\ttestutil.RequireEtcd()\n}\n\nfunc TestWebhookGitHubPushWithImage(t *testing.T) {\n\t_, clusterAdminKubeConfig, err := testutil.StartTestMaster()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tclusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", 
err)\n\t}\n\n\tclusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\n\terr = testutil.CreateNamespace(clusterAdminKubeConfig, testutil.Namespace())\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\n\tclusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)\n\tcheckErr(t, err)\n\n\tif err := testutil.WaitForServiceAccounts(clusterAdminKubeClient, testutil.Namespace(), []string{bootstrappolicy.BuilderServiceAccountName, bootstrappolicy.DefaultServiceAccountName}); err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\n\t\/\/ create imagerepo\n\timageStream := &imageapi.ImageStream{\n\t\tObjectMeta: kapi.ObjectMeta{Name: \"image-stream\"},\n\t\tSpec: imageapi.ImageStreamSpec{\n\t\t\tDockerImageRepository: \"registry:3000\/integration\/imageStream\",\n\t\t\tTags: map[string]imageapi.TagReference{\n\t\t\t\t\"validTag\": {\n\t\t\t\t\tFrom: &kapi.ObjectReference{\n\t\t\t\t\t\tKind: \"DockerImage\",\n\t\t\t\t\t\tName: \"registry:3000\/integration\/imageStream:success\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tif _, err := clusterAdminClient.ImageStreams(testutil.Namespace()).Create(imageStream); err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\tism := &imageapi.ImageStreamMapping{\n\t\tObjectMeta: kapi.ObjectMeta{Name: \"image-stream\"},\n\t\tTag: \"validTag\",\n\t\tImage: imageapi.Image{\n\t\t\tObjectMeta: kapi.ObjectMeta{\n\t\t\t\tName: \"myimage\",\n\t\t\t},\n\t\t\tDockerImageReference: \"registry:3000\/integration\/imageStream:success\",\n\t\t},\n\t}\n\tif err := clusterAdminClient.ImageStreamMappings(testutil.Namespace()).Create(ism); err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\t\/\/ create buildconfig\n\tbuildConfig := mockBuildConfigImageParms(\"originalImage\", \"imageStream\", \"validTag\")\n\n\tif _, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Create(buildConfig); err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\twatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), \"0\")\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't subscribe to builds: %v\", err)\n\t}\n\tdefer watch.Stop()\n\n\tfor _, s := range []string{\n\t\t\"\/oapi\/v1\/namespaces\/\" + testutil.Namespace() + \"\/buildconfigs\/pushbuild\/webhooks\/secret101\/github\",\n\t} {\n\n\t\t\/\/ trigger build event sending push notification\n\t\tpostFile(clusterAdminClient.RESTClient.Client, \"push\", \"pushevent.json\", clusterAdminClientConfig.Host+s, http.StatusOK, t)\n\n\t\tevent := <-watch.ResultChan()\n\t\tactual := event.Object.(*buildapi.Build)\n\n\t\t\/\/ FIXME: I think the build creation is fast and in some situations we miss\n\t\t\/\/ the BuildPhaseNew here. Note that this is not a bug, and in the future we should\n\t\t\/\/ move this to use a goroutine to capture all events.
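\n\t\t\/\/\n\t\t\/\/ NOTE (editorial sketch, not part of the original test): a slightly more\n\t\t\/\/ race-tolerant assertion could also accept a build that has already\n\t\t\/\/ started running, for example:\n\t\t\/\/\n\t\t\/\/\tswitch actual.Status.Phase {\n\t\t\/\/\tcase buildapi.BuildPhaseNew, buildapi.BuildPhasePending, buildapi.BuildPhaseRunning:\n\t\t\/\/\t\t\/\/ expected early phases\n\t\t\/\/\tdefault:\n\t\t\/\/\t\tt.Errorf(\"unexpected phase %s\", actual.Status.Phase)\n\t\t\/\/\t}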
\n\t\tif actual.Status.Phase != buildapi.BuildPhaseNew && actual.Status.Phase != buildapi.BuildPhasePending {\n\t\t\tt.Errorf(\"Expected %s or %s, got %s\", buildapi.BuildPhaseNew, buildapi.BuildPhasePending, actual.Status.Phase)\n\t\t}\n\n\t\tif actual.Spec.Strategy.DockerStrategy.From.Name != \"originalImage\" {\n\t\t\tt.Errorf(\"Expected %s, got %s\", \"originalImage\", actual.Spec.Strategy.DockerStrategy.From.Name)\n\t\t}\n\t}\n}\n\nfunc TestWebhookGitHubPushWithImageStream(t *testing.T) {\n\t_, clusterAdminKubeConfig, err := testutil.StartTestMaster()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tclusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\n\tclusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\n\tclusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)\n\tcheckErr(t, err)\n\n\terr = testutil.CreateNamespace(clusterAdminKubeConfig, testutil.Namespace())\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\n\tif err := testutil.WaitForServiceAccounts(clusterAdminKubeClient, testutil.Namespace(), []string{bootstrappolicy.BuilderServiceAccountName, bootstrappolicy.DefaultServiceAccountName}); err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\n\t\/\/ create imagerepo\n\timageStream := &imageapi.ImageStream{\n\t\tObjectMeta: kapi.ObjectMeta{Name: \"image-stream\"},\n\t\tSpec: imageapi.ImageStreamSpec{\n\t\t\tDockerImageRepository: \"registry:3000\/integration\/imageStream\",\n\t\t\tTags: map[string]imageapi.TagReference{\n\t\t\t\t\"validTag\": {\n\t\t\t\t\tFrom: &kapi.ObjectReference{\n\t\t\t\t\t\tKind: \"DockerImage\",\n\t\t\t\t\t\tName: \"registry:3000\/integration\/imageStream:success\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tif _, err := clusterAdminClient.ImageStreams(testutil.Namespace()).Create(imageStream); err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\tism := &imageapi.ImageStreamMapping{\n\t\tObjectMeta: kapi.ObjectMeta{Name: \"image-stream\"},\n\t\tTag: \"validTag\",\n\t\tImage: imageapi.Image{\n\t\t\tObjectMeta: kapi.ObjectMeta{\n\t\t\t\tName: \"myimage\",\n\t\t\t},\n\t\t\tDockerImageReference: \"registry:3000\/integration\/imageStream:success\",\n\t\t},\n\t}\n\tif err := clusterAdminClient.ImageStreamMappings(testutil.Namespace()).Create(ism); err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\t\/\/ create buildconfig\n\tbuildConfig := mockBuildConfigImageStreamParms(\"originalImage\", \"image-stream\", \"validTag\")\n\n\tif _, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Create(buildConfig); err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\twatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), \"0\")\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't subscribe to builds: %v\", err)\n\t}\n\tdefer watch.Stop()
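\n\n\t\/\/ The webhook endpoint embeds the trigger secret and the webhook plugin type\n\t\/\/ in the URL path:\n\t\/\/ \/oapi\/v1\/namespaces\/<namespace>\/buildconfigs\/<name>\/webhooks\/<secret>\/<plugin>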
clusterAdminClientConfig.Host+s, http.StatusOK, t)\n\n\t\tevent := <-watch.ResultChan()\n\t\tactual := event.Object.(*buildapi.Build)\n\n\t\t\/\/ FIXME: I think the build creation is fast and in some situations we miss\n\t\t\/\/ the BuildPhaseNew here. Note that this is not a bug; in the future we should\n\t\t\/\/ move this to use a goroutine to capture all events.\n\t\tif actual.Status.Phase != buildapi.BuildPhaseNew && actual.Status.Phase != buildapi.BuildPhasePending {\n\t\t\tt.Errorf(\"Expected %s or %s, got %s\", buildapi.BuildPhaseNew, buildapi.BuildPhasePending, actual.Status.Phase)\n\t\t}\n\n\t\tif actual.Spec.Strategy.SourceStrategy.From.Name != \"registry:3000\/integration\/imageStream:success\" {\n\t\t\tt.Errorf(\"Expected %s, got %s\", \"registry:3000\/integration\/imageStream:success\", actual.Spec.Strategy.SourceStrategy.From.Name)\n\t\t}\n\t}\n}\n\nfunc TestWebhookGitHubPing(t *testing.T) {\n\ttestutil.DeleteAllEtcdKeys()\n\topenshift := NewTestBuildOpenshift(t)\n\tdefer openshift.Close()\n\n\topenshift.KubeClient.Namespaces().Create(&kapi.Namespace{\n\t\tObjectMeta: kapi.ObjectMeta{Name: testutil.Namespace()},\n\t})\n\n\t\/\/ create buildconfig\n\tbuildConfig := mockBuildConfigImageParms(\"originalImage\", \"imageStream\", \"validTag\")\n\tif _, err := openshift.Client.BuildConfigs(testutil.Namespace()).Create(buildConfig); err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\twatch, err := openshift.Client.Builds(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), \"0\")\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't subscribe to builds: %v\", err)\n\t}\n\tdefer watch.Stop()\n\n\tfor _, s := range []string{\n\t\t\"\/oapi\/v1\/namespaces\/\" + testutil.Namespace() + \"\/buildconfigs\/pushbuild\/webhooks\/secret101\/github\",\n\t} {\n\t\t\/\/ trigger build event sending push notification\n\t\tpostFile(&http.Client{}, \"ping\", \"pingevent.json\", openshift.server.URL+s, http.StatusOK, t)\n\n\t\t\/\/ TODO: improve negative testing\n\t\ttimer := time.NewTimer(time.Second \/ 2)\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\t\/\/ nothing should happen\n\t\tcase event := <-watch.ResultChan():\n\t\t\tbuild := event.Object.(*buildapi.Build)\n\t\t\tt.Fatalf(\"Unexpected build created: %#v\", build)\n\t\t}\n\t}\n}\n\nfunc postFile(client kclient.HTTPClient, event, filename, url string, expStatusCode int, t *testing.T) {\n\tdata, err := ioutil.ReadFile(\"..\/..\/pkg\/build\/webhook\/github\/fixtures\/\" + filename)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to open %s: %v\", filename, err)\n\t}\n\treq, err := http.NewRequest(\"POST\", url, bytes.NewReader(data))\n\tif err != nil {\n\t\tt.Fatalf(\"Error creating POST request: %v\", err)\n\t}\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\treq.Header.Add(\"User-Agent\", \"GitHub-Hookshot\/github\")\n\treq.Header.Add(\"X-Github-Event\", event)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed posting webhook: %v\", err)\n\t}\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\tif resp.StatusCode != expStatusCode {\n\t\tt.Errorf(\"Wrong response code, expecting %d, got %d: %s!\", expStatusCode, resp.StatusCode, string(body))\n\t}\n}\n\nfunc mockBuildConfigImageParms(imageName, imageStream, imageTag string) *buildapi.BuildConfig {\n\treturn &buildapi.BuildConfig{\n\t\tObjectMeta: kapi.ObjectMeta{\n\t\t\tName: \"pushbuild\",\n\t\t},\n\t\tSpec: buildapi.BuildConfigSpec{\n\t\t\tTriggers: []buildapi.BuildTriggerPolicy{\n\t\t\t\t{\n\t\t\t\t\tType: 
buildapi.GitHubWebHookBuildTriggerType,\n\t\t\t\t\tGitHubWebHook: &buildapi.WebHookTrigger{\n\t\t\t\t\t\tSecret: \"secret101\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuildSpec: buildapi.BuildSpec{\n\t\t\t\tSource: buildapi.BuildSource{\n\t\t\t\t\tType: buildapi.BuildSourceGit,\n\t\t\t\t\tGit: &buildapi.GitBuildSource{\n\t\t\t\t\t\tURI: \"http:\/\/my.docker\/build\",\n\t\t\t\t\t},\n\t\t\t\t\tContextDir: \"context\",\n\t\t\t\t},\n\t\t\t\tStrategy: buildapi.BuildStrategy{\n\t\t\t\t\tType: buildapi.DockerBuildStrategyType,\n\t\t\t\t\tDockerStrategy: &buildapi.DockerBuildStrategy{\n\t\t\t\t\t\tFrom: &kapi.ObjectReference{\n\t\t\t\t\t\t\tKind: \"DockerImage\",\n\t\t\t\t\t\t\tName: imageName,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tOutput: buildapi.BuildOutput{\n\t\t\t\t\tTo: &kapi.ObjectReference{\n\t\t\t\t\t\tKind: \"DockerImage\",\n\t\t\t\t\t\tName: \"namespace\/builtimage\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc mockBuildConfigImageStreamParms(imageName, imageStream, imageTag string) *buildapi.BuildConfig {\n\treturn &buildapi.BuildConfig{\n\t\tObjectMeta: kapi.ObjectMeta{\n\t\t\tName: \"pushbuild\",\n\t\t},\n\t\tSpec: buildapi.BuildConfigSpec{\n\t\t\tTriggers: []buildapi.BuildTriggerPolicy{\n\t\t\t\t{\n\t\t\t\t\tType: buildapi.GitHubWebHookBuildTriggerType,\n\t\t\t\t\tGitHubWebHook: &buildapi.WebHookTrigger{\n\t\t\t\t\t\tSecret: \"secret101\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuildSpec: buildapi.BuildSpec{\n\t\t\t\tSource: buildapi.BuildSource{\n\t\t\t\t\tType: buildapi.BuildSourceGit,\n\t\t\t\t\tGit: &buildapi.GitBuildSource{\n\t\t\t\t\t\tURI: \"http:\/\/my.docker\/build\",\n\t\t\t\t\t},\n\t\t\t\t\tContextDir: \"context\",\n\t\t\t\t},\n\t\t\t\tStrategy: buildapi.BuildStrategy{\n\t\t\t\t\tType: buildapi.SourceBuildStrategyType,\n\t\t\t\t\tSourceStrategy: &buildapi.SourceBuildStrategy{\n\t\t\t\t\t\tFrom: kapi.ObjectReference{\n\t\t\t\t\t\t\tKind: \"ImageStreamTag\",\n\t\t\t\t\t\t\tName: imageStream + \":\" + imageTag,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tOutput: buildapi.BuildOutput{\n\t\t\t\t\tTo: &kapi.ObjectReference{\n\t\t\t\t\t\tKind: \"DockerImage\",\n\t\t\t\t\t\tName: \"namespace\/builtimage\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n<|endoftext|>"}
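The FIXME in the webhook tests above observes that a fast build can race the single synchronous receive from the watch channel, so the short-lived "New" phase may never be observed. Below is a minimal, self-contained sketch of the goroutine-based capture the comment suggests; buildEvent and the plain channel are illustrative stand-ins for the repository's watch types, not its actual API.

package main

import "fmt"

// buildEvent stands in for the watch event type used in the tests; real code
// would read from the watch's ResultChan() instead of a plain channel.
type buildEvent struct{ Phase string }

// capturePhases drains events in a dedicated goroutine so that short-lived
// phases cannot be missed while the test body is busy posting the webhook.
func capturePhases(events <-chan buildEvent) <-chan []string {
	result := make(chan []string, 1)
	go func() {
		var seen []string
		for e := range events { // runs until the caller closes events
			seen = append(seen, e.Phase)
		}
		result <- seen
	}()
	return result
}

func main() {
	events := make(chan buildEvent, 4)
	phases := capturePhases(events)
	events <- buildEvent{Phase: "New"}
	events <- buildEvent{Phase: "Pending"}
	close(events)
	fmt.Println(<-phases) // prints [New Pending]
}

Because every event is recorded before the assertions run, ordering checks no longer depend on scheduler timing.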
{"text":"<commit_before>\/\/ Copyright 2015 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package cancellable provides a helper function to cancel http requests.\npackage cancellable\n\nimport (\n\t\"io\"\n\t\"net\/http\"\n\n\t\"github.com\/docker\/engine-api\/client\/transport\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc nop() {}\n\nvar (\n\ttestHookContextDoneBeforeHeaders = nop\n\ttestHookDoReturned = nop\n\ttestHookDidBodyClose = nop\n)\n\n\/\/ Do sends an HTTP request with the provided transport.Sender and returns an HTTP response.\n\/\/ If the client is nil, http.DefaultClient is used.\n\/\/ If the context is canceled or times out, ctx.Err() will be returned.\n\/\/\n\/\/ FORK INFORMATION:\n\/\/\n\/\/ This function deviates from the upstream version in golang.org\/x\/net\/context\/ctxhttp by\n\/\/ taking a Sender interface rather than a *http.Client directly. That allows us to use\n\/\/ this function with mocked clients and hijacked connections.\nfunc Do(ctx context.Context, client transport.Sender, req *http.Request) (*http.Response, error) {\n\tif client == nil {\n\t\tclient = http.DefaultClient\n\t}\n\n\t\/\/ Request cancelation changed in Go 1.5, see canceler.go and canceler_go14.go.\n\tcancel := canceler(client, req)\n\n\ttype responseAndError struct {\n\t\tresp *http.Response\n\t\terr error\n\t}\n\tresult := make(chan responseAndError, 1)\n\n\tgo func() {\n\t\tresp, err := client.Do(req)\n\t\ttestHookDoReturned()\n\t\tresult <- responseAndError{resp, err}\n\t}()\n\n\tvar resp *http.Response\n\n\tselect {\n\tcase <-ctx.Done():\n\t\ttestHookContextDoneBeforeHeaders()\n\t\tcancel()\n\t\t\/\/ Clean up after the goroutine calling client.Do:\n\t\tgo func() {\n\t\t\tif r := <-result; r.resp != nil && r.resp.Body != nil {\n\t\t\t\ttestHookDidBodyClose()\n\t\t\t\tr.resp.Body.Close()\n\t\t\t}\n\t\t}()\n\t\treturn nil, ctx.Err()\n\tcase r := <-result:\n\t\tvar err error\n\t\tresp, err = r.resp, r.err\n\t\tif err != nil {\n\t\t\treturn resp, err\n\t\t}\n\t}\n\n\tc := make(chan struct{})\n\tgo func() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tcancel()\n\t\tcase <-c:\n\t\t\t\/\/ The response's Body is closed.\n\t\t}\n\t}()\n\tresp.Body = &notifyingReader{resp.Body, c}\n\n\treturn resp, nil\n}\n\n\/\/ notifyingReader is an io.ReadCloser that closes the notify channel after\n\/\/ Close is called or a Read fails on the underlying ReadCloser.\ntype notifyingReader struct {\n\tio.ReadCloser\n\tnotify chan<- struct{}\n}\n\nfunc (r *notifyingReader) Read(p []byte) (int, error) {\n\tn, err := r.ReadCloser.Read(p)\n\tif err != nil && r.notify != nil {\n\t\tclose(r.notify)\n\t\tr.notify = nil\n\t}\n\treturn n, err\n}\n\nfunc (r *notifyingReader) Close() error {\n\terr := r.ReadCloser.Close()\n\tif r.notify != nil {\n\t\tclose(r.notify)\n\t\tr.notify = nil\n\t}\n\treturn err\n}\n<commit_msg>cancellable: use sync.Once to prevent double channel close<commit_after>\/\/ Copyright 2015 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package cancellable provides a helper function to cancel http requests.\npackage cancellable\n\nimport (\n\t\"io\"\n\t\"net\/http\"\n\t\"sync\"\n\n\t\"github.com\/docker\/engine-api\/client\/transport\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc nop() {}\n\nvar (\n\ttestHookContextDoneBeforeHeaders = nop\n\ttestHookDoReturned = nop\n\ttestHookDidBodyClose = nop\n)\n\n\/\/ Do sends an HTTP request with the provided transport.Sender and returns an HTTP response.\n\/\/ If the client is nil, http.DefaultClient is used.\n\/\/ If the context is canceled or times out, ctx.Err() will be returned.\n\/\/\n\/\/ FORK INFORMATION:\n\/\/\n\/\/ This function deviates from the upstream version in golang.org\/x\/net\/context\/ctxhttp by\n\/\/ taking a Sender interface rather than a *http.Client directly. 
That allows us to use\n\/\/ this function with mocked clients and hijacked connections.\nfunc Do(ctx context.Context, client transport.Sender, req *http.Request) (*http.Response, error) {\n\tif client == nil {\n\t\tclient = http.DefaultClient\n\t}\n\n\t\/\/ Request cancelation changed in Go 1.5, see canceler.go and canceler_go14.go.\n\tcancel := canceler(client, req)\n\n\ttype responseAndError struct {\n\t\tresp *http.Response\n\t\terr error\n\t}\n\tresult := make(chan responseAndError, 1)\n\n\tgo func() {\n\t\tresp, err := client.Do(req)\n\t\ttestHookDoReturned()\n\t\tresult <- responseAndError{resp, err}\n\t}()\n\n\tvar resp *http.Response\n\n\tselect {\n\tcase <-ctx.Done():\n\t\ttestHookContextDoneBeforeHeaders()\n\t\tcancel()\n\t\t\/\/ Clean up after the goroutine calling client.Do:\n\t\tgo func() {\n\t\t\tif r := <-result; r.resp != nil && r.resp.Body != nil {\n\t\t\t\ttestHookDidBodyClose()\n\t\t\t\tr.resp.Body.Close()\n\t\t\t}\n\t\t}()\n\t\treturn nil, ctx.Err()\n\tcase r := <-result:\n\t\tvar err error\n\t\tresp, err = r.resp, r.err\n\t\tif err != nil {\n\t\t\treturn resp, err\n\t\t}\n\t}\n\n\tc := make(chan struct{})\n\tgo func() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tcancel()\n\t\tcase <-c:\n\t\t\t\/\/ The response's Body is closed.\n\t\t}\n\t}()\n\tresp.Body = &notifyingReader{ReadCloser: resp.Body, notify: c}\n\n\treturn resp, nil\n}\n\n\/\/ notifyingReader is an io.ReadCloser that closes the notify channel after\n\/\/ Close is called or a Read fails on the underlying ReadCloser.\ntype notifyingReader struct {\n\tio.ReadCloser\n\tnotify chan<- struct{}\n\tnotifyOnce sync.Once\n}\n\nfunc (r *notifyingReader) Read(p []byte) (int, error) {\n\tn, err := r.ReadCloser.Read(p)\n\tif err != nil {\n\t\tr.notifyOnce.Do(func() {\n\t\t\tclose(r.notify)\n\t\t})\n\t}\n\treturn n, err\n}\n\nfunc (r *notifyingReader) Close() error {\n\terr := r.ReadCloser.Close()\n\tr.notifyOnce.Do(func() {\n\t\tclose(r.notify)\n\t})\n\treturn err\n}\n<|endoftext|>"}
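The commit above swaps the bare close(r.notify) calls for sync.Once because a failed Read and an explicit Close can both try to signal the same channel, and closing an already-closed channel panics at runtime. Here is a runnable sketch of that pattern in isolation; closeNotifier and signal are hypothetical names, not part of the package:

package main

import (
	"fmt"
	"sync"
)

// closeNotifier mirrors the fixed notifyingReader: two independent code paths
// may both decide to close notify, and sync.Once makes that close idempotent
// without any extra locking or nil-ing of the channel field.
type closeNotifier struct {
	notify chan struct{}
	once   sync.Once
}

func (c *closeNotifier) signal() {
	c.once.Do(func() { close(c.notify) })
}

func main() {
	n := &closeNotifier{notify: make(chan struct{})}
	n.signal()
	n.signal() // safe; a second bare close(n.notify) would panic here
	<-n.notify // receiving from the closed channel returns immediately
	fmt.Println("closed exactly once")
}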
{"text":"<commit_before>\/\/ Copyright 2017 Google Inc. All Rights Reserved.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"syscall\"\n\t\"time\"\n\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/fields\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/strategicpatch\"\n\t\"k8s.io\/apimachinery\/pkg\/watch\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\t_ \"k8s.io\/client-go\/plugin\/pkg\/client\/auth\/gcp\"\n\t\"k8s.io\/client-go\/rest\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\t\"k8s.io\/client-go\/tools\/clientcmd\"\n)\n\nconst (\n\tannotation = \"iam.cloud.google.com\/account-name\"\n\tinitializerName = \"serviceaccounts.cloud.google.com\"\n\tdefaultNamespace = \"default\"\n\tresyncPeriod = 30 * time.Second\n\n\tsecretMountPath = \"\/var\/run\/secrets\/gcp\/\"\n\tserviceAccountFile = \"key.json\"\n)\n\ntype config struct {\n\tContainers []corev1.Container\n\tVolumes []corev1.Volume\n}\n\nfunc main() {\n\tlog.Println(\"Starting the GCP Service accounts initializer...\")\n\n\tlog.Println(\"Using in-cluster token discovery\")\n\tclusterConfig, err := rest.InClusterConfig()\n\tif err != nil {\n\t\tlog.Printf(\"failed to use in-cluster token: %+v\", err)\n\t\tkubecfg := filepath.Join(os.Getenv(\"HOME\"), \".kube\", \"config\")\n\t\tlog.Printf(\"Using kubeconfig file at %s\", kubecfg)\n\t\tclusterConfig, err = clientcmd.BuildConfigFromFlags(\"\", kubecfg)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"failed to find kubeconfig file: %+v\", err)\n\t\t\tlog.Fatal(\"No authentication is available.\")\n\t\t}\n\t}\n\n\tclientset, err := kubernetes.NewForConfig(clusterConfig)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to initialize kubernetes client: %+v\", err)\n\t}\n\n\t\/\/ Watch uninitialized Pods in all namespaces.\n\trestClient := clientset.CoreV1().RESTClient()\n\twatchlist := cache.NewListWatchFromClient(restClient,\n\t\t\"pods\", corev1.NamespaceAll, fields.Everything())\n\n\t\/\/ Wrap the returned watchlist to workaround the inability to include\n\t\/\/ the `IncludeUninitialized` list option when setting up watch clients.\n\tincludeUninitializedWatchlist := &cache.ListWatch{\n\t\tListFunc: func(options metav1.ListOptions) (runtime.Object, error) {\n\t\t\toptions.IncludeUninitialized = true\n\t\t\treturn watchlist.List(options)\n\t\t},\n\t\tWatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\toptions.IncludeUninitialized = true\n\t\t\treturn watchlist.Watch(options)\n\t\t},\n\t}\n\n\t_, controller := cache.NewInformer(includeUninitializedWatchlist,\n\t\t&corev1.Pod{},\n\t\tresyncPeriod,\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\tpod, ok := obj.(*corev1.Pod)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Fatalf(\"watch returned non-pod object: %T\", pod)\n\t\t\t\t}\n\n\t\t\t\tif 
!needsInitialization(pod) {\n\t\t\t\t\tlog.Printf(\"does not need initialization: pod\/%s\", pod.GetName())\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tmodifiedPod, err := clonePod(pod)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"error cloning pod object: %+v\", err)\n\t\t\t\t}\n\n\t\t\t\tif !injectPod(modifiedPod) {\n\t\t\t\t\tlog.Printf(\"no injection for pod\/%s\", pod.GetName())\n\t\t\t\t}\n\n\t\t\t\tif err := patchPod(pod, modifiedPod, clientset); err != nil {\n\t\t\t\t\tlog.Printf(\"error saving pod\/%s: %+v\", pod.GetName(), err)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Printf(\"initialized pod\/%s\", pod.GetName())\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t)\n\n\tstop := make(chan struct{})\n\tgo controller.Run(stop)\n\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)\n\t<-signalChan\n\n\tlog.Println(\"Shutdown signal received, exiting...\")\n\tclose(stop)\n}\n\n\/\/ needsInitialization determines if the pod is required to be initialized\n\/\/ currently by this initializer.\nfunc needsInitialization(pod *corev1.Pod) bool {\n\tinitializers := pod.ObjectMeta.GetInitializers()\n\treturn initializers != nil &&\n\t\tlen(initializers.Pending) > 0 &&\n\t\tinitializers.Pending[0].Name == initializerName\n}\n\n\/\/ clonePod creates a deep copy of pod for modification.\nfunc clonePod(pod *corev1.Pod) (*corev1.Pod, error) {\n\to, err := runtime.NewScheme().DeepCopy(pod)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to deepcopy: %+v\", err)\n\t}\n\tp, ok := o.(*corev1.Pod)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"cloned object is not a Pod: %T\", p)\n\t}\n\treturn p, nil\n}\n\n\/\/ removeSelfPendingInitializer removes the first element from pending\n\/\/ initializers list of in-memory pod value.\nfunc removeSelfPendingInitializer(pod *corev1.Pod) {\n\tpendingInitializers := pod.ObjectMeta.GetInitializers().Pending\n\tif len(pendingInitializers) == 1 {\n\t\tpod.ObjectMeta.Initializers = nil\n\t} else {\n\t\tpod.ObjectMeta.Initializers.Pending = append(\n\t\t\tpendingInitializers[:0], pendingInitializers[1:]...)\n\t}\n}\n\n\/\/ injectPod makes modifications to in-memory pod value to inject the service\n\/\/ account. 
Returns whether any modifications have been made.\nfunc injectPod(pod *corev1.Pod) bool {\n\tserviceAccountName, ok := pod.ObjectMeta.GetAnnotations()[annotation]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tfor i, c := range pod.Spec.Containers {\n\t\tvolName := fmt.Sprintf(\"gcp-%s\", serviceAccountName)\n\t\tmountPath := path.Join(secretMountPath, serviceAccountName)\n\t\tkeyPath := path.Join(mountPath, serviceAccountFile)\n\n\t\tpod.Spec.Volumes = append(pod.Spec.Volumes,\n\t\t\tcorev1.Volume{\n\t\t\t\tName: volName,\n\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\tSecret: &corev1.SecretVolumeSource{\n\t\t\t\t\t\tSecretName: serviceAccountName,\n\t\t\t\t\t\tItems: []corev1.KeyToPath{{\n\t\t\t\t\t\t\tKey: \"key.json\",\n\t\t\t\t\t\t\tPath: \"key.json\",\n\t\t\t\t\t\t}}}}})\n\n\t\tpod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts,\n\t\t\tcorev1.VolumeMount{\n\t\t\t\tName: volName,\n\t\t\t\tMountPath: mountPath,\n\t\t\t\tSubPath: \"\",\n\t\t\t\tReadOnly: true})\n\n\t\tpod.Spec.Containers[i].Env = append(c.Env, corev1.EnvVar{\n\t\t\tName: \"GOOGLE_APPLICATION_CREDENTIALS\",\n\t\t\tValue: keyPath})\n\t}\n\n\treturn true\n}\n\nfunc patchPod(origPod, newPod *corev1.Pod, clientset *kubernetes.Clientset) error {\n\torigData, err := json.Marshal(origPod)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal original pod: %+v\", err)\n\t}\n\n\tnewData, err := json.Marshal(newPod)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal modified pod: %+v\", err)\n\t}\n\n\tpatchBytes, err := strategicpatch.CreateTwoWayMergePatch(origData, newData, corev1.Pod{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create 2-way merge patch: %+v\", err)\n\t}\n\n\tif _, err = clientset.CoreV1().Pods(\"\").Patch(origPod.GetName(),\n\t\ttypes.StrategicMergePatchType, patchBytes); err != nil {\n\t\treturn fmt.Errorf(\"failed to patch pod\/%s: %+v\", origPod.GetName(), err)\n\t}\n\treturn nil\n}\n<commit_msg>refactor patchPod method<commit_after>\/\/ Copyright 2017 Google Inc. 
All Rights Reserved.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"syscall\"\n\t\"time\"\n\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/fields\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/strategicpatch\"\n\t\"k8s.io\/apimachinery\/pkg\/watch\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\t_ \"k8s.io\/client-go\/plugin\/pkg\/client\/auth\/gcp\"\n\t\"k8s.io\/client-go\/rest\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\t\"k8s.io\/client-go\/tools\/clientcmd\"\n)\n\nconst (\n\tannotation = \"iam.cloud.google.com\/account-name\"\n\tinitializerName = \"serviceaccounts.cloud.google.com\"\n\tdefaultNamespace = \"default\"\n\tresyncPeriod = 30 * time.Second\n\n\tsecretMountPath = \"\/var\/run\/secrets\/gcp\/\"\n\tserviceAccountFile = \"key.json\"\n)\n\ntype config struct {\n\tContainers []corev1.Container\n\tVolumes []corev1.Volume\n}\n\nfunc main() {\n\tlog.Println(\"Starting the GCP Service accounts initializer...\")\n\n\tlog.Println(\"Using in-cluster token discovery\")\n\tclusterConfig, err := rest.InClusterConfig()\n\tif err != nil {\n\t\tlog.Printf(\"failed to use in-cluster token: %+v\", err)\n\t\tkubecfg := filepath.Join(os.Getenv(\"HOME\"), \".kube\", \"config\")\n\t\tlog.Printf(\"Using kubeconfig file at %s\", kubecfg)\n\t\tclusterConfig, err = clientcmd.BuildConfigFromFlags(\"\", kubecfg)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"failed to find kubeconfig file: %+v\", err)\n\t\t\tlog.Fatal(\"No authentication is available.\")\n\t\t}\n\t}\n\n\tclientset, err := kubernetes.NewForConfig(clusterConfig)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to initialize kubernetes client: %+v\", err)\n\t}\n\n\t\/\/ Watch uninitialized Pods in all namespaces.\n\trestClient := clientset.CoreV1().RESTClient()\n\twatchlist := cache.NewListWatchFromClient(restClient,\n\t\t\"pods\", corev1.NamespaceAll, fields.Everything())\n\n\t\/\/ Wrap the returned watchlist to workaround the inability to include\n\t\/\/ the `IncludeUninitialized` list option when setting up watch clients.\n\tincludeUninitializedWatchlist := &cache.ListWatch{\n\t\tListFunc: func(options metav1.ListOptions) (runtime.Object, error) {\n\t\t\toptions.IncludeUninitialized = true\n\t\t\treturn watchlist.List(options)\n\t\t},\n\t\tWatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\toptions.IncludeUninitialized = true\n\t\t\treturn watchlist.Watch(options)\n\t\t},\n\t}\n\n\t_, controller := cache.NewInformer(includeUninitializedWatchlist,\n\t\t&corev1.Pod{},\n\t\tresyncPeriod,\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\tpod, ok := obj.(*corev1.Pod)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Fatalf(\"watch returned non-pod object: %T\", pod)\n\t\t\t\t}\n\n\t\t\t\tif 
!needsInitialization(pod) {\n\t\t\t\t\tlog.Printf(\"does not need initialization: pod\/%s\", pod.GetName())\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tmodifiedPod, err := clonePod(pod)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"error cloning pod object: %+v\", err)\n\t\t\t\t}\n\n\t\t\t\tif !injectPod(modifiedPod) {\n\t\t\t\t\tlog.Printf(\"no injection for pod\/%s\", pod.GetName())\n\t\t\t\t}\n\n\t\t\t\tif err := patchPod(pod, modifiedPod, clientset); err != nil {\n\t\t\t\t\tlog.Printf(\"error saving pod\/%s: %+v\", pod.GetName(), err)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Printf(\"initialized pod\/%s\", pod.GetName())\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t)\n\n\tstop := make(chan struct{})\n\tgo controller.Run(stop)\n\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)\n\t<-signalChan\n\n\tlog.Println(\"Shutdown signal received, exiting...\")\n\tclose(stop)\n}\n\n\/\/ needsInitialization determines if the pod is required to be initialized\n\/\/ currently by this initializer.\nfunc needsInitialization(pod *corev1.Pod) bool {\n\tinitializers := pod.ObjectMeta.GetInitializers()\n\treturn initializers != nil &&\n\t\tlen(initializers.Pending) > 0 &&\n\t\tinitializers.Pending[0].Name == initializerName\n}\n\n\/\/ clonePod creates a deep copy of pod for modification.\nfunc clonePod(pod *corev1.Pod) (*corev1.Pod, error) {\n\to, err := runtime.NewScheme().DeepCopy(pod)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to deepcopy: %+v\", err)\n\t}\n\tp, ok := o.(*corev1.Pod)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"cloned object is not a Pod: %T\", p)\n\t}\n\treturn p, nil\n}\n\n\/\/ removeSelfPendingInitializer removes the first element from pending\n\/\/ initializers list of in-memory pod value.\nfunc removeSelfPendingInitializer(pod *corev1.Pod) {\n\tpendingInitializers := pod.ObjectMeta.GetInitializers().Pending\n\tif len(pendingInitializers) == 1 {\n\t\tpod.ObjectMeta.Initializers = nil\n\t} else {\n\t\tpod.ObjectMeta.Initializers.Pending = append(\n\t\t\tpendingInitializers[:0], pendingInitializers[1:]...)\n\t}\n}\n\n\/\/ injectPod makes modifications to in-memory pod value to inject the service\n\/\/ account. 
Returns whether any modifications have been made.\nfunc injectPod(pod *corev1.Pod) bool {\n\tserviceAccountName, ok := pod.ObjectMeta.GetAnnotations()[annotation]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tfor i, c := range pod.Spec.Containers {\n\t\tvolName := fmt.Sprintf(\"gcp-%s\", serviceAccountName)\n\t\tmountPath := path.Join(secretMountPath, serviceAccountName)\n\t\tkeyPath := path.Join(mountPath, serviceAccountFile)\n\n\t\tpod.Spec.Volumes = append(pod.Spec.Volumes,\n\t\t\tcorev1.Volume{\n\t\t\t\tName: volName,\n\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\tSecret: &corev1.SecretVolumeSource{\n\t\t\t\t\t\tSecretName: serviceAccountName,\n\t\t\t\t\t\tItems: []corev1.KeyToPath{{\n\t\t\t\t\t\t\tKey: \"key.json\",\n\t\t\t\t\t\t\tPath: \"key.json\",\n\t\t\t\t\t\t}}}}})\n\n\t\tpod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts,\n\t\t\tcorev1.VolumeMount{\n\t\t\t\tName: volName,\n\t\t\t\tMountPath: mountPath,\n\t\t\t\tSubPath: \"\",\n\t\t\t\tReadOnly: true})\n\n\t\tpod.Spec.Containers[i].Env = append(c.Env, corev1.EnvVar{\n\t\t\tName: \"GOOGLE_APPLICATION_CREDENTIALS\",\n\t\t\tValue: keyPath})\n\t}\n\n\treturn true\n}\n\n\/\/ patchPod saves the pod to the API using a strategic 2-way JSON merge patch.\nfunc patchPod(origPod, newPod *corev1.Pod, clientset *kubernetes.Clientset) error {\n\torigData, err := json.Marshal(origPod)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal original pod: %+v\", err)\n\t}\n\n\tnewData, err := json.Marshal(newPod)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal modified pod: %+v\", err)\n\t}\n\n\tpatch, err := strategicpatch.CreateTwoWayMergePatch(origData, newData, corev1.Pod{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create 2-way merge patch: %+v\", err)\n\t}\n\n\tif _, err = clientset.CoreV1().Pods(corev1.NamespaceAll).Patch(\n\t\torigPod.GetName(), types.StrategicMergePatchType, patch); err != nil {\n\t\treturn fmt.Errorf(\"failed to patch pod\/%s: %+v\", origPod.GetName(), err)\n\t}\n\treturn nil\n}\n<|endoftext|>"}
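The patchPod helper above computes a strategic two-way merge patch rather than issuing a full update, so only the fields the initializer touched are sent to the API server. The sketch below isolates that one step; the pod and annotation are illustrative, and it assumes the k8s.io/api and k8s.io/apimachinery modules are available.

package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	orig := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}
	mod := orig.DeepCopy()
	mod.ObjectMeta.Annotations = map[string]string{"initialized": "true"}

	// Marshal both versions and let the library diff them; the corev1.Pod{}
	// argument tells it which strategic-merge rules (patch keys, list
	// semantics) apply to this type.
	origData, _ := json.Marshal(orig)
	newData, _ := json.Marshal(mod)
	patch, err := strategicpatch.CreateTwoWayMergePatch(origData, newData, corev1.Pod{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch)) // {"metadata":{"annotations":{"initialized":"true"}}}
}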
{"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gcs\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/api\/googleapi\"\n)\n\n\/\/ A bucket that wraps another, calling its methods in a retry loop with\n\/\/ randomized exponential backoff.\ntype retryBucket struct {\n\tmaxSleep time.Duration\n\twrapped Bucket\n}\n\nfunc newRetryBucket(\n\tmaxSleep time.Duration,\n\twrapped Bucket) (b Bucket) {\n\tb = &retryBucket{\n\t\tmaxSleep: maxSleep,\n\t\twrapped: wrapped,\n\t}\n\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc shouldRetry(err error) (b bool) {\n\t\/\/ HTTP 50x errors.\n\tif typed, ok := err.(*googleapi.Error); ok {\n\t\tif typed.Code >= 500 && typed.Code < 600 {\n\t\t\tb = true\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ HTTP 429 errors (GCS uses these for rate limiting).\n\tif typed, ok := err.(*googleapi.Error); ok {\n\t\tif typed.Code == 429 {\n\t\t\tb = true\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Network errors, which tend to show up transiently when doing lots of\n\t\/\/ operations in parallel. For example:\n\t\/\/\n\t\/\/ dial tcp 74.125.203.95:443: too many open files\n\t\/\/\n\tif _, ok := err.(*net.OpError); ok {\n\t\tb = true\n\t\treturn\n\t}\n\n\t\/\/ The HTTP package returns ErrUnexpectedEOF in several places. This seems to\n\t\/\/ come up when the server terminates the connection in the middle of an\n\t\/\/ object read.\n\tif err == io.ErrUnexpectedEOF {\n\t\tb = true\n\t\treturn\n\t}\n\n\t\/\/ The HTTP library also appears to leak EOF errors from... 
somewhere in its\n\/\/ guts as URL errors sometimes.\n\tif urlErr, ok := err.(*url.Error); ok {\n\t\tif urlErr.Err == io.EOF {\n\t\t\tb = true\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Sometimes the HTTP package helpfully encapsulates the real error in a URL\n\t\/\/ error.\n\tif urlErr, ok := err.(*url.Error); ok {\n\t\tb = shouldRetry(urlErr.Err)\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ Choose an appropriate delay for exponential backoff, given that we have\n\/\/ already slept the given number of times for this logical request.\nfunc chooseDelay(prevSleepCount uint) (d time.Duration) {\n\tconst baseDelay = time.Millisecond\n\n\t\/\/ Choose a delay in [0, 2^prevSleepCount * baseDelay).\n\td = (1 << prevSleepCount) * baseDelay\n\td = time.Duration(rand.Int63n(int64(d)))\n\n\treturn\n}\n\n\/\/ Exponential backoff for a function that might fail.\n\/\/\n\/\/ This is essentially what is described in the \"Best practices\" section of the\n\/\/ \"Upload Objects\" docs:\n\/\/\n\/\/\thttps:\/\/cloud.google.com\/storage\/docs\/json_api\/v1\/how-tos\/upload\n\/\/\n\/\/ with the following exceptions:\n\/\/\n\/\/ - We perform backoff for all operations.\n\/\/\n\/\/ - The random component scales with the delay, so that the first sleep\n\/\/ cannot be as long as one second. The algorithm used matches the\n\/\/ description at http:\/\/en.wikipedia.org\/wiki\/Exponential_backoff.\n\/\/\n\/\/ - We retry more types of errors; see shouldRetry above.\n\/\/\n\/\/ State for total sleep time and number of previous sleeps is housed outside\n\/\/ of this function to allow it to be \"resumed\" by multiple invocations of\n\/\/ retryObjectReader.Read.\nfunc expBackoff(\n\tctx context.Context,\n\tdesc string,\n\tmaxSleep time.Duration,\n\tf func() error,\n\tprevSleepCount *uint,\n\tprevSleepDuration *time.Duration) (err error) {\n\tfor {\n\t\t\/\/ Make an attempt. 
Stop if successful.\n\t\terr = f()\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Do we want to retry?\n\t\tif !shouldRetry(err) {\n\t\t\t\/\/ Special case: don't spam up the logs for EOF, which io.Reader returns\n\t\t\t\/\/ in the normal course of things.\n\t\t\tif err != io.EOF {\n\t\t\t\terr = fmt.Errorf(\"not retrying %s: %w\", desc, err)\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Choose a delay.\n\t\td := chooseDelay(*prevSleepCount)\n\t\t*prevSleepCount++\n\n\t\t\/\/ Are we out of credit?\n\t\tif *prevSleepDuration+d > maxSleep {\n\t\t\t\/\/ Return the most recent error.\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Sleep, returning early if cancelled.\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\t\/\/ On cancellation, return the last error we saw.\n\t\t\treturn\n\n\t\tcase <-time.After(d):\n\t\t\t*prevSleepDuration += d\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\n\/\/ Like expBackoff, but assumes that we've never slept before (and won't need\n\/\/ to sleep again).\nfunc oneShotExpBackoff(\n\tctx context.Context,\n\tdesc string,\n\tmaxSleep time.Duration,\n\tf func() error) (err error) {\n\tvar prevSleepCount uint\n\tvar prevSleepDuration time.Duration\n\n\terr = expBackoff(\n\t\tctx,\n\t\tdesc,\n\t\tmaxSleep,\n\t\tf,\n\t\t&prevSleepCount,\n\t\t&prevSleepDuration)\n\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Read support\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype retryObjectReader struct {\n\tbucket *retryBucket\n\n\t\/\/ The context we should watch when sleeping for retries.\n\tctx context.Context\n\n\t\/\/ What we are trying to read.\n\tname string\n\tgeneration int64\n\tbyteRange ByteRange\n\n\t\/\/ nil when we start or have seen a permanent error.\n\twrapped io.ReadCloser\n\n\t\/\/ If we've seen an error that we shouldn't retry for, this will be non-nil\n\t\/\/ and should be returned permanently.\n\tpermanentErr error\n\n\t\/\/ The number of times we've slept so far, and the total amount of time we've\n\t\/\/ spent sleeping.\n\tsleepCount uint\n\tsleepDuration time.Duration\n}\n\n\/\/ Set up the wrapped reader.\nfunc (rc *retryObjectReader) setUpWrapped() (err error) {\n\t\/\/ Call through to create the reader.\n\treq := &ReadObjectRequest{\n\t\tName: rc.name,\n\t\tGeneration: rc.generation,\n\t\tRange: &rc.byteRange,\n\t}\n\n\twrapped, err := rc.bucket.wrapped.NewReader(rc.ctx, req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\trc.wrapped = wrapped\n\treturn\n}\n\n\/\/ Set up the wrapped reader if necessary, and make one attempt to read through\n\/\/ it.\n\/\/\n\/\/ Clears the wrapped reader on error.\nfunc (rc *retryObjectReader) readOnce(p []byte) (n int, err error) {\n\t\/\/ Set up the wrapped reader if it's not already around.\n\tif rc.wrapped == nil {\n\t\terr = rc.setUpWrapped()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Attempt to read from it.\n\tn, err = rc.wrapped.Read(p)\n\tif err != nil {\n\t\trc.wrapped.Close()\n\t\trc.wrapped = nil\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ Invariant: we never return an error from this function unless we've given up\n\/\/ on retrying. 
In particular, we won't return a short read because the wrapped\n\/\/ reader returned a short read and an error.\nfunc (rc *retryObjectReader) Read(p []byte) (n int, err error) {\n\t\/\/ Whatever we do, accumulate the bytes that we're returning to the user.\n\tdefer func() {\n\t\tif n < 0 {\n\t\t\tpanic(fmt.Sprintf(\"Negative byte count: %d\", n))\n\t\t}\n\n\t\trc.byteRange.Start += uint64(n)\n\t}()\n\n\t\/\/ If we've already decided on a permanent error, return that.\n\tif rc.permanentErr != nil {\n\t\terr = rc.permanentErr\n\t\treturn\n\t}\n\n\t\/\/ If we let an error escape below, it must be a permanent one.\n\tdefer func() {\n\t\tif err != nil {\n\t\t\trc.permanentErr = err\n\t\t}\n\t}()\n\n\t\/\/ We will repeatedly make single attempts until we get a successful request.\n\t\/\/ Don't forget to accumulate the result each time.\n\ttryOnce := func() (err error) {\n\t\tvar bytesRead int\n\t\tbytesRead, err = rc.readOnce(p)\n\t\tn += bytesRead\n\t\tp = p[bytesRead:]\n\n\t\treturn\n\t}\n\n\terr = expBackoff(\n\t\trc.ctx,\n\t\tfmt.Sprintf(\"Read(%q, %d)\", rc.name, rc.generation),\n\t\trc.bucket.maxSleep,\n\t\ttryOnce,\n\t\t&rc.sleepCount,\n\t\t&rc.sleepDuration)\n\n\treturn\n}\n\nfunc (rc *retryObjectReader) Close() (err error) {\n\t\/\/ If we don't have a wrapped reader, there is nothing useful that we can or\n\t\/\/ need to do here.\n\tif rc.wrapped == nil {\n\t\treturn\n\t}\n\n\t\/\/ Call through.\n\terr = rc.wrapped.Close()\n\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Public interface\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (rb *retryBucket) Name() (name string) {\n\tname = rb.wrapped.Name()\n\treturn\n}\n\nfunc (rb *retryBucket) NewReader(\n\tctx context.Context,\n\treq *ReadObjectRequest) (rc io.ReadCloser, err error) {\n\t\/\/ If the user specified the latest generation, we need to figure out what\n\t\/\/ that is so that we can create a reader that knows how to keep a stable\n\t\/\/ generation despite retrying repeatedly.\n\tvar generation int64 = req.Generation\n\tvar sleepCount uint\n\tvar sleepDuration time.Duration\n\n\tif generation == 0 {\n\t\tfindGeneration := func() (err error) {\n\t\t\to, err := rb.wrapped.StatObject(\n\t\t\t\tctx,\n\t\t\t\t&StatObjectRequest{\n\t\t\t\t\tName: req.Name,\n\t\t\t\t})\n\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tgeneration = o.Generation\n\t\t\treturn\n\t\t}\n\n\t\terr = expBackoff(\n\t\t\tctx,\n\t\t\tfmt.Sprintf(\"FindLatestGeneration(%q)\", req.Name),\n\t\t\trb.maxSleep,\n\t\t\tfindGeneration,\n\t\t\t&sleepCount,\n\t\t\t&sleepDuration)\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Choose an appropriate byte range.\n\tbyteRange := ByteRange{0, math.MaxUint64}\n\tif req.Range != nil {\n\t\tbyteRange = *req.Range\n\t}\n\n\t\/\/ Now that we know what generation we're looking for, return an appropriate\n\t\/\/ reader that knows how to retry when the connection fails. 
Make sure to\n\t\/\/ inherit the time spent sleeping above.\n\trc = &retryObjectReader{\n\t\tbucket: rb,\n\t\tctx: ctx,\n\n\t\tname: req.Name,\n\t\tgeneration: generation,\n\t\tbyteRange: byteRange,\n\n\t\tsleepCount: sleepCount,\n\t\tsleepDuration: sleepDuration,\n\t}\n\n\treturn\n}\n\nfunc (rb *retryBucket) CreateObject(\n\tctx context.Context,\n\treq *CreateObjectRequest) (o *Object, err error) {\n\tvar seeker io.ReadSeeker\n\tif readSeeker, ok := req.Contents.(io.ReadSeeker); ok {\n\t\tseeker = readSeeker\n\t} else {\n\t\t\/\/ We can't simply replay the request multiple times, because the first\n\t\t\/\/ attempt might exhaust some of the req.Contents reader, leaving\n\t\t\/\/ missing contents for the second attempt.\n\t\t\/\/\n\t\t\/\/ So, copy out all contents and create a copy of the request that we\n\t\t\/\/ will modify to serve from memory for each call.\n\t\tdata, err := ioutil.ReadAll(req.Contents)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"ioutil.ReadAll: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\t\tseeker = bytes.NewReader(data)\n\t}\n\n\treqCopy := *req\n\n\t\/\/ Call through with that request.\n\terr = oneShotExpBackoff(\n\t\tctx,\n\t\tfmt.Sprintf(\"CreateObject(%q)\", req.Name),\n\t\trb.maxSleep,\n\t\tfunc() (err error) {\n\t\t\tif _, err = seeker.Seek(0, io.SeekStart); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\treqCopy.Contents = seeker\n\t\t\to, err = rb.wrapped.CreateObject(ctx, &reqCopy)\n\t\t\treturn\n\t\t})\n\n\treturn\n}\n\nfunc (rb *retryBucket) CopyObject(\n\tctx context.Context,\n\treq *CopyObjectRequest) (o *Object, err error) {\n\terr = oneShotExpBackoff(\n\t\tctx,\n\t\tfmt.Sprintf(\"CopyObject(%q, %q)\", req.SrcName, req.DstName),\n\t\trb.maxSleep,\n\t\tfunc() (err error) {\n\t\t\to, err = rb.wrapped.CopyObject(ctx, req)\n\t\t\treturn\n\t\t})\n\n\treturn\n}\n\nfunc (rb *retryBucket) ComposeObjects(\n\tctx context.Context,\n\treq *ComposeObjectsRequest) (o *Object, err error) {\n\terr = oneShotExpBackoff(\n\t\tctx,\n\t\tfmt.Sprintf(\"ComposeObjects(%q)\", req.DstName),\n\t\trb.maxSleep,\n\t\tfunc() (err error) {\n\t\t\to, err = rb.wrapped.ComposeObjects(ctx, req)\n\t\t\treturn\n\t\t})\n\n\treturn\n}\n\nfunc (rb *retryBucket) StatObject(\n\tctx context.Context,\n\treq *StatObjectRequest) (o *Object, err error) {\n\terr = oneShotExpBackoff(\n\t\tctx,\n\t\tfmt.Sprintf(\"StatObject(%q)\", req.Name),\n\t\trb.maxSleep,\n\t\tfunc() (err error) {\n\t\t\to, err = rb.wrapped.StatObject(ctx, req)\n\t\t\treturn\n\t\t})\n\n\treturn\n}\n\nfunc (rb *retryBucket) ListObjects(\n\tctx context.Context,\n\treq *ListObjectsRequest) (listing *Listing, err error) {\n\terr = oneShotExpBackoff(\n\t\tctx,\n\t\tfmt.Sprintf(\"ListObjects(%q)\", req.Prefix),\n\t\trb.maxSleep,\n\t\tfunc() (err error) {\n\t\t\tlisting, err = rb.wrapped.ListObjects(ctx, req)\n\t\t\treturn\n\t\t})\n\treturn\n}\n\nfunc (rb *retryBucket) UpdateObject(\n\tctx context.Context,\n\treq *UpdateObjectRequest) (o *Object, err error) {\n\terr = oneShotExpBackoff(\n\t\tctx,\n\t\tfmt.Sprintf(\"UpdateObject(%q)\", req.Name),\n\t\trb.maxSleep,\n\t\tfunc() (err error) {\n\t\t\to, err = rb.wrapped.UpdateObject(ctx, req)\n\t\t\treturn\n\t\t})\n\n\treturn\n}\n\nfunc (rb *retryBucket) DeleteObject(\n\tctx context.Context,\n\treq *DeleteObjectRequest) (err error) {\n\terr = oneShotExpBackoff(\n\t\tctx,\n\t\tfmt.Sprintf(\"DeleteObject(%q)\", req.Name),\n\t\trb.maxSleep,\n\t\tfunc() (err error) {\n\t\t\terr = rb.wrapped.DeleteObject(ctx, req)\n\t\t\treturn\n\t\t})\n\n\treturn\n}\n<commit_msg>Unit Test for 
BucketManager<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gcs\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/api\/googleapi\"\n)\n\n\/\/ A bucket that wraps another, calling its methods in a retry loop with\n\/\/ randomized exponential backoff.\ntype retryBucket struct {\n\tmaxSleep time.Duration\n\twrapped Bucket\n}\n\nfunc newRetryBucket(\n\tmaxSleep time.Duration,\n\twrapped Bucket) (b Bucket) {\n\tb = &retryBucket{\n\t\tmaxSleep: maxSleep,\n\t\twrapped: wrapped,\n\t}\n\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc shouldRetry(err error) (b bool) {\n\t\/\/ HTTP 50x errors.\n\tif typed, ok := err.(*googleapi.Error); ok {\n\t\tif typed.Code >= 500 && typed.Code < 600 {\n\t\t\tb = true\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ HTTP 429 errors (GCS uses these for rate limiting).\n\tif typed, ok := err.(*googleapi.Error); ok {\n\t\tif typed.Code == 429 {\n\t\t\tb = true\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Network errors, which tend to show up transiently when doing lots of\n\t\/\/ operations in parallel. For example:\n\t\/\/\n\t\/\/ dial tcp 74.125.203.95:443: too many open files\n\t\/\/\n\tif _, ok := err.(*net.OpError); ok {\n\t\tb = true\n\t\treturn\n\t}\n\n\t\/\/ The HTTP package returns ErrUnexpectedEOF in several places. This seems to\n\t\/\/ come up when the server terminates the connection in the middle of an\n\t\/\/ object read.\n\tif err == io.ErrUnexpectedEOF {\n\t\tb = true\n\t\treturn\n\t}\n\n\t\/\/ The HTTP library also appears to leak EOF errors from... 
somewhere in its\n\/\/ guts as URL errors sometimes.\n\tif urlErr, ok := err.(*url.Error); ok {\n\t\tif urlErr.Err == io.EOF {\n\t\t\tb = true\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Sometimes the HTTP package helpfully encapsulates the real error in a URL\n\t\/\/ error.\n\tif urlErr, ok := err.(*url.Error); ok {\n\t\tb = shouldRetry(urlErr.Err)\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ Choose an appropriate delay for exponential backoff, given that we have\n\/\/ already slept the given number of times for this logical request.\nfunc chooseDelay(prevSleepCount uint) (d time.Duration) {\n\tconst baseDelay = time.Millisecond\n\n\t\/\/ Choose a delay in [0, 2^prevSleepCount * baseDelay).\n\td = (1 << prevSleepCount) * baseDelay\n\td = time.Duration(rand.Int63n(int64(d)))\n\n\treturn\n}\n\n\/\/ Exponential backoff for a function that might fail.\n\/\/\n\/\/ This is essentially what is described in the \"Best practices\" section of the\n\/\/ \"Upload Objects\" docs:\n\/\/\n\/\/ https:\/\/cloud.google.com\/storage\/docs\/json_api\/v1\/how-tos\/upload\n\/\/\n\/\/ with the following exceptions:\n\/\/\n\/\/ * We perform backoff for all operations.\n\/\/\n\/\/ * The random component scales with the delay, so that the first sleep\n\/\/ cannot be as long as one second. The algorithm used matches the\n\/\/ description at http:\/\/en.wikipedia.org\/wiki\/Exponential_backoff.\n\/\/\n\/\/ * We retry more types of errors; see shouldRetry above.\n\/\/\n\/\/ State for total sleep time and number of previous sleeps is housed outside\n\/\/ of this function to allow it to be \"resumed\" by multiple invocations of\n\/\/ retryObjectReader.Read.\nfunc expBackoff(\n\tctx context.Context,\n\tdesc string,\n\tmaxSleep time.Duration,\n\tf func() error,\n\tprevSleepCount *uint,\n\tprevSleepDuration *time.Duration) (err error) {\n\tfor {\n\t\t\/\/ Make an attempt. 
Stop if successful.\n\t\terr = f()\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Do we want to retry?\n\t\tif !shouldRetry(err) {\n\t\t\t\/\/ Special case: don't spam up the logs for EOF, which io.Reader returns\n\t\t\t\/\/ in the normal course of things.\n\t\t\tif err != io.EOF {\n\t\t\t\terr = fmt.Errorf(\"not retrying %s: %w\", desc, err)\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Choose a delay.\n\t\td := chooseDelay(*prevSleepCount)\n\t\t*prevSleepCount++\n\n\t\t\/\/ Are we out of credit?\n\t\tif *prevSleepDuration+d > maxSleep {\n\t\t\t\/\/ Return the most recent error.\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Sleep, returning early if cancelled.\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\t\/\/ On cancellation, return the last error we saw.\n\t\t\treturn\n\n\t\tcase <-time.After(d):\n\t\t\t*prevSleepDuration += d\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\n\/\/ Like expBackoff, but assumes that we've never slept before (and won't need\n\/\/ to sleep again).\nfunc oneShotExpBackoff(\n\tctx context.Context,\n\tdesc string,\n\tmaxSleep time.Duration,\n\tf func() error) (err error) {\n\tvar prevSleepCount uint\n\tvar prevSleepDuration time.Duration\n\n\terr = expBackoff(\n\t\tctx,\n\t\tdesc,\n\t\tmaxSleep,\n\t\tf,\n\t\t&prevSleepCount,\n\t\t&prevSleepDuration)\n\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Read support\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype retryObjectReader struct {\n\tbucket *retryBucket\n\n\t\/\/ The context we should watch when sleeping for retries.\n\tctx context.Context\n\n\t\/\/ What we are trying to read.\n\tname string\n\tgeneration int64\n\tbyteRange ByteRange\n\n\t\/\/ nil when we start or have seen a permanent error.\n\twrapped io.ReadCloser\n\n\t\/\/ If we've seen an error that we shouldn't retry for, this will be non-nil\n\t\/\/ and should be returned permanently.\n\tpermanentErr error\n\n\t\/\/ The number of times we've slept so far, and the total amount of time we've\n\t\/\/ spent sleeping.\n\tsleepCount uint\n\tsleepDuration time.Duration\n}\n\n\/\/ Set up the wrapped reader.\nfunc (rc *retryObjectReader) setUpWrapped() (err error) {\n\t\/\/ Call through to create the reader.\n\treq := &ReadObjectRequest{\n\t\tName: rc.name,\n\t\tGeneration: rc.generation,\n\t\tRange: &rc.byteRange,\n\t}\n\n\twrapped, err := rc.bucket.wrapped.NewReader(rc.ctx, req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\trc.wrapped = wrapped\n\treturn\n}\n\n\/\/ Set up the wrapped reader if necessary, and make one attempt to read through\n\/\/ it.\n\/\/\n\/\/ Clears the wrapped reader on error.\nfunc (rc *retryObjectReader) readOnce(p []byte) (n int, err error) {\n\t\/\/ Set up the wrapped reader if it's not already around.\n\tif rc.wrapped == nil {\n\t\terr = rc.setUpWrapped()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Attempt to read from it.\n\tn, err = rc.wrapped.Read(p)\n\tif err != nil {\n\t\trc.wrapped.Close()\n\t\trc.wrapped = nil\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ Invariant: we never return an error from this function unless we've given up\n\/\/ on retrying. 
In particular, we won't return a short read because the wrapped\n\/\/ reader returned a short read and an error.\nfunc (rc *retryObjectReader) Read(p []byte) (n int, err error) {\n\t\/\/ Whatever we do, accumulate the bytes that we're returning to the user.\n\tdefer func() {\n\t\tif n < 0 {\n\t\t\tpanic(fmt.Sprintf(\"Negative byte count: %d\", n))\n\t\t}\n\n\t\trc.byteRange.Start += uint64(n)\n\t}()\n\n\t\/\/ If we've already decided on a permanent error, return that.\n\tif rc.permanentErr != nil {\n\t\terr = rc.permanentErr\n\t\treturn\n\t}\n\n\t\/\/ If we let an error escape below, it must be a permanent one.\n\tdefer func() {\n\t\tif err != nil {\n\t\t\trc.permanentErr = err\n\t\t}\n\t}()\n\n\t\/\/ We will repeatedly make single attempts until we get a successful request.\n\t\/\/ Don't forget to accumulate the result each time.\n\ttryOnce := func() (err error) {\n\t\tvar bytesRead int\n\t\tbytesRead, err = rc.readOnce(p)\n\t\tn += bytesRead\n\t\tp = p[bytesRead:]\n\n\t\treturn\n\t}\n\n\terr = expBackoff(\n\t\trc.ctx,\n\t\tfmt.Sprintf(\"Read(%q, %d)\", rc.name, rc.generation),\n\t\trc.bucket.maxSleep,\n\t\ttryOnce,\n\t\t&rc.sleepCount,\n\t\t&rc.sleepDuration)\n\n\treturn\n}\n\nfunc (rc *retryObjectReader) Close() (err error) {\n\t\/\/ If we don't have a wrapped reader, there is nothing useful that we can or\n\t\/\/ need to do here.\n\tif rc.wrapped == nil {\n\t\treturn\n\t}\n\n\t\/\/ Call through.\n\terr = rc.wrapped.Close()\n\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Public interface\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (rb *retryBucket) Name() (name string) {\n\tname = rb.wrapped.Name()\n\treturn\n}\n\nfunc (rb *retryBucket) NewReader(\n\tctx context.Context,\n\treq *ReadObjectRequest) (rc io.ReadCloser, err error) {\n\t\/\/ If the user specified the latest generation, we need to figure out what\n\t\/\/ that is so that we can create a reader that knows how to keep a stable\n\t\/\/ generation despite retrying repeatedly.\n\tvar generation int64 = req.Generation\n\tvar sleepCount uint\n\tvar sleepDuration time.Duration\n\n\tif generation == 0 {\n\t\tfindGeneration := func() (err error) {\n\t\t\to, err := rb.wrapped.StatObject(\n\t\t\t\tctx,\n\t\t\t\t&StatObjectRequest{\n\t\t\t\t\tName: req.Name,\n\t\t\t\t})\n\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tgeneration = o.Generation\n\t\t\treturn\n\t\t}\n\n\t\terr = expBackoff(\n\t\t\tctx,\n\t\t\tfmt.Sprintf(\"FindLatestGeneration(%q)\", req.Name),\n\t\t\trb.maxSleep,\n\t\t\tfindGeneration,\n\t\t\t&sleepCount,\n\t\t\t&sleepDuration)\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Choose an appropriate byte range.\n\tbyteRange := ByteRange{0, math.MaxUint64}\n\tif req.Range != nil {\n\t\tbyteRange = *req.Range\n\t}\n\n\t\/\/ Now that we know what generation we're looking for, return an appropriate\n\t\/\/ reader that knows how to retry when the connection fails. 
Make sure to\n\t\/\/ inherit the time spent sleeping above.\n\trc = &retryObjectReader{\n\t\tbucket: rb,\n\t\tctx: ctx,\n\n\t\tname: req.Name,\n\t\tgeneration: generation,\n\t\tbyteRange: byteRange,\n\n\t\tsleepCount: sleepCount,\n\t\tsleepDuration: sleepDuration,\n\t}\n\n\treturn\n}\n\nfunc (rb *retryBucket) CreateObject(\n\tctx context.Context,\n\treq *CreateObjectRequest) (o *Object, err error) {\n\tvar seeker io.ReadSeeker\n\tif readSeeker, ok := req.Contents.(io.ReadSeeker); ok {\n\t\tseeker = readSeeker\n\t} else {\n\t\t\/\/ We can't simply replay the request multiple times, because the first\n\t\t\/\/ attempt might exhaust some of the req.Contents reader, leaving\n\t\t\/\/ missing contents for the second attempt.\n\t\t\/\/\n\t\t\/\/ So, copy out all contents and create a copy of the request that we\n\t\t\/\/ will modify to serve from memory for each call.\n\t\tdata, err := ioutil.ReadAll(req.Contents)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"ioutil.ReadAll: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\t\tseeker = bytes.NewReader(data)\n\t}\n\n\treqCopy := *req\n\n\t\/\/ Call through with that request.\n\terr = oneShotExpBackoff(\n\t\tctx,\n\t\tfmt.Sprintf(\"CreateObject(%q)\", req.Name),\n\t\trb.maxSleep,\n\t\tfunc() (err error) {\n\t\t\tif _, err = seeker.Seek(0, io.SeekStart); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\treqCopy.Contents = seeker\n\t\t\to, err = rb.wrapped.CreateObject(ctx, &reqCopy)\n\t\t\treturn\n\t\t})\n\n\treturn\n}\n\nfunc (rb *retryBucket) CopyObject(\n\tctx context.Context,\n\treq *CopyObjectRequest) (o *Object, err error) {\n\terr = oneShotExpBackoff(\n\t\tctx,\n\t\tfmt.Sprintf(\"CopyObject(%q, %q)\", req.SrcName, req.DstName),\n\t\trb.maxSleep,\n\t\tfunc() (err error) {\n\t\t\to, err = rb.wrapped.CopyObject(ctx, req)\n\t\t\treturn\n\t\t})\n\n\treturn\n}\n\nfunc (rb *retryBucket) ComposeObjects(\n\tctx context.Context,\n\treq *ComposeObjectsRequest) (o *Object, err error) {\n\terr = oneShotExpBackoff(\n\t\tctx,\n\t\tfmt.Sprintf(\"ComposeObjects(%q)\", req.DstName),\n\t\trb.maxSleep,\n\t\tfunc() (err error) {\n\t\t\to, err = rb.wrapped.ComposeObjects(ctx, req)\n\t\t\treturn\n\t\t})\n\n\treturn\n}\n\nfunc (rb *retryBucket) StatObject(\n\tctx context.Context,\n\treq *StatObjectRequest) (o *Object, err error) {\n\terr = oneShotExpBackoff(\n\t\tctx,\n\t\tfmt.Sprintf(\"StatObject(%q)\", req.Name),\n\t\trb.maxSleep,\n\t\tfunc() (err error) {\n\t\t\to, err = rb.wrapped.StatObject(ctx, req)\n\t\t\treturn\n\t\t})\n\n\treturn\n}\n\nfunc (rb *retryBucket) ListObjects(\n\tctx context.Context,\n\treq *ListObjectsRequest) (listing *Listing, err error) {\n\terr = oneShotExpBackoff(\n\t\tctx,\n\t\tfmt.Sprintf(\"ListObjects(%q)\", req.Prefix),\n\t\trb.maxSleep,\n\t\tfunc() (err error) {\n\t\t\tlisting, err = rb.wrapped.ListObjects(ctx, req)\n\t\t\treturn\n\t\t})\n\treturn\n}\n\nfunc (rb *retryBucket) UpdateObject(\n\tctx context.Context,\n\treq *UpdateObjectRequest) (o *Object, err error) {\n\terr = oneShotExpBackoff(\n\t\tctx,\n\t\tfmt.Sprintf(\"UpdateObject(%q)\", req.Name),\n\t\trb.maxSleep,\n\t\tfunc() (err error) {\n\t\t\to, err = rb.wrapped.UpdateObject(ctx, req)\n\t\t\treturn\n\t\t})\n\n\treturn\n}\n\nfunc (rb *retryBucket) DeleteObject(\n\tctx context.Context,\n\treq *DeleteObjectRequest) (err error) {\n\terr = oneShotExpBackoff(\n\t\tctx,\n\t\tfmt.Sprintf(\"DeleteObject(%q)\", req.Name),\n\t\trb.maxSleep,\n\t\tfunc() (err error) {\n\t\t\terr = rb.wrapped.DeleteObject(ctx, req)\n\t\t\treturn\n\t\t})\n\n\treturn\n}\n<|endoftext|>"}
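The chooseDelay/expBackoff pair above draws the nth delay uniformly from [0, 2^n * base) and gives up once the accumulated sleep would exceed maxSleep. Below is a standalone sketch of just that delay policy; the constants in main are illustrative, not the package's real budget.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoffDelays returns the sequence of sleeps a caller would perform: each
// retry n sleeps a random duration in [0, 2^n * base), stopping when the
// remaining budget is exhausted.
func backoffDelays(base, budget time.Duration) []time.Duration {
	var delays []time.Duration
	var spent time.Duration
	for n := uint(0); ; n++ {
		ceil := (1 << n) * base // time.Duration via the untyped-constant shift rule
		d := time.Duration(rand.Int63n(int64(ceil)))
		if spent+d > budget {
			return delays // out of credit: surface the last error instead
		}
		spent += d
		delays = append(delays, d)
	}
}

func main() {
	for i, d := range backoffDelays(time.Millisecond, 20*time.Millisecond) {
		fmt.Printf("retry %d: sleep %v\n", i, d)
	}
}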
{"text":"<commit_before>package certdata\n\nimport \"encoding\/asn1\"\n\n\/\/ Source GlobalSign CP and Cabforum BR\n\/\/ https:\/\/www.globalsign.com\/en\/repository\/GlobalSign_CP_v5.3.pdf\nvar polOidType []oidType\n\ntype oidType struct {\n\tObjectIdentifier asn1.ObjectIdentifier\n\tType string\n}\n\n\/*\nfunc getType(oid asn1.ObjectIdentifier) string {\n\tfor _, oidt := range polOidType {\n\t\tif oid.Equal(oidt.ObjectIdentifier) {\n\t\t\treturn oidt.Type\n\t\t}\n\t}\n\treturn \"\"\n}*\/\n\nfunc getType(oid []asn1.ObjectIdentifier) string {\n\tfor _, poid := range oid {\n\t\tfor _, oidt := range polOidType {\n\t\t\tif poid.Equal(oidt.ObjectIdentifier) {\n\t\t\t\treturn oidt.Type\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ TODO: Can we handle this differently, we might want to use a constant here?\nfunc init() {\n\t\/\/ Extended Validation\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 1}, \"EV\"}) \/\/ Extended Validation Certificates Policy – SSL\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 2}, \"CS\"}) \/\/ Extended Validation Certificates Policy – Code Signing\n\n\t\/\/ Domain Validation\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 10}, \"DV\"}) \/\/ Domain Validation Certificates Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 10, 10}, \"DV\"}) \/\/ Domain Validation Certificates Policy – AlphaSSL\n\n\t\/\/ Organization Validation\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 20}, \"OV\"}) \/\/ Organization Validation Certificates Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 21}, \"-\"}) \/\/ Untrusted OneClickSSL Test Certificate (not in cp)\n\n\t\/\/ Intranet Validation\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 25}, \"IN\"}) \/\/ IntranetSSL Validation Certificates Policy\n\n\t\/\/ Time Stamping\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 30}, \"TS\"}) \/\/ Time Stamping Certificates Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 31}, \"TS\"}) \/\/ Time Stamping Certificates Policy – AATL\n\n\t\/\/ Client Certificates\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 40}, \"PS\"}) \/\/ Client Certificates Policy (Generic)\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 40, 10}, \"PS\"}) \/\/ Client Certificates Policy (ePKI – Enterprise PKI)\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 40, 20}, \"PS\"}) \/\/ Client Certificates Policy (JCAN – Japan CA Network)\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 40, 30}, \"PS\"}) \/\/ Client Certificates Policy (AATL)\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 40, 40}, \"PS\"}) \/\/ Client Certificates Policy (ePKI for private CAs)\n\n\t\/\/ Code Signing\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 50}, \"CS\"}) \/\/ Code Signing Certificates Policy\n\n\t\/\/ CA Chaining and Cross Signing\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 60}, \"CA\"}) \/\/ CA Chaining Policy – Trusted Root and Hosted Root\n\tpolOidType = 
append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 60, 1}, \"CA\"}) \/\/ CA Chaining Policy – Trusted Root (Baseline Requirements Compatible)\n\n\t\/\/ Others\n\t\/*polOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1,3,6,1,4,1,4146,1,80}, \"XX\"}) \/\/ Retail Industry Electronic Data Interchange Client Certificate Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1,3,6,1,4,1,4146,1,81}, \"XX\"}) \/\/ Retail Industry Electronic Data Interchange Server Certificate Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1,3,6,1,4,1,4146,1,90}, \"XX\"}) \/\/ Trusted Root TPM Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1,3,6,1,4,1,4146,1,95}, \"XX\"}) \/\/ Online Certificate Status Protocol Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1,3,6,1,4,1,4146,1,70}, \"XX\"}) \/\/ High Volume CA Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1,3,6,1,4,1,4146,1,26}, \"XX\"}) \/\/ Test Certificate Policy (Should not be trusted)\n\n\t\/\/ In addition to these identifiers, all Certificates that comply with the NAESB Business\n\t\/\/ Practice Standards will include one of the following additional identifiers:-\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2,16,840,1,114505,1,12,1,2}, \"XX\"}) \/\/ NAESB Rudimentary Assurance\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2,16,840,1,114505,1,12,2,2}, \"XX\"}) \/\/ NAESB Basic Assurance\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2,16,840,1,114505,1,12,3,2}, \"XX\"}) \/\/ NAESB Medium Assurance\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2,16,840,1,114505,1,12,4,2}, \"XX\"}) \/\/ NAESB High Assurance\n\t*\/\n\t\/\/ In addition to these identifiers, all Certificates that comply with the Baseline\n\t\/\/ Requirements will include the following additional identifiers:-\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2, 23, 140, 1, 1}, \"EV\"}) \/\/ Extended Validation Certificate Policy\n\t\/\/polOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2,23,140,1,2}, \"\"}) \/\/ BR Compliance Certificate Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2, 23, 140, 1, 3}, \"EVCS\"}) \/\/ Extended Validation Code Signing Certificates Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2, 23, 140, 1, 4}, \"CS\"}) \/\/ BR Compliance Code Signing Certificates Policy\n\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2, 23, 140, 1, 2, 1}, \"DV\"}) \/\/ Domain Validation Certificates Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2, 23, 140, 1, 2, 2}, \"OV\"}) \/\/ Organization Validation Certificates Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2, 23, 140, 1, 2, 3}, \"IV\"}) \/\/ Individual Validation Certificates Policy\n\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 2, 840, 113583, 1, 2, 1}, \"PS\"}) \/\/ Adobe Certificate Policy Attribute Object Identifier (PDF)\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 2, 840, 113583, 1, 2, 2}, \"PS\"}) \/\/ Test Adobe Certificate Policy Attribute Object Identifier\n\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 40, 30, 2}, \"PS\"}) \/\/ AATL Adobe Certificate Policy Attribute Object Identifier\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 2, 392, 200063, 30, 
5300}, \"PS\"}) \/\/ JCAN\n}\n<commit_msg>Remove unused fuction<commit_after>package certdata\n\nimport \"encoding\/asn1\"\n\n\/\/ Source GlobalSign CP and Cabforum BR\n\/\/ https:\/\/www.globalsign.com\/en\/repository\/GlobalSign_CP_v5.3.pdf\nvar polOidType []oidType\n\ntype oidType struct {\n\tObjectIdentifier asn1.ObjectIdentifier\n\tType string\n}\n\nfunc getType(oid []asn1.ObjectIdentifier) string {\n\tfor _, poid := range oid {\n\t\tfor _, oidt := range polOidType {\n\t\t\tif poid.Equal(oidt.ObjectIdentifier) {\n\t\t\t\treturn oidt.Type\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ TODO: Can we handle this differently, we might want to use a constant here?\nfunc init() {\n\t\/\/ Extended Validation\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 1}, \"EV\"}) \/\/ Extended Validation Certificates Policy – SSL\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 2}, \"CS\"}) \/\/ Extended Validation Certificates Policy – Code Signing\n\n\t\/\/ Domain Validation\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 10}, \"DV\"}) \/\/ Domain Validation Certificates Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 10, 10}, \"DV\"}) \/\/ Domain Validation Certificates Policy – AlphaSSL\n\n\t\/\/ Organization Validation\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 20}, \"OV\"}) \/\/ Organization Validation Certificates Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 21}, \"-\"}) \/\/ Untrusted OneClickSSL Test Certificate (not in cp)\n\n\t\/\/ Intranet Validation\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 25}, \"IN\"}) \/\/ IntranetSSL Validation Certificates Policy\n\n\t\/\/ Time Stamping\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 30}, \"TS\"}) \/\/ Time Stamping Certificates Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 31}, \"TS\"}) \/\/ Time Stamping Certificates Policy – AATL\n\n\t\/\/ Client Certificates\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 40}, \"PS\"}) \/\/ Client Certificates Policy (Generic)\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 40, 10}, \"PS\"}) \/\/ Client Certificates Policy (ePKI – Enterprise PKI)\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 40, 20}, \"PS\"}) \/\/ Client Certificates Policy (JCAN – Japan CA Network)\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 40, 30}, \"PS\"}) \/\/ Client Certificates Policy (AATL)\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 40, 40}, \"PS\"}) \/\/ Client Certificates Policy (ePKI for private CAs)\n\n\t\/\/ Code Signing\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 50}, \"CS\"}) \/\/ Code Signing Certificates Policy\n\n\t\/\/ CA Chaining and Cross Signing\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 60}, \"CA\"}) \/\/ CA Chaining Policy – Trusted Root and Hosted Root\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 60, 1}, \"CA\"}) \/\/ CA Chaining 
Policy – Trusted Root (Baseline Requirements Compatible)\n\n\t\/\/ Others\n\t\/*polOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1,3,6,1,4,1,4146,1,80}, \"XX\"}) \/\/ Retail Industry Electronic Data Interchange Client Certificate Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1,3,6,1,4,1,4146,1,81}, \"XX\"}) \/\/ Retail Industry Electronic Data Interchange Server Certificate Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1,3,6,1,4,1,4146,1,90}, \"XX\"}) \/\/ Trusted Root TPM Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1,3,6,1,4,1,4146,1,95}, \"XX\"}) \/\/ Online Certificate Status Protocol Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1,3,6,1,4,1,4146,1,70}, \"XX\"}) \/\/ High Volume CA Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1,3,6,1,4,1,4146,1,26}, \"XX\"}) \/\/ Test Certificate Policy (Should not be trusted)\n\n\t\/\/ In addition to these identifiers, all Certificates that comply with the NAESB Business\n\t\/\/ Practice Standards will include one of the following additional identifiers:-\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2,16,840,1,114505,1,12,1,2}, \"XX\"}) \/\/ NAESB Rudimentary Assurance\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2,16,840,1,114505,1,12,2,2}, \"XX\"}) \/\/ NAESB Basic Assurance\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2,16,840,1,114505,1,12,3,2}, \"XX\"}) \/\/ NAESB Medium Assurance\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2,16,840,1,114505,1,12,4,2}, \"XX\"}) \/\/ NAESB High Assurance\n\t*\/\n\t\/\/ In addition to these identifiers, all Certificates that comply with the Baseline\n\t\/\/ Requirements will include the following additional identifiers:-\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2, 23, 140, 1, 1}, \"EV\"}) \/\/ Extended Validation Certificate Policy\n\t\/\/polOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2,23,140,1,2}, \"\"}) \/\/ BR Compliance Certificate Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2, 23, 140, 1, 3}, \"EVCS\"}) \/\/ Extended Validation Code Signing Certificates Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2, 23, 140, 1, 4}, \"CS\"}) \/\/ BR Compliance Code Signing Certificates Policy\n\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2, 23, 140, 1, 2, 1}, \"DV\"}) \/\/ Domain Validation Certificates Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2, 23, 140, 1, 2, 2}, \"OV\"}) \/\/ Organization Validation Certificates Policy\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{2, 23, 140, 1, 2, 3}, \"IV\"}) \/\/ Individual Validation Certificates Policy\n\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 2, 840, 113583, 1, 2, 1}, \"PS\"}) \/\/ Adobe Certificate Policy Attribute Object Identifier (PDF)\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 2, 840, 113583, 1, 2, 2}, \"PS\"}) \/\/ Test Adobe Certificate Policy Attribute Object Identifier\n\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 4146, 1, 40, 30, 2}, \"PS\"}) \/\/ AATL Adobe Certificate Policy Attribute Object Identifier\n\tpolOidType = append(polOidType, oidType{asn1.ObjectIdentifier{1, 2, 392, 200063, 30, 5300}, \"PS\"}) \/\/ JCAN\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport 
(\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/cloudfoundry\/cli\/cf\/configuration\/core_config\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/models\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/net\"\n)\n\ntype ApplicationSummaries struct {\n\tApps []ApplicationFromSummary\n}\n\nfunc (resource ApplicationSummaries) ToModels() (apps []models.ApplicationFields) {\n\tfor _, application := range resource.Apps {\n\t\tapps = append(apps, application.ToFields())\n\t}\n\treturn\n}\n\ntype ApplicationFromSummary struct {\n\tGuid string\n\tName string\n\tRoutes []RouteSummary\n\tServices []ServicePlanSummary\n\tRunningInstances int `json:\"running_instances\"`\n\tMemory int64\n\tInstances int\n\tDiskQuota int64 `json:\"disk_quota\"`\n\tUrls []string\n\tState string\n\tSpaceGuid string `json:\"space_guid\"`\n\tPackageUpdatedAt *time.Time `json:\"package_updated_at\"`\n}\n\nfunc (resource ApplicationFromSummary) ToFields() (app models.ApplicationFields) {\n\tapp = models.ApplicationFields{}\n\tapp.Guid = resource.Guid\n\tapp.Name = resource.Name\n\tapp.State = strings.ToLower(resource.State)\n\tapp.InstanceCount = resource.Instances\n\tapp.DiskQuota = resource.DiskQuota\n\tapp.RunningInstances = resource.RunningInstances\n\tapp.Memory = resource.Memory\n\tapp.SpaceGuid = resource.SpaceGuid\n\tapp.PackageUpdatedAt = resource.PackageUpdatedAt\n\n\treturn\n}\n\nfunc (resource ApplicationFromSummary) ToModel() (app models.Application) {\n\tapp.ApplicationFields = resource.ToFields()\n\troutes := []models.RouteSummary{}\n\tfor _, route := range resource.Routes {\n\t\troutes = append(routes, route.ToModel())\n\t}\n\tapp.Routes = routes\n\n\tservices := []models.ServicePlanSummary{}\n\tfor _, service := range resource.Services {\n\t\tservices = append(services, service.ToModel())\n\t}\n\tapp.Routes = routes\n\tapp.Services = services\n\n\treturn\n}\n\ntype RouteSummary struct {\n\tGuid string\n\tHost string\n\tDomain DomainSummary\n}\n\nfunc (resource RouteSummary) ToModel() (route models.RouteSummary) {\n\tdomain := models.DomainFields{}\n\tdomain.Guid = resource.Domain.Guid\n\tdomain.Name = resource.Domain.Name\n\tdomain.Shared = resource.Domain.OwningOrganizationGuid != \"\"\n\n\troute.Guid = resource.Guid\n\troute.Host = resource.Host\n\troute.Domain = domain\n\treturn\n}\n\nfunc (resource ServicePlanSummary) ToModel() (route models.ServicePlanSummary) {\n\troute.Guid = resource.Guid\n\troute.Name = resource.Name\n\treturn\n}\n\ntype DomainSummary struct {\n\tGuid string\n\tName string\n\tOwningOrganizationGuid string\n}\n\ntype AppSummaryRepository interface {\n\tGetSummariesInCurrentSpace() (apps []models.Application, apiErr error)\n\tGetSummary(appGuid string) (summary models.Application, apiErr error)\n}\n\ntype CloudControllerAppSummaryRepository struct {\n\tconfig core_config.Reader\n\tgateway net.Gateway\n}\n\nfunc NewCloudControllerAppSummaryRepository(config core_config.Reader, gateway net.Gateway) (repo CloudControllerAppSummaryRepository) {\n\trepo.config = config\n\trepo.gateway = gateway\n\treturn\n}\n\nfunc (repo CloudControllerAppSummaryRepository) GetSummariesInCurrentSpace() (apps []models.Application, apiErr error) {\n\tresources := new(ApplicationSummaries)\n\n\tpath := fmt.Sprintf(\"%s\/v2\/spaces\/%s\/summary\", repo.config.ApiEndpoint(), repo.config.SpaceFields().Guid)\n\tapiErr = repo.gateway.GetResource(path, resources)\n\tif apiErr != nil {\n\t\treturn\n\t}\n\n\tfor _, resource := range resources.Apps {\n\t\tapps = append(apps, resource.ToModel())\n\t}\n\treturn\n}\n\nfunc 
(repo CloudControllerAppSummaryRepository) GetSummary(appGuid string) (summary models.Application, apiErr error) {\n\tpath := fmt.Sprintf(\"%s\/v2\/apps\/%s\/summary\", repo.config.ApiEndpoint(), appGuid)\n\tsummaryResponse := new(ApplicationFromSummary)\n\tapiErr = repo.gateway.GetResource(path, summaryResponse)\n\tif apiErr != nil {\n\t\treturn\n\t}\n\n\tsummary = summaryResponse.ToModel()\n\treturn\n}\n<commit_msg>populates EnvironmentVars when hitting app\/summary endpoint<commit_after>package api\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/cloudfoundry\/cli\/cf\/configuration\/core_config\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/models\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/net\"\n)\n\ntype ApplicationSummaries struct {\n\tApps []ApplicationFromSummary\n}\n\nfunc (resource ApplicationSummaries) ToModels() (apps []models.ApplicationFields) {\n\tfor _, application := range resource.Apps {\n\t\tapps = append(apps, application.ToFields())\n\t}\n\treturn\n}\n\ntype ApplicationFromSummary struct {\n\tGuid string\n\tName string\n\tRoutes []RouteSummary\n\tServices []ServicePlanSummary\n\tRunningInstances int `json:\"running_instances\"`\n\tMemory int64\n\tInstances int\n\tDiskQuota int64 `json:\"disk_quota\"`\n\tUrls []string\n\tEnvironmentVars map[string]interface{} `json:\"environment_json,omitempty\"`\n\tState string\n\tSpaceGuid string `json:\"space_guid\"`\n\tPackageUpdatedAt *time.Time `json:\"package_updated_at\"`\n}\n\nfunc (resource ApplicationFromSummary) ToFields() (app models.ApplicationFields) {\n\tapp = models.ApplicationFields{}\n\tapp.Guid = resource.Guid\n\tapp.Name = resource.Name\n\tapp.State = strings.ToLower(resource.State)\n\tapp.InstanceCount = resource.Instances\n\tapp.DiskQuota = resource.DiskQuota\n\tapp.RunningInstances = resource.RunningInstances\n\tapp.Memory = resource.Memory\n\tapp.SpaceGuid = resource.SpaceGuid\n\tapp.PackageUpdatedAt = resource.PackageUpdatedAt\n\n\treturn\n}\n\nfunc (resource ApplicationFromSummary) ToModel() (app models.Application) {\n\tapp.ApplicationFields = resource.ToFields()\n\troutes := []models.RouteSummary{}\n\tfor _, route := range resource.Routes {\n\t\troutes = append(routes, route.ToModel())\n\t}\n\tapp.Routes = routes\n\n\tservices := []models.ServicePlanSummary{}\n\tfor _, service := range resource.Services {\n\t\tservices = append(services, service.ToModel())\n\t}\n\n\tapp.EnvironmentVars = resource.EnvironmentVars\n\tapp.Routes = routes\n\tapp.Services = services\n\n\treturn\n}\n\ntype RouteSummary struct {\n\tGuid string\n\tHost string\n\tDomain DomainSummary\n}\n\nfunc (resource RouteSummary) ToModel() (route models.RouteSummary) {\n\tdomain := models.DomainFields{}\n\tdomain.Guid = resource.Domain.Guid\n\tdomain.Name = resource.Domain.Name\n\tdomain.Shared = resource.Domain.OwningOrganizationGuid != \"\"\n\n\troute.Guid = resource.Guid\n\troute.Host = resource.Host\n\troute.Domain = domain\n\treturn\n}\n\nfunc (resource ServicePlanSummary) ToModel() (route models.ServicePlanSummary) {\n\troute.Guid = resource.Guid\n\troute.Name = resource.Name\n\treturn\n}\n\ntype DomainSummary struct {\n\tGuid string\n\tName string\n\tOwningOrganizationGuid string\n}\n\ntype AppSummaryRepository interface {\n\tGetSummariesInCurrentSpace() (apps []models.Application, apiErr error)\n\tGetSummary(appGuid string) (summary models.Application, apiErr error)\n}\n\ntype CloudControllerAppSummaryRepository struct {\n\tconfig core_config.Reader\n\tgateway net.Gateway\n}\n\nfunc 
NewCloudControllerAppSummaryRepository(config core_config.Reader, gateway net.Gateway) (repo CloudControllerAppSummaryRepository) {\n\trepo.config = config\n\trepo.gateway = gateway\n\treturn\n}\n\nfunc (repo CloudControllerAppSummaryRepository) GetSummariesInCurrentSpace() (apps []models.Application, apiErr error) {\n\tresources := new(ApplicationSummaries)\n\n\tpath := fmt.Sprintf(\"%s\/v2\/spaces\/%s\/summary\", repo.config.ApiEndpoint(), repo.config.SpaceFields().Guid)\n\tapiErr = repo.gateway.GetResource(path, resources)\n\tif apiErr != nil {\n\t\treturn\n\t}\n\n\tfor _, resource := range resources.Apps {\n\t\tapps = append(apps, resource.ToModel())\n\t}\n\treturn\n}\n\nfunc (repo CloudControllerAppSummaryRepository) GetSummary(appGuid string) (summary models.Application, apiErr error) {\n\tpath := fmt.Sprintf(\"%s\/v2\/apps\/%s\/summary\", repo.config.ApiEndpoint(), appGuid)\n\tsummaryResponse := new(ApplicationFromSummary)\n\tapiErr = repo.gateway.GetResource(path, summaryResponse)\n\tif apiErr != nil {\n\t\treturn\n\t}\n\n\tsummary = summaryResponse.ToModel()\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package discovery\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"github.com\/libp2p\/go-libp2p-core\/peer\"\n\n\tlogging \"github.com\/ipfs\/go-log\"\n)\n\nvar log = logging.Logger(\"discovery\")\n\n\/\/ FindPeers is a utility function that synchronously collects peers from a Discoverer.\nfunc FindPeers(ctx context.Context, d Discoverer, ns string, opts ...Option) ([]peer.AddrInfo, error) {\n\tvar res []peer.AddrInfo\n\n\tch, err := d.FindPeers(ctx, ns, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor pi := range ch {\n\t\tres = append(res, pi)\n\t}\n\n\treturn res, nil\n}\n\n\/\/ Advertise is a utility function that persistently advertises a service through an Advertiser.\nfunc Advertise(ctx context.Context, a Advertiser, ns string, opts ...Option) {\n\tgo func() {\n\t\tfor {\n\t\t\tttl, err := a.Advertise(ctx, ns, opts...)\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Error advertising %s: %s\", ns, err.Error())\n\t\t\t\tif ctx.Err() != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tselect {\n\t\t\t\tcase <-time.After(2 * time.Minute):\n\t\t\t\t\tcontinue\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\twait := 7 * ttl \/ 8\n\t\t\tselect {\n\t\t\tcase <-time.After(wait):\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n<commit_msg>remove deprecated types<commit_after>package discovery\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"github.com\/libp2p\/go-libp2p-core\/discovery\"\n\t\"github.com\/libp2p\/go-libp2p-core\/peer\"\n\n\tlogging \"github.com\/ipfs\/go-log\"\n)\n\nvar log = logging.Logger(\"discovery\")\n\n\/\/ FindPeers is a utility function that synchronously collects peers from a Discoverer.\nfunc FindPeers(ctx context.Context, d discovery.Discoverer, ns string, opts ...discovery.Option) ([]peer.AddrInfo, error) {\n\tvar res []peer.AddrInfo\n\n\tch, err := d.FindPeers(ctx, ns, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor pi := range ch {\n\t\tres = append(res, pi)\n\t}\n\n\treturn res, nil\n}\n\n\/\/ Advertise is a utility function that persistently advertises a service through an Advertiser.\nfunc Advertise(ctx context.Context, a discovery.Advertiser, ns string, opts ...discovery.Option) {\n\tgo func() {\n\t\tfor {\n\t\t\tttl, err := a.Advertise(ctx, ns, opts...)\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Error advertising %s: %s\", ns, err.Error())\n\t\t\t\tif ctx.Err() != nil 
{\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tselect {\n\t\t\t\tcase <-time.After(2 * time.Minute):\n\t\t\t\t\tcontinue\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\twait := 7 * ttl \/ 8\n\t\t\tselect {\n\t\t\tcase <-time.After(wait):\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"database\/sql\"\n\t\"strings\"\n\n\t\"sync\"\n\n\t\"errors\"\n\n\t\"github.com\/piotrkowalczuk\/charon\"\n)\n\n\/\/ Permission returns Permission value that is concatenated\n\/\/ using entity properties like subsystem, module and action.\nfunc (pe *permissionEntity) Permission() charon.Permission {\n\treturn charon.Permission(pe.Subsystem + \":\" + pe.Module + \":\" + pe.Action)\n}\n\n\/\/ PermissionRepository ...\ntype PermissionRepository interface {\n\tFind(criteria *permissionCriteria) ([]*permissionEntity, error)\n\tFindOneByID(id int64) (entity *permissionEntity, err error)\n\tFindByUserID(userID int64) (entities []*permissionEntity, err error)\n\tRegister(permissions charon.Permissions) (created, untouched, removed int64, err error)\n\tInsert(entity *permissionEntity) (*permissionEntity, error)\n}\n\nfunc newPermissionRepository(dbPool *sql.DB) *permissionRepository {\n\treturn &permissionRepository{\n\t\tdb: dbPool,\n\t\ttable: tablePermission,\n\t\tcolumns: tablePermissionColumns,\n\t}\n}\n\n\/\/ FindByUserID retrieves all permissions for user represented by given id.\nfunc (pr *permissionRepository) FindByUserID(userID int64) ([]*permissionEntity, error) {\n\tquery := `\n\t\tSELECT DISTINCT ON (p.id)\n\t\t\t` + columns(tablePermissionColumns, \"p\") + `\n\t\tFROM ` + pr.table + ` AS p\n\t\tLEFT JOIN ` + tableUserPermissions + ` AS up ON up.permission_id = p.id AND up.user_id = $1\n\t\tLEFT JOIN ` + tableUserGroups + ` AS ug ON ug.user_id = $1\n\t\tLEFT JOIN ` + tableGroupPermissions + ` AS gp ON gp.permission_id = p.id AND gp.group_id = ug.group_id\n\t\tWHERE up.user_id = $1 OR ug.user_id = $1\n\t`\n\n\trows, err := pr.db.Query(query, userID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tpermissions := []*permissionEntity{}\n\tfor rows.Next() {\n\t\tvar p permissionEntity\n\t\terr = rows.Scan(\n\t\t\t&p.Action,\n\t\t\t&p.CreatedAt,\n\t\t\t&p.ID,\n\t\t\t&p.Module,\n\t\t\t&p.Subsystem,\n\t\t\t&p.UpdatedAt,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpermissions = append(permissions, &p)\n\t}\n\tif rows.Err() != nil {\n\t\treturn nil, rows.Err()\n\t}\n\n\treturn permissions, nil\n}\n\nfunc (pr *permissionRepository) findOneStmt() (*sql.Stmt, error) {\n\treturn pr.db.Prepare(\n\t\t\"SELECT \" + strings.Join(tablePermissionColumns, \",\") + \" \" +\n\t\t\t\"FROM \" + pr.table + \" AS p \" +\n\t\t\t\"WHERE p.subsystem = $1 AND p.module = $2 AND p.action = $3\",\n\t)\n}\n\nfunc (pr *permissionRepository) Register(permissions charon.Permissions) (created, unt, removed int64, err error) {\n\tvar (\n\t\ttx *sql.Tx\n\t\tinsert, delete *sql.Stmt\n\t\trows *sql.Rows\n\t\tres sql.Result\n\t\tsubsystem string\n\t\tentities []*permissionEntity\n\t\taffected int64\n\t)\n\tif len(permissions) == 0 {\n\t\treturn 0, 0, 0, errors.New(\"charond: empty slice, permissions cannot be registered\")\n\t}\n\n\tsubsystem = permissions[0].Subsystem()\n\tif subsystem == \"\" {\n\t\treturn 0, 0, 0, errors.New(\"charond: subsystem name is empty string, permissions cannot be registered\")\n\t}\n\n\tfor _, p := range permissions {\n\t\tif p.Subsystem() != subsystem 
{\n\t\t\treturn 0, 0, 0, errors.New(\"charond: provided permissions do not belong to one subsystem, permissions cannot be registered\")\n\t\t}\n\t}\n\n\ttx, err = pr.db.Begin()\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t} else {\n\t\t\terr = tx.Commit()\n\t\t\tunt = untouched(int64(len(permissions)), created, removed)\n\t\t}\n\t}()\n\n\trows, err = tx.Query(\"SELECT \"+strings.Join(tablePermissionColumns, \",\")+\" FROM \"+pr.table+\" AS p WHERE p.subsystem = $1\", subsystem)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer rows.Close()\n\n\tentities = []*permissionEntity{}\n\tfor rows.Next() {\n\t\tvar entity permissionEntity\n\t\terr = rows.Scan(\n\t\t\t&entity.Action,\n\t\t\t&entity.CreatedAt,\n\t\t\t&entity.ID,\n\t\t\t&entity.Module,\n\t\t\t&entity.Subsystem,\n\t\t\t&entity.UpdatedAt,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tentities = append(entities, &entity)\n\t}\n\tif rows.Err() != nil {\n\t\treturn 0, 0, 0, rows.Err()\n\t}\n\n\tinsert, err = tx.Prepare(\"INSERT INTO \" + pr.table + \" (subsystem, module, action) VALUES ($1, $2, $3)\")\n\tif err != nil {\n\t\treturn\n\t}\n\nMissingPermissionsLoop:\n\tfor _, p := range permissions {\n\t\tfor _, e := range entities {\n\t\t\tif p == e.Permission() {\n\t\t\t\tcontinue MissingPermissionsLoop\n\t\t\t}\n\t\t}\n\n\t\tif res, err = insert.Exec(p.Split()); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif affected, err = res.RowsAffected(); err != nil {\n\t\t\treturn\n\t\t}\n\t\tcreated += affected\n\t}\n\n\tdelete, err = tx.Prepare(\"DELETE FROM \" + pr.table + \" AS p WHERE p.id = $1\")\n\tif err != nil {\n\t\treturn\n\t}\n\nRedundantPermissionsLoop:\n\tfor _, e := range entities {\n\t\tfor _, p := range permissions {\n\t\t\tif e.Permission() == p {\n\t\t\t\tcontinue RedundantPermissionsLoop\n\t\t\t}\n\t\t}\n\n\t\tif res, err = delete.Exec(e.ID); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif affected, err = res.RowsAffected(); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tremoved += affected\n\t}\n\n\treturn\n}\n\n\/\/ PermissionRegistry is an interface that describes in-memory storage that holds information\n\/\/ about permissions that were registered by 3rd party services.\n\/\/ Should only be used as a proxy for the registration process to avoid multiple sql hits.\ntype PermissionRegistry interface {\n\t\/\/ Exists returns true if the given Permission was already registered.\n\tExists(permission charon.Permission) (exists bool)\n\t\/\/ Register checks if the given collection is valid and\n\t\/\/ calls PermissionRepository to store provided permissions\n\t\/\/ in a persistent way.\n\tRegister(permissions charon.Permissions) (created, untouched, removed int64, err error)\n}\n\ntype permissionRegistry struct {\n\tsync.RWMutex\n\trepository PermissionRepository\n\tpermissions map[charon.Permission]struct{}\n}\n\nfunc newPermissionRegistry(r PermissionRepository) PermissionRegistry {\n\treturn &permissionRegistry{\n\t\trepository: r,\n\t\tpermissions: make(map[charon.Permission]struct{}),\n\t}\n}\n\n\/\/ Exists implements PermissionRegistry interface.\nfunc (pr *permissionRegistry) Exists(permission charon.Permission) (ok bool) {\n\tpr.RLock()\n\tdefer pr.RUnlock()\n\n\t_, ok = pr.permissions[permission]\n\treturn\n}\n\n\/\/ Register implements PermissionRegistry interface.\nfunc (pr *permissionRegistry) Register(permissions charon.Permissions) (created, untouched, removed int64, err error) {\n\tpr.Lock()\n\tdefer pr.Unlock()\n\n\tnb := 0\n\tfor _, p := range permissions {\n\t\tif _, ok := pr.permissions[p]; !ok 
{\n\t\t\tpr.permissions[p] = struct{}{}\n\t\t\tnb++\n\t\t}\n\t}\n\n\tif nb > 0 {\n\t\treturn pr.repository.Register(permissions)\n\t}\n\n\treturn 0, 0, 0, nil\n}\n<commit_msg>PermissionsRepository comments cleanup<commit_after>package main\n\nimport (\n\t\"database\/sql\"\n\t\"strings\"\n\n\t\"sync\"\n\n\t\"errors\"\n\n\t\"github.com\/piotrkowalczuk\/charon\"\n)\n\n\/\/ Permission returns Permission value that is concatenated\n\/\/ using entity properties like subsystem, module and action.\nfunc (pe *permissionEntity) Permission() charon.Permission {\n\treturn charon.Permission(pe.Subsystem + \":\" + pe.Module + \":\" + pe.Action)\n}\n\n\/\/ PermissionRepository ...\ntype PermissionRepository interface {\n\tFind(criteria *permissionCriteria) ([]*permissionEntity, error)\n\tFindOneByID(id int64) (entity *permissionEntity, err error)\n\t\/\/ FindByUserID retrieves all permissions for user represented by given id.\n\tFindByUserID(userID int64) (entities []*permissionEntity, err error)\n\tRegister(permissions charon.Permissions) (created, untouched, removed int64, err error)\n\tInsert(entity *permissionEntity) (*permissionEntity, error)\n}\n\nfunc newPermissionRepository(dbPool *sql.DB) *permissionRepository {\n\treturn &permissionRepository{\n\t\tdb: dbPool,\n\t\ttable: tablePermission,\n\t\tcolumns: tablePermissionColumns,\n\t}\n}\n\n\/\/ FindByUserID implements PermissionRepository interface.\nfunc (pr *permissionRepository) FindByUserID(userID int64) ([]*permissionEntity, error) {\n\tquery := `\n\t\tSELECT DISTINCT ON (p.id)\n\t\t\t` + columns(tablePermissionColumns, \"p\") + `\n\t\tFROM ` + pr.table + ` AS p\n\t\tLEFT JOIN ` + tableUserPermissions + ` AS up ON up.permission_id = p.id AND up.user_id = $1\n\t\tLEFT JOIN ` + tableUserGroups + ` AS ug ON ug.user_id = $1\n\t\tLEFT JOIN ` + tableGroupPermissions + ` AS gp ON gp.permission_id = p.id AND gp.group_id = ug.group_id\n\t\tWHERE up.user_id = $1 OR ug.user_id = $1\n\t`\n\n\trows, err := pr.db.Query(query, userID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tpermissions := []*permissionEntity{}\n\tfor rows.Next() {\n\t\tvar p permissionEntity\n\t\terr = rows.Scan(\n\t\t\t&p.Action,\n\t\t\t&p.CreatedAt,\n\t\t\t&p.ID,\n\t\t\t&p.Module,\n\t\t\t&p.Subsystem,\n\t\t\t&p.UpdatedAt,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpermissions = append(permissions, &p)\n\t}\n\tif rows.Err() != nil {\n\t\treturn nil, rows.Err()\n\t}\n\n\treturn permissions, nil\n}\n\nfunc (pr *permissionRepository) findOneStmt() (*sql.Stmt, error) {\n\treturn pr.db.Prepare(\n\t\t\"SELECT \" + strings.Join(tablePermissionColumns, \",\") + \" \" +\n\t\t\t\"FROM \" + pr.table + \" AS p \" +\n\t\t\t\"WHERE p.subsystem = $1 AND p.module = $2 AND p.action = $3\",\n\t)\n}\n\n\/\/ Register implements PermissionRepository interface.\nfunc (pr *permissionRepository) Register(permissions charon.Permissions) (created, unt, removed int64, err error) {\n\tvar (\n\t\ttx *sql.Tx\n\t\tinsert, delete *sql.Stmt\n\t\trows *sql.Rows\n\t\tres sql.Result\n\t\tsubsystem string\n\t\tentities []*permissionEntity\n\t\taffected int64\n\t)\n\tif len(permissions) == 0 {\n\t\treturn 0, 0, 0, errors.New(\"charond: empty slice, permissions cannot be registered\")\n\t}\n\n\tsubsystem = permissions[0].Subsystem()\n\tif subsystem == \"\" {\n\t\treturn 0, 0, 0, errors.New(\"charond: subsystem name is empty string, permissions cannot be registered\")\n\t}\n\n\tfor _, p := range permissions {\n\t\tif p.Subsystem() != subsystem {\n\t\t\treturn 0, 0, 0, 
errors.New(\"charond: provided permissions do not belong to one subsystem, permissions cannot be registered\")\n\t\t}\n\t}\n\n\ttx, err = pr.db.Begin()\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t} else {\n\t\t\terr = tx.Commit()\n\t\t\tunt = untouched(int64(len(permissions)), created, removed)\n\t\t}\n\t}()\n\n\trows, err = tx.Query(\"SELECT \"+strings.Join(tablePermissionColumns, \",\")+\" FROM \"+pr.table+\" AS p WHERE p.subsystem = $1\", subsystem)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer rows.Close()\n\n\tentities = []*permissionEntity{}\n\tfor rows.Next() {\n\t\tvar entity permissionEntity\n\t\terr = rows.Scan(\n\t\t\t&entity.Action,\n\t\t\t&entity.CreatedAt,\n\t\t\t&entity.ID,\n\t\t\t&entity.Module,\n\t\t\t&entity.Subsystem,\n\t\t\t&entity.UpdatedAt,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tentities = append(entities, &entity)\n\t}\n\tif rows.Err() != nil {\n\t\treturn 0, 0, 0, rows.Err()\n\t}\n\n\tinsert, err = tx.Prepare(\"INSERT INTO \" + pr.table + \" (subsystem, module, action) VALUES ($1, $2, $3)\")\n\tif err != nil {\n\t\treturn\n\t}\n\nMissingPermissionsLoop:\n\tfor _, p := range permissions {\n\t\tfor _, e := range entities {\n\t\t\tif p == e.Permission() {\n\t\t\t\tcontinue MissingPermissionsLoop\n\t\t\t}\n\t\t}\n\n\t\tif res, err = insert.Exec(p.Split()); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif affected, err = res.RowsAffected(); err != nil {\n\t\t\treturn\n\t\t}\n\t\tcreated += affected\n\t}\n\n\tdelete, err = tx.Prepare(\"DELETE FROM \" + pr.table + \" AS p WHERE p.id = $1\")\n\tif err != nil {\n\t\treturn\n\t}\n\nRedundantPermissionsLoop:\n\tfor _, e := range entities {\n\t\tfor _, p := range permissions {\n\t\t\tif e.Permission() == p {\n\t\t\t\tcontinue RedundantPermissionsLoop\n\t\t\t}\n\t\t}\n\n\t\tif res, err = delete.Exec(e.ID); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif affected, err = res.RowsAffected(); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tremoved += affected\n\t}\n\n\treturn\n}\n\n\/\/ PermissionRegistry is an interface that describes in memory storage that holds information\n\/\/ about permissions that was registered by 3rd party services.\n\/\/ Should be only used as a proxy for registration process to avoid multiple sql hits.\ntype PermissionRegistry interface {\n\t\/\/ Exists returns true if given Permission was already registered.\n\tExists(permission charon.Permission) (exists bool)\n\t\/\/ Register checks if given collection is valid and\n\t\/\/ calls PermissionRepository to store provided permissions\n\t\/\/ in persistent way.\n\tRegister(permissions charon.Permissions) (created, untouched, removed int64, err error)\n}\n\ntype permissionRegistry struct {\n\tsync.RWMutex\n\trepository PermissionRepository\n\tpermissions map[charon.Permission]struct{}\n}\n\nfunc newPermissionRegistry(r PermissionRepository) PermissionRegistry {\n\treturn &permissionRegistry{\n\t\trepository: r,\n\t\tpermissions: make(map[charon.Permission]struct{}),\n\t}\n}\n\n\/\/ Exists implements PermissionRegistry interface.\nfunc (pr *permissionRegistry) Exists(permission charon.Permission) (ok bool) {\n\tpr.RLock()\n\tpr.RUnlock()\n\n\t_, ok = pr.permissions[permission]\n\treturn\n}\n\n\/\/ Register implements PermissionRegistry interface.\nfunc (pr *permissionRegistry) Register(permissions charon.Permissions) (created, untouched, removed int64, err error) {\n\tpr.Lock()\n\tdefer pr.Unlock()\n\n\tnb := 0\n\tfor _, p := range permissions {\n\t\tif _, ok := pr.permissions[p]; !ok 
{\n\t\t\tpr.permissions[p] = struct{}{}\n\t\t\tnb++\n\t\t}\n\t}\n\n\tif nb > 0 {\n\t\treturn pr.repository.Register(permissions)\n\t}\n\n\treturn 0, 0, 0, nil\n}\n\n\/\/ FindByTag implements PermissionRepository interface.\nfunc (pr *permissionRepository) FindByTag(userID int64) ([]*permissionEntity, error) {\n\tquery := `\n\t\tSELECT DISTINCT ON (p.id)\n\t\t\t` + columns(tablePermissionColumns, \"p\") + `\n\t\tFROM ` + pr.table + ` AS p\n\t\tLEFT JOIN ` + tableUserPermissions + ` AS up ON up.permission_id = p.id AND up.user_id = $1\n\t\tLEFT JOIN ` + tableUserGroups + ` AS ug ON ug.user_id = $1\n\t\tLEFT JOIN ` + tableGroupPermissions + ` AS gp ON gp.permission_id = p.id AND gp.group_id = ug.group_id\n\t\tWHERE up.user_id = $1 OR ug.user_id = $1\n\t`\n\n\trows, err := pr.db.Query(query, userID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tpermissions := []*permissionEntity{}\n\tfor rows.Next() {\n\t\tvar p permissionEntity\n\t\terr = rows.Scan(\n\t\t\t&p.Action,\n\t\t\t&p.CreatedAt,\n\t\t\t&p.ID,\n\t\t\t&p.Module,\n\t\t\t&p.Subsystem,\n\t\t\t&p.UpdatedAt,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpermissions = append(permissions, &p)\n\t}\n\tif rows.Err() != nil {\n\t\treturn nil, rows.Err()\n\t}\n\n\treturn permissions, nil\n}<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The nvim-go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage autocmd\n\nimport (\n\t\"time\"\n\n\t\"nvim-go\/nvimutil\"\n)\n\n\/\/ bufEnterEval represents the current buffer number, windows ID and buffer files directory.\ntype bufEnterEval struct {\n\tBufNr int `eval:\"bufnr('%')\"`\n\tWinID int `eval:\"win_getid()\"`\n\tDir string `eval:\"expand('%:p:h')\"`\n}\n\n\/\/ BufEnter gets the current buffer number, windows ID and set context from the directory structure on BufEnter autocmd.\nfunc (a *Autocmd) BufEnter(eval *bufEnterEval) error {\n\tdefer nvimutil.Profile(time.Now(), \"BufEnter\")\n\n\ta.mu.Lock()\n\ta.buildctxt.BufNr = eval.BufNr\n\ta.buildctxt.WinID = eval.WinID\n\ta.buildctxt.Dir = eval.Dir\n\ta.mu.Unlock()\n\n\ta.buildctxt.SetContext(eval.Dir)\n\treturn nil\n}\n<commit_msg>autocmd\/bufenter: cache prevDir<commit_after>\/\/ Copyright 2017 The nvim-go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage autocmd\n\nimport (\n\t\"time\"\n\n\t\"nvim-go\/nvimutil\"\n)\n\n\/\/ bufEnterEval represents the current buffer number, windows ID and buffer files directory.\ntype bufEnterEval struct {\n\tBufNr int `eval:\"bufnr('%')\"`\n\tWinID int `eval:\"win_getid()\"`\n\tDir string `eval:\"expand('%:p:h')\"`\n}\n\n\/\/ BufEnter gets the current buffer number, windows ID and set context from the directory structure on BufEnter autocmd.\nfunc (a *Autocmd) BufEnter(eval *bufEnterEval) error {\n\tdefer nvimutil.Profile(time.Now(), \"BufEnter\")\n\n\ta.mu.Lock()\n\ta.buildctxt.BufNr = eval.BufNr\n\ta.buildctxt.WinID = eval.WinID\n\ta.buildctxt.Dir = eval.Dir\n\ta.mu.Unlock()\n\n\tif eval.Dir != \"\" && a.buildctxt.PrevDir != eval.Dir {\n\t\ta.buildctxt.SetContext(eval.Dir)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016-2017 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/bpf\"\n\t\"github.com\/cilium\/cilium\/pkg\/endpoint\"\n\t\"github.com\/cilium\/cilium\/pkg\/identity\"\n\t\"github.com\/cilium\/cilium\/pkg\/maps\/policymap\"\n\t\"github.com\/cilium\/cilium\/pkg\/u8proto\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ Fatalf prints the Printf formatted message to stderr and exits the program\n\/\/ Note: os.Exit(1) is not recoverable\nfunc Fatalf(msg string, args ...interface{}) {\n\tfmt.Fprintf(os.Stderr, \"Error: %s\\n\", fmt.Sprintf(msg, args...))\n\tos.Exit(1)\n}\n\n\/\/ Usagef prints the Printf formatted message to stderr, prints usage help and\n\/\/ exits the program\n\/\/ Note: os.Exit(1) is not recoverable\nfunc Usagef(cmd *cobra.Command, msg string, args ...interface{}) {\n\ttxt := fmt.Sprintf(msg, args...)\n\tfmt.Fprintf(os.Stderr, \"Error: %s\\n\\n\", txt)\n\tcmd.Help()\n\tos.Exit(1)\n}\n\nfunc requireEndpointID(cmd *cobra.Command, args []string) {\n\tif len(args) < 1 {\n\t\tUsagef(cmd, \"Missing endpoint id argument\")\n\t}\n\n\tif id := identity.ReservedIdentities[args[0]]; id == identity.IdentityUnknown {\n\t\t_, _, err := endpoint.ValidateID(args[0])\n\n\t\tif err != nil {\n\t\t\tFatalf(\"Cannot parse endpoint id \\\"%s\\\": %s\", args[0], err)\n\t\t}\n\t}\n}\n\nfunc requireEndpointIDorGlobal(cmd *cobra.Command, args []string) {\n\tif len(args) < 1 {\n\t\tUsagef(cmd, \"Missing endpoint id or 'global' argument\")\n\t}\n\n\tif args[0] != \"global\" {\n\t\trequireEndpointID(cmd, args)\n\t}\n}\n\nfunc requirePath(cmd *cobra.Command, args []string) {\n\tif len(args) < 1 {\n\t\tUsagef(cmd, \"Missing path argument\")\n\t}\n\n\tif args[0] == \"\" {\n\t\tUsagef(cmd, \"Empty path argument\")\n\t}\n}\n\nfunc requireServiceID(cmd *cobra.Command, args []string) {\n\tif len(args) < 1 
{\n\t\tUsagef(cmd, \"Missing service id argument\")\n\t}\n\n\tif args[0] == \"\" {\n\t\tUsagef(cmd, \"Empty service id argument\")\n\t}\n}\n\n\/\/ TablePrinter prints the map[string][]string, which is an usual representation\n\/\/ of dumped BPF map, using tabwriter.\nfunc TablePrinter(firstTitle, secondTitle string, data map[string][]string) {\n\tw := tabwriter.NewWriter(os.Stdout, 5, 0, 3, ' ', 0)\n\n\tfmt.Fprintf(w, \"%s\\t%s\\t\\n\", firstTitle, secondTitle)\n\n\tfor key, value := range data {\n\t\tfor k, v := range value {\n\t\t\tif k == 0 {\n\t\t\t\tfmt.Fprintf(w, \"%s\\t%s\\t\\n\", key, v)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(w, \"%s\\t%s\\t\\n\", \"\", v)\n\t\t\t}\n\t\t}\n\t}\n\n\tw.Flush()\n}\n\n\/\/ Search 'result' for strings with escaped JSON inside, and expand the JSON.\nfunc expandNestedJSON(result bytes.Buffer) (bytes.Buffer, error) {\n\treStringWithJSON := regexp.MustCompile(`\"[^\"\\\\{]*{.*[^\\\\]\"`)\n\treJSON := regexp.MustCompile(`{.*}`)\n\tfor {\n\t\tvar (\n\t\t\tloc []int\n\t\t\tindent string\n\t\t)\n\n\t\t\/\/ Search for nested JSON; if we don't find any, then break.\n\t\tresBytes := result.Bytes()\n\t\tif loc = reStringWithJSON.FindIndex(resBytes); loc == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Determine the current indentation\n\t\tfor i := 0; i < loc[0]-1; i++ {\n\t\t\tidx := loc[0] - i - 1\n\t\t\tif resBytes[idx] != ' ' {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tindent = fmt.Sprintf(\"%s \", indent)\n\t\t}\n\n\t\tstringStart := loc[0]\n\t\tstringEnd := loc[1]\n\n\t\t\/\/ Unquote the string with the nested json.\n\t\tquotedBytes := resBytes[stringStart:stringEnd]\n\t\tunquoted, err := strconv.Unquote(string(quotedBytes))\n\t\tif err != nil {\n\t\t\treturn bytes.Buffer{}, fmt.Errorf(\"Failed to Unquote string: %s\\n%s\", err.Error(), string(quotedBytes))\n\t\t}\n\n\t\t\/\/ Find the JSON within the quoted string.\n\t\tnestedStart := 0\n\t\tnestedEnd := 0\n\t\tif locs := reJSON.FindAllStringIndex(unquoted, -1); locs != nil {\n\t\t\t\/\/ The last match is the longest one.\n\t\t\tlast := len(locs) - 1\n\t\t\tnestedStart = locs[last][0]\n\t\t\tnestedEnd = locs[last][1]\n\t\t} else if reJSON.Match(quotedBytes) {\n\t\t\t\/\/ The entire string is JSON\n\t\t\tnestedEnd = len(unquoted)\n\t\t}\n\n\t\t\/\/ Decode the nested JSON\n\t\tdecoded := \"\"\n\t\tif nestedEnd != 0 {\n\t\t\tm := make(map[string]interface{})\n\t\t\tnested := bytes.NewBufferString(unquoted[nestedStart:nestedEnd])\n\t\t\tif err := json.NewDecoder(nested).Decode(&m); err != nil {\n\t\t\t\treturn bytes.Buffer{}, fmt.Errorf(\"Failed to decode nested JSON: %s\", err.Error())\n\t\t\t}\n\t\t\tdecodedBytes, err := json.MarshalIndent(m, indent, \" \")\n\t\t\tif err != nil {\n\t\t\t\treturn bytes.Buffer{}, fmt.Errorf(\"Cannot marshal nested JSON: %s\", err.Error())\n\t\t\t}\n\t\t\tdecoded = string(decodedBytes)\n\t\t}\n\n\t\t\/\/ Serialize\n\t\tnextResult := bytes.Buffer{}\n\t\tnextResult.Write(resBytes[0:stringStart])\n\t\tnextResult.WriteString(string(unquoted[:nestedStart]))\n\t\tnextResult.WriteString(string(decoded))\n\t\tnextResult.WriteString(string(unquoted[nestedEnd:]))\n\t\tnextResult.Write(resBytes[stringEnd:])\n\t\tresult = nextResult\n\t}\n\n\treturn result, nil\n}\n\n\/\/ parseTrafficString converts the provided string to its corresponding\n\/\/ TrafficDirection. 
If the string does not correspond to a valid TrafficDirection\n\/\/ type, returns Invalid and a corresponding error.\nfunc parseTrafficString(td string) (policymap.TrafficDirection, error) {\n\tlowered := strings.ToLower(td)\n\tif lowered == \"ingress\" {\n\t\treturn policymap.Ingress, nil\n\t} else if lowered == \"egress\" {\n\t\treturn policymap.Egress, nil\n\t} else {\n\t\treturn policymap.Invalid, fmt.Errorf(\"invalid direction %q provided\", td)\n\t}\n\n}\n\nfunc updatePolicyKey(cmd *cobra.Command, args []string, add bool) {\n\tif len(args) < 3 {\n\t\tUsagef(cmd, \"<endpoint id>, <traffic-direction>, and <identity> required\")\n\t}\n\n\ttrafficDirection := args[1]\n\tparsedTd, err := parseTrafficString(trafficDirection)\n\tif err != nil {\n\t\tFatalf(\"Failed to convert %s to a valid traffic direction: %s\", args[1], err)\n\t}\n\n\tendpointID := args[0]\n\tif numericIdentity := identity.GetReservedID(endpointID); numericIdentity != identity.IdentityUnknown {\n\t\tendpointID = \"reserved_\" + strconv.FormatUint(uint64(numericIdentity), 10)\n\t}\n\n\tpolicyMapPath := bpf.MapPath(policymap.MapName + endpointID)\n\tpolicyMap, _, err := policymap.OpenMap(policyMapPath)\n\tif err != nil {\n\t\tFatalf(\"Cannot open policymap '%s' : %s\", policyMapPath, err)\n\t}\n\n\tpeerLbl, err := strconv.ParseUint(args[2], 10, 32)\n\tif err != nil {\n\t\tFatalf(\"Failed to convert %s\", args[2])\n\t}\n\n\tport := uint16(0)\n\tprotos := []uint8{}\n\tif len(args) > 3 {\n\t\tpp, err := parseL4PortsSlice([]string{args[3]})\n\t\tif err != nil {\n\t\t\tFatalf(\"Failed to parse L4: %s\", err)\n\t\t}\n\t\tport = pp[0].Port\n\t\tif port != 0 {\n\t\t\tproto, _ := u8proto.ParseProtocol(pp[0].Protocol)\n\t\t\tif proto == 0 {\n\t\t\t\tfor _, proto := range u8proto.ProtoIDs {\n\t\t\t\t\tprotos = append(protos, uint8(proto))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tprotos = append(protos, uint8(proto))\n\t\t\t}\n\t\t}\n\t}\n\tif len(protos) == 0 {\n\t\tprotos = append(protos, 0)\n\t}\n\n\tlabel := uint32(peerLbl)\n\tfor _, proto := range protos {\n\t\tu8p := u8proto.U8proto(proto)\n\t\tentry := fmt.Sprintf(\"%d %d\/%s\", label, port, u8p.String())\n\t\tif add == true {\n\t\t\tif err := policyMap.AllowL4(label, port, proto, parsedTd); err != nil {\n\t\t\t\tFatalf(\"Cannot add policy key '%s': %s\\n\", entry, err)\n\t\t\t}\n\t\t} else {\n\t\t\tif err := policyMap.DeleteL4(label, port, proto, parsedTd); err != nil {\n\t\t\t\tFatalf(\"Cannot delete policy key '%s': %s\\n\", entry, err)\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>cilium\/cmd: refactor parseTrafficString<commit_after>\/\/ Copyright 2016-2017 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport 
(\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/bpf\"\n\t\"github.com\/cilium\/cilium\/pkg\/endpoint\"\n\t\"github.com\/cilium\/cilium\/pkg\/identity\"\n\t\"github.com\/cilium\/cilium\/pkg\/maps\/policymap\"\n\t\"github.com\/cilium\/cilium\/pkg\/u8proto\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ Fatalf prints the Printf formatted message to stderr and exits the program\n\/\/ Note: os.Exit(1) is not recoverable\nfunc Fatalf(msg string, args ...interface{}) {\n\tfmt.Fprintf(os.Stderr, \"Error: %s\\n\", fmt.Sprintf(msg, args...))\n\tos.Exit(1)\n}\n\n\/\/ Usagef prints the Printf formatted message to stderr, prints usage help and\n\/\/ exits the program\n\/\/ Note: os.Exit(1) is not recoverable\nfunc Usagef(cmd *cobra.Command, msg string, args ...interface{}) {\n\ttxt := fmt.Sprintf(msg, args...)\n\tfmt.Fprintf(os.Stderr, \"Error: %s\\n\\n\", txt)\n\tcmd.Help()\n\tos.Exit(1)\n}\n\nfunc requireEndpointID(cmd *cobra.Command, args []string) {\n\tif len(args) < 1 {\n\t\tUsagef(cmd, \"Missing endpoint id argument\")\n\t}\n\n\tif id := identity.ReservedIdentities[args[0]]; id == identity.IdentityUnknown {\n\t\t_, _, err := endpoint.ValidateID(args[0])\n\n\t\tif err != nil {\n\t\t\tFatalf(\"Cannot parse endpoint id \\\"%s\\\": %s\", args[0], err)\n\t\t}\n\t}\n}\n\nfunc requireEndpointIDorGlobal(cmd *cobra.Command, args []string) {\n\tif len(args) < 1 {\n\t\tUsagef(cmd, \"Missing endpoint id or 'global' argument\")\n\t}\n\n\tif args[0] != \"global\" {\n\t\trequireEndpointID(cmd, args)\n\t}\n}\n\nfunc requirePath(cmd *cobra.Command, args []string) {\n\tif len(args) < 1 {\n\t\tUsagef(cmd, \"Missing path argument\")\n\t}\n\n\tif args[0] == \"\" {\n\t\tUsagef(cmd, \"Empty path argument\")\n\t}\n}\n\nfunc requireServiceID(cmd *cobra.Command, args []string) {\n\tif len(args) < 1 {\n\t\tUsagef(cmd, \"Missing service id argument\")\n\t}\n\n\tif args[0] == \"\" {\n\t\tUsagef(cmd, \"Empty service id argument\")\n\t}\n}\n\n\/\/ TablePrinter prints the map[string][]string, which is an usual representation\n\/\/ of dumped BPF map, using tabwriter.\nfunc TablePrinter(firstTitle, secondTitle string, data map[string][]string) {\n\tw := tabwriter.NewWriter(os.Stdout, 5, 0, 3, ' ', 0)\n\n\tfmt.Fprintf(w, \"%s\\t%s\\t\\n\", firstTitle, secondTitle)\n\n\tfor key, value := range data {\n\t\tfor k, v := range value {\n\t\t\tif k == 0 {\n\t\t\t\tfmt.Fprintf(w, \"%s\\t%s\\t\\n\", key, v)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(w, \"%s\\t%s\\t\\n\", \"\", v)\n\t\t\t}\n\t\t}\n\t}\n\n\tw.Flush()\n}\n\n\/\/ Search 'result' for strings with escaped JSON inside, and expand the JSON.\nfunc expandNestedJSON(result bytes.Buffer) (bytes.Buffer, error) {\n\treStringWithJSON := regexp.MustCompile(`\"[^\"\\\\{]*{.*[^\\\\]\"`)\n\treJSON := regexp.MustCompile(`{.*}`)\n\tfor {\n\t\tvar (\n\t\t\tloc []int\n\t\t\tindent string\n\t\t)\n\n\t\t\/\/ Search for nested JSON; if we don't find any, then break.\n\t\tresBytes := result.Bytes()\n\t\tif loc = reStringWithJSON.FindIndex(resBytes); loc == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Determine the current indentation\n\t\tfor i := 0; i < loc[0]-1; i++ {\n\t\t\tidx := loc[0] - i - 1\n\t\t\tif resBytes[idx] != ' ' {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tindent = fmt.Sprintf(\"%s \", indent)\n\t\t}\n\n\t\tstringStart := loc[0]\n\t\tstringEnd := loc[1]\n\n\t\t\/\/ Unquote the string with the nested json.\n\t\tquotedBytes := resBytes[stringStart:stringEnd]\n\t\tunquoted, err := 
strconv.Unquote(string(quotedBytes))\n\t\tif err != nil {\n\t\t\treturn bytes.Buffer{}, fmt.Errorf(\"Failed to Unquote string: %s\\n%s\", err.Error(), string(quotedBytes))\n\t\t}\n\n\t\t\/\/ Find the JSON within the quoted string.\n\t\tnestedStart := 0\n\t\tnestedEnd := 0\n\t\tif locs := reJSON.FindAllStringIndex(unquoted, -1); locs != nil {\n\t\t\t\/\/ The last match is the longest one.\n\t\t\tlast := len(locs) - 1\n\t\t\tnestedStart = locs[last][0]\n\t\t\tnestedEnd = locs[last][1]\n\t\t} else if reJSON.Match(quotedBytes) {\n\t\t\t\/\/ The entire string is JSON\n\t\t\tnestedEnd = len(unquoted)\n\t\t}\n\n\t\t\/\/ Decode the nested JSON\n\t\tdecoded := \"\"\n\t\tif nestedEnd != 0 {\n\t\t\tm := make(map[string]interface{})\n\t\t\tnested := bytes.NewBufferString(unquoted[nestedStart:nestedEnd])\n\t\t\tif err := json.NewDecoder(nested).Decode(&m); err != nil {\n\t\t\t\treturn bytes.Buffer{}, fmt.Errorf(\"Failed to decode nested JSON: %s\", err.Error())\n\t\t\t}\n\t\t\tdecodedBytes, err := json.MarshalIndent(m, indent, \" \")\n\t\t\tif err != nil {\n\t\t\t\treturn bytes.Buffer{}, fmt.Errorf(\"Cannot marshal nested JSON: %s\", err.Error())\n\t\t\t}\n\t\t\tdecoded = string(decodedBytes)\n\t\t}\n\n\t\t\/\/ Serialize\n\t\tnextResult := bytes.Buffer{}\n\t\tnextResult.Write(resBytes[0:stringStart])\n\t\tnextResult.WriteString(string(unquoted[:nestedStart]))\n\t\tnextResult.WriteString(string(decoded))\n\t\tnextResult.WriteString(string(unquoted[nestedEnd:]))\n\t\tnextResult.Write(resBytes[stringEnd:])\n\t\tresult = nextResult\n\t}\n\n\treturn result, nil\n}\n\n\/\/ parseTrafficString converts the provided string to its corresponding\n\/\/ TrafficDirection. If the string does not correspond to a valid TrafficDirection\n\/\/ type, returns Invalid and a corresponding error.\nfunc parseTrafficString(td string) (policymap.TrafficDirection, error) {\n\tlowered := strings.ToLower(td)\n\n\tswitch lowered {\n\tcase \"ingress\":\n\t\treturn policymap.Ingress, nil\n\tcase \"egress\":\n\t\treturn policymap.Egress, nil\n\tdefault:\n\t\treturn policymap.Invalid, fmt.Errorf(\"invalid direction %q provided\", td)\n\t}\n\n}\n\nfunc updatePolicyKey(cmd *cobra.Command, args []string, add bool) {\n\tif len(args) < 3 {\n\t\tUsagef(cmd, \"<endpoint id>, <traffic-direction>, and <identity> required\")\n\t}\n\n\ttrafficDirection := args[1]\n\tparsedTd, err := parseTrafficString(trafficDirection)\n\tif err != nil {\n\t\tFatalf(\"Failed to convert %s to a valid traffic direction: %s\", args[1], err)\n\t}\n\n\tendpointID := args[0]\n\tif numericIdentity := identity.GetReservedID(endpointID); numericIdentity != identity.IdentityUnknown {\n\t\tendpointID = \"reserved_\" + strconv.FormatUint(uint64(numericIdentity), 10)\n\t}\n\n\tpolicyMapPath := bpf.MapPath(policymap.MapName + endpointID)\n\tpolicyMap, _, err := policymap.OpenMap(policyMapPath)\n\tif err != nil {\n\t\tFatalf(\"Cannot open policymap '%s' : %s\", policyMapPath, err)\n\t}\n\n\tpeerLbl, err := strconv.ParseUint(args[2], 10, 32)\n\tif err != nil {\n\t\tFatalf(\"Failed to convert %s\", args[2])\n\t}\n\n\tport := uint16(0)\n\tprotos := []uint8{}\n\tif len(args) > 3 {\n\t\tpp, err := parseL4PortsSlice([]string{args[3]})\n\t\tif err != nil {\n\t\t\tFatalf(\"Failed to parse L4: %s\", err)\n\t\t}\n\t\tport = pp[0].Port\n\t\tif port != 0 {\n\t\t\tproto, _ := u8proto.ParseProtocol(pp[0].Protocol)\n\t\t\tif proto == 0 {\n\t\t\t\tfor _, proto := range u8proto.ProtoIDs {\n\t\t\t\t\tprotos = append(protos, uint8(proto))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tprotos = append(protos, 
uint8(proto))\n\t\t\t}\n\t\t}\n\t}\n\tif len(protos) == 0 {\n\t\tprotos = append(protos, 0)\n\t}\n\n\tlabel := uint32(peerLbl)\n\tfor _, proto := range protos {\n\t\tu8p := u8proto.U8proto(proto)\n\t\tentry := fmt.Sprintf(\"%d %d\/%s\", label, port, u8p.String())\n\t\tif add == true {\n\t\t\tif err := policyMap.AllowL4(label, port, proto, parsedTd); err != nil {\n\t\t\t\tFatalf(\"Cannot add policy key '%s': %s\\n\", entry, err)\n\t\t\t}\n\t\t} else {\n\t\t\tif err := policyMap.DeleteL4(label, port, proto, parsedTd); err != nil {\n\t\t\t\tFatalf(\"Cannot delete policy key '%s': %s\\n\", entry, err)\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build integ\n\/\/ Copyright Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage helmupgrade\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\n\t\"istio.io\/istio\/pkg\/test\/env\"\n\t\"istio.io\/istio\/pkg\/test\/framework\"\n\t\"istio.io\/istio\/pkg\/test\/framework\/components\/cluster\"\n\tkubecluster \"istio.io\/istio\/pkg\/test\/framework\/components\/cluster\/kube\"\n\t\"istio.io\/istio\/pkg\/test\/framework\/image\"\n\t\"istio.io\/istio\/pkg\/test\/helm\"\n\tkubetest \"istio.io\/istio\/pkg\/test\/kube\"\n\t\"istio.io\/istio\/pkg\/test\/scopes\"\n\t\"istio.io\/istio\/pkg\/test\/util\/retry\"\n\thelmtest \"istio.io\/istio\/tests\/integration\/helm\"\n\t\"istio.io\/istio\/tests\/util\/sanitycheck\"\n)\n\nconst (\n\tgcrHub = \"gcr.io\/istio-release\"\n\n\tdefaultValues = `\nglobal:\n hub: %s\n tag: %s\n`\n\trevisionValues = `\nglobal:\n hub: %s\n tag: %s\n\nrevision: canary\n`\n\ttarGzSuffix = \".tar.gz\"\n\n\trevisionLabel = \"canary\"\n\trevisionChartSuffix = \"-canary\"\n)\n\n\/\/ previousChartPath is path of Helm charts for previous Istio deployments.\nvar previousChartPath = filepath.Join(env.IstioSrc, \"tests\/integration\/helm\/testdata\/\")\n\n\/\/ upgradeCharts upgrades Istio using Helm charts with the provided\n\/\/ override values file to the latest charts in $ISTIO_SRC\/manifests\nfunc upgradeCharts(ctx framework.TestContext, h *helm.Helm, overrideValuesFile string) {\n\t\/\/ Upgrade base chart\n\terr := h.UpgradeChart(helmtest.BaseReleaseName, filepath.Join(helmtest.ChartPath, helmtest.BaseChart),\n\t\thelmtest.IstioNamespace, overrideValuesFile, helmtest.Timeout)\n\tif err != nil {\n\t\tctx.Fatalf(\"failed to upgrade istio %s chart\", helmtest.BaseChart)\n\t}\n\n\t\/\/ Upgrade discovery chart\n\terr = h.UpgradeChart(helmtest.IstiodReleaseName, filepath.Join(helmtest.ChartPath, helmtest.ControlChartsDir, helmtest.DiscoveryChart),\n\t\thelmtest.IstioNamespace, overrideValuesFile, helmtest.Timeout)\n\tif err != nil {\n\t\tctx.Fatalf(\"failed to upgrade istio %s chart\", helmtest.DiscoveryChart)\n\t}\n\n\t\/\/ Upgrade ingress gateway chart\n\terr = h.UpgradeChart(helmtest.IngressReleaseName, filepath.Join(helmtest.ChartPath, helmtest.GatewayChartsDir, 
helmtest.IngressGatewayChart),\n\t\thelmtest.IstioNamespace, overrideValuesFile, helmtest.Timeout)\n\tif err != nil {\n\t\tctx.Fatalf(\"failed to upgrade istio %s chart\", helmtest.IngressGatewayChart)\n\t}\n\n\t\/\/ Upgrade egress gateway chart\n\terr = h.UpgradeChart(helmtest.EgressReleaseName, filepath.Join(helmtest.ChartPath, helmtest.GatewayChartsDir, helmtest.EgressGatewayChart),\n\t\thelmtest.IstioNamespace, overrideValuesFile, helmtest.Timeout)\n\tif err != nil {\n\t\tctx.Fatalf(\"failed to upgrade istio %s chart\", helmtest.EgressGatewayChart)\n\t}\n}\n\n\/\/ deleteIstio deletes installed Istio Helm charts and resources\nfunc deleteIstio(cs cluster.Cluster, h *helm.Helm) error {\n\tscopes.Framework.Infof(\"cleaning up resources\")\n\tif err := h.DeleteChart(helmtest.EgressReleaseName, helmtest.IstioNamespace); err != nil {\n\t\treturn fmt.Errorf(\"failed to delete %s release\", helmtest.EgressReleaseName)\n\t}\n\tif err := h.DeleteChart(helmtest.IngressReleaseName, helmtest.IstioNamespace); err != nil {\n\t\treturn fmt.Errorf(\"failed to delete %s release\", helmtest.IngressReleaseName)\n\t}\n\tif err := h.DeleteChart(helmtest.IstiodReleaseName, helmtest.IstioNamespace); err != nil {\n\t\treturn fmt.Errorf(\"failed to delete %s release\", helmtest.IngressReleaseName)\n\t}\n\tif err := h.DeleteChart(helmtest.BaseReleaseName, helmtest.IstioNamespace); err != nil {\n\t\treturn fmt.Errorf(\"failed to delete %s release\", helmtest.BaseReleaseName)\n\t}\n\tif err := cs.CoreV1().Namespaces().Delete(context.TODO(), helmtest.IstioNamespace, metav1.DeleteOptions{}); err != nil {\n\t\treturn fmt.Errorf(\"failed to delete istio namespace: %v\", err)\n\t}\n\tif err := kubetest.WaitForNamespaceDeletion(cs, helmtest.IstioNamespace, retry.Timeout(helmtest.RetryTimeOut)); err != nil {\n\t\treturn fmt.Errorf(\"wating for istio namespace to be deleted: %v\", err)\n\t}\n\n\treturn nil\n}\n\n\/\/ deleteIstioCanary deletes installed Istio Helm charts and resources\nfunc deleteIstioRevision(h *helm.Helm, revision string) error {\n\tscopes.Framework.Infof(\"cleaning up canary resources\")\n\tif err := h.DeleteChart(helmtest.IstiodReleaseName+revision, helmtest.IstioNamespace); err != nil {\n\t\treturn fmt.Errorf(\"failed to delete %s release\", helmtest.IngressReleaseName)\n\t}\n\n\treturn nil\n}\n\n\/\/ getValuesOverrides returns the the values file created to pass into Helm override default values\n\/\/ for the hub and tag\nfunc getValuesOverrides(ctx framework.TestContext, valuesStr, hub, tag string) string {\n\tworkDir := ctx.CreateTmpDirectoryOrFail(\"helm\")\n\toverrideValues := fmt.Sprintf(valuesStr, hub, tag)\n\toverrideValuesFile := filepath.Join(workDir, \"values.yaml\")\n\tif err := ioutil.WriteFile(overrideValuesFile, []byte(overrideValues), os.ModePerm); err != nil {\n\t\tctx.Fatalf(\"failed to write iop cr file: %v\", err)\n\t}\n\n\treturn overrideValuesFile\n}\n\n\/\/ performInPlaceUpgradeFunc returns the provided function necessary to run inside of a integration test\n\/\/ for upgrade capability\nfunc performInPlaceUpgradeFunc(previousVersion string) func(framework.TestContext) {\n\treturn func(t framework.TestContext) {\n\t\tcs := t.Clusters().Default().(*kubecluster.Cluster)\n\t\th := helm.New(cs.Filename(), filepath.Join(previousChartPath, previousVersion))\n\n\t\tt.ConditionalCleanup(func() {\n\t\t\t\/\/ only need to do call this once as helm doesn't need to remove\n\t\t\t\/\/ all versions\n\t\t\terr := deleteIstio(cs, h)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"could not delete 
istio: %v\", err)\n\t\t\t}\n\t\t})\n\n\t\toverrideValuesFile := getValuesOverrides(t, defaultValues, gcrHub, previousVersion)\n\t\thelmtest.InstallIstio(t, cs, h, tarGzSuffix, overrideValuesFile)\n\t\thelmtest.VerifyInstallation(t, cs)\n\n\t\toldClient, oldServer := sanitycheck.SetupTrafficTest(t, t, \"\")\n\t\tsanitycheck.RunTrafficTestClientServer(t, oldClient, oldServer)\n\n\t\t\/\/ now upgrade istio to the latest version found in this branch\n\t\t\/\/ use the command line or environmental vars from the user to set\n\t\t\/\/ the hub\/tag\n\t\ts, err := image.SettingsFromCommandLine()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\toverrideValuesFile = getValuesOverrides(t, defaultValues, s.Hub, s.Tag)\n\t\tupgradeCharts(t, h, overrideValuesFile)\n\t\thelmtest.VerifyInstallation(t, cs)\n\n\t\tnewClient, newServer := sanitycheck.SetupTrafficTest(t, t, \"\")\n\t\tsanitycheck.RunTrafficTestClientServer(t, newClient, newServer)\n\n\t\t\/\/ now check that we are compatible with N-1 proxy with N proxy\n\t\tsanitycheck.RunTrafficTestClientServer(t, oldClient, newServer)\n\t}\n}\n\n\/\/ performRevisionUpgradeFunc returns the provided function necessary to run inside of a integration test\n\/\/ for upgrade capability with revisions\nfunc performRevisionUpgradeFunc(previousVersion string) func(framework.TestContext) {\n\treturn func(t framework.TestContext) {\n\t\tcs := t.Clusters().Default().(*kubecluster.Cluster)\n\t\th := helm.New(cs.Filename(), filepath.Join(previousChartPath, previousVersion))\n\n\t\tt.ConditionalCleanup(func() {\n\t\t\terr := deleteIstioRevision(h, revisionChartSuffix)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"could not delete istio: %v\", err)\n\t\t\t}\n\t\t\terr = deleteIstio(cs, h)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"could not delete istio: %v\", err)\n\t\t\t}\n\t\t})\n\n\t\toverrideValuesFile := getValuesOverrides(t, defaultValues, gcrHub, previousVersion)\n\t\thelmtest.InstallIstio(t, cs, h, tarGzSuffix, overrideValuesFile)\n\t\thelmtest.VerifyInstallation(t, cs)\n\n\t\toldClient, oldServer := sanitycheck.SetupTrafficTest(t, t, \"\")\n\t\tsanitycheck.RunTrafficTestClientServer(t, oldClient, oldServer)\n\n\t\t\/\/ now upgrade istio to the latest version found in this branch\n\t\t\/\/ use the command line or environmental vars from the user to set\n\t\t\/\/ the hub\/tag\n\t\ts, err := image.SettingsFromCommandLine()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\toverrideValuesFile = getValuesOverrides(t, revisionValues, s.Hub, s.Tag)\n\t\thelmtest.InstallIstioWithRevision(t, cs, h, tarGzSuffix, revisionChartSuffix, overrideValuesFile)\n\t\thelmtest.VerifyInstallation(t, cs)\n\n\t\tnewClient, newServer := sanitycheck.SetupTrafficTest(t, t, revisionLabel)\n\t\tsanitycheck.RunTrafficTestClientServer(t, newClient, newServer)\n\n\t\t\/\/ now check that we are compatible with N-1 proxy with N proxy\n\t\tsanitycheck.RunTrafficTestClientServer(t, oldClient, newServer)\n\t}\n}\n<commit_msg>modify parameters in helm_upgrade (#33465)<commit_after>\/\/ +build integ\n\/\/ Copyright Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express 
or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage helmupgrade\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\n\t\"istio.io\/istio\/pkg\/test\/env\"\n\t\"istio.io\/istio\/pkg\/test\/framework\"\n\t\"istio.io\/istio\/pkg\/test\/framework\/components\/cluster\"\n\tkubecluster \"istio.io\/istio\/pkg\/test\/framework\/components\/cluster\/kube\"\n\t\"istio.io\/istio\/pkg\/test\/framework\/image\"\n\t\"istio.io\/istio\/pkg\/test\/helm\"\n\tkubetest \"istio.io\/istio\/pkg\/test\/kube\"\n\t\"istio.io\/istio\/pkg\/test\/scopes\"\n\t\"istio.io\/istio\/pkg\/test\/util\/retry\"\n\thelmtest \"istio.io\/istio\/tests\/integration\/helm\"\n\t\"istio.io\/istio\/tests\/util\/sanitycheck\"\n)\n\nconst (\n\tgcrHub = \"gcr.io\/istio-release\"\n\n\tdefaultValues = `\nglobal:\n hub: %s\n tag: %s\n`\n\trevisionValues = `\nglobal:\n hub: %s\n tag: %s\n\nrevision: canary\n`\n\ttarGzSuffix = \".tar.gz\"\n\n\trevisionLabel = \"canary\"\n\trevisionChartSuffix = \"-canary\"\n)\n\n\/\/ previousChartPath is path of Helm charts for previous Istio deployments.\nvar previousChartPath = filepath.Join(env.IstioSrc, \"tests\/integration\/helm\/testdata\/\")\n\n\/\/ upgradeCharts upgrades Istio using Helm charts with the provided\n\/\/ override values file to the latest charts in $ISTIO_SRC\/manifests\nfunc upgradeCharts(ctx framework.TestContext, h *helm.Helm, overrideValuesFile string) {\n\t\/\/ Upgrade base chart\n\terr := h.UpgradeChart(helmtest.BaseReleaseName, filepath.Join(helmtest.ChartPath, helmtest.BaseChart),\n\t\thelmtest.IstioNamespace, overrideValuesFile, helmtest.Timeout)\n\tif err != nil {\n\t\tctx.Fatalf(\"failed to upgrade istio %s chart\", helmtest.BaseChart)\n\t}\n\n\t\/\/ Upgrade discovery chart\n\terr = h.UpgradeChart(helmtest.IstiodReleaseName, filepath.Join(helmtest.ChartPath, helmtest.ControlChartsDir, helmtest.DiscoveryChart),\n\t\thelmtest.IstioNamespace, overrideValuesFile, helmtest.Timeout)\n\tif err != nil {\n\t\tctx.Fatalf(\"failed to upgrade istio %s chart\", helmtest.DiscoveryChart)\n\t}\n\n\t\/\/ Upgrade ingress gateway chart\n\terr = h.UpgradeChart(helmtest.IngressReleaseName, filepath.Join(helmtest.ChartPath, helmtest.GatewayChartsDir, helmtest.IngressGatewayChart),\n\t\thelmtest.IstioNamespace, overrideValuesFile, helmtest.Timeout)\n\tif err != nil {\n\t\tctx.Fatalf(\"failed to upgrade istio %s chart\", helmtest.IngressGatewayChart)\n\t}\n\n\t\/\/ Upgrade egress gateway chart\n\terr = h.UpgradeChart(helmtest.EgressReleaseName, filepath.Join(helmtest.ChartPath, helmtest.GatewayChartsDir, helmtest.EgressGatewayChart),\n\t\thelmtest.IstioNamespace, overrideValuesFile, helmtest.Timeout)\n\tif err != nil {\n\t\tctx.Fatalf(\"failed to upgrade istio %s chart\", helmtest.EgressGatewayChart)\n\t}\n}\n\n\/\/ deleteIstio deletes installed Istio Helm charts and resources\nfunc deleteIstio(cs cluster.Cluster, h *helm.Helm) error {\n\tscopes.Framework.Infof(\"cleaning up resources\")\n\tif err := h.DeleteChart(helmtest.EgressReleaseName, helmtest.IstioNamespace); err != nil {\n\t\treturn fmt.Errorf(\"failed to delete %s release\", helmtest.EgressReleaseName)\n\t}\n\tif err := h.DeleteChart(helmtest.IngressReleaseName, helmtest.IstioNamespace); err != nil {\n\t\treturn fmt.Errorf(\"failed to delete %s release\", helmtest.IngressReleaseName)\n\t}\n\tif err := h.DeleteChart(helmtest.IstiodReleaseName, helmtest.IstioNamespace); 
err != nil {\n\t\treturn fmt.Errorf(\"failed to delete %s release\", helmtest.IstiodReleaseName)\n\t}\n\tif err := h.DeleteChart(helmtest.BaseReleaseName, helmtest.IstioNamespace); err != nil {\n\t\treturn fmt.Errorf(\"failed to delete %s release\", helmtest.BaseReleaseName)\n\t}\n\tif err := cs.CoreV1().Namespaces().Delete(context.TODO(), helmtest.IstioNamespace, metav1.DeleteOptions{}); err != nil {\n\t\treturn fmt.Errorf(\"failed to delete istio namespace: %v\", err)\n\t}\n\tif err := kubetest.WaitForNamespaceDeletion(cs, helmtest.IstioNamespace, retry.Timeout(helmtest.RetryTimeOut)); err != nil {\n\t\treturn fmt.Errorf(\"waiting for istio namespace to be deleted: %v\", err)\n\t}\n\n\treturn nil\n}\n\n\/\/ deleteIstioRevision deletes the istiod release installed for the given revision\nfunc deleteIstioRevision(h *helm.Helm, revision string) error {\n\tscopes.Framework.Infof(\"cleaning up canary resources\")\n\tif err := h.DeleteChart(helmtest.IstiodReleaseName+revision, helmtest.IstioNamespace); err != nil {\n\t\treturn fmt.Errorf(\"failed to delete %s release\", helmtest.IstiodReleaseName+revision)\n\t}\n\n\treturn nil\n}\n\n\/\/ getValuesOverrides returns the values file created to pass into Helm to override default values\n\/\/ for the hub and tag\nfunc getValuesOverrides(ctx framework.TestContext, valuesStr, hub, tag string) string {\n\tworkDir := ctx.CreateTmpDirectoryOrFail(\"helm\")\n\toverrideValues := fmt.Sprintf(valuesStr, hub, tag)\n\toverrideValuesFile := filepath.Join(workDir, \"values.yaml\")\n\tif err := ioutil.WriteFile(overrideValuesFile, []byte(overrideValues), os.ModePerm); err != nil {\n\t\tctx.Fatalf(\"failed to write iop cr file: %v\", err)\n\t}\n\n\treturn overrideValuesFile\n}\n\n\/\/ performInPlaceUpgradeFunc returns the provided function necessary to run inside of an integration test\n\/\/ for upgrade capability\nfunc performInPlaceUpgradeFunc(previousVersion string) func(framework.TestContext) {\n\treturn func(t framework.TestContext) {\n\t\tcs := t.Clusters().Default().(*kubecluster.Cluster)\n\t\th := helm.New(cs.Filename(), filepath.Join(previousChartPath, previousVersion))\n\n\t\tt.ConditionalCleanup(func() {\n\t\t\t\/\/ only need to call this once as helm doesn't need to remove\n\t\t\t\/\/ all versions\n\t\t\terr := deleteIstio(cs, h)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"could not delete istio: %v\", err)\n\t\t\t}\n\t\t})\n\n\t\toverrideValuesFile := getValuesOverrides(t, defaultValues, gcrHub, previousVersion)\n\t\thelmtest.InstallIstio(t, cs, h, tarGzSuffix, overrideValuesFile)\n\t\thelmtest.VerifyInstallation(t, cs)\n\n\t\toldClient, oldServer := sanitycheck.SetupTrafficTest(t, t, \"\")\n\t\tsanitycheck.RunTrafficTestClientServer(t, oldClient, oldServer)\n\n\t\t\/\/ now upgrade istio to the latest version found in this branch\n\t\t\/\/ use the command line or environmental vars from the user to set\n\t\t\/\/ the hub\/tag\n\t\ts, err := image.SettingsFromCommandLine()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\toverrideValuesFile = getValuesOverrides(t, defaultValues, s.Hub, s.Tag)\n\t\tupgradeCharts(t, h, overrideValuesFile)\n\t\thelmtest.VerifyInstallation(t, cs)\n\n\t\tnewClient, newServer := sanitycheck.SetupTrafficTest(t, t, \"\")\n\t\tsanitycheck.RunTrafficTestClientServer(t, newClient, newServer)\n\n\t\t\/\/ now check that we are compatible with N-1 proxy with N proxy\n\t\tsanitycheck.RunTrafficTestClientServer(t, oldClient, newServer)\n\t}\n}\n\n\/\/ performRevisionUpgradeFunc returns the provided function necessary to run inside of an 
integration test\n\/\/ for upgrade capability with revisions\nfunc performRevisionUpgradeFunc(previousVersion string) func(framework.TestContext) {\n\treturn func(t framework.TestContext) {\n\t\tcs := t.Clusters().Default().(*kubecluster.Cluster)\n\t\th := helm.New(cs.Filename(), filepath.Join(previousChartPath, previousVersion))\n\n\t\tt.ConditionalCleanup(func() {\n\t\t\terr := deleteIstioRevision(h, revisionChartSuffix)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"could not delete istio: %v\", err)\n\t\t\t}\n\t\t\terr = deleteIstio(cs, h)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"could not delete istio: %v\", err)\n\t\t\t}\n\t\t})\n\n\t\toverrideValuesFile := getValuesOverrides(t, defaultValues, gcrHub, previousVersion)\n\t\thelmtest.InstallIstio(t, cs, h, tarGzSuffix, overrideValuesFile)\n\t\thelmtest.VerifyInstallation(t, cs)\n\n\t\toldClient, oldServer := sanitycheck.SetupTrafficTest(t, t, \"\")\n\t\tsanitycheck.RunTrafficTestClientServer(t, oldClient, oldServer)\n\n\t\t\/\/ now upgrade istio to the latest version found in this branch\n\t\t\/\/ use the command line or environmental vars from the user to set\n\t\t\/\/ the hub\/tag\n\t\ts, err := image.SettingsFromCommandLine()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\toverrideValuesFile = getValuesOverrides(t, revisionValues, s.Hub, s.Tag)\n\t\thelmtest.InstallIstioWithRevision(t, cs, h, tarGzSuffix, revisionChartSuffix, overrideValuesFile)\n\t\thelmtest.VerifyInstallation(t, cs)\n\n\t\tnewClient, newServer := sanitycheck.SetupTrafficTest(t, t, revisionLabel)\n\t\tsanitycheck.RunTrafficTestClientServer(t, newClient, newServer)\n\n\t\t\/\/ now check that we are compatible with N-1 proxy with N proxy\n\t\tsanitycheck.RunTrafficTestClientServer(t, oldClient, newServer)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n \"io\/ioutil\"\n \"net\/http\"\n \"net\/url\"\n \"fmt\"\n)\n\ntype API struct {\n token string\n endpoint string\n}\n\nfunc New(token string) *API {\n\n return &API{\n \t token: token,\n\t endpoint: \"https:\/\/api.collection.cooperhewitt.org\/rest\/\",\n }\n}\n\nfunction ExecuteMethod (api *API, method string, params *url.Values) ([]byte error) {\n\n\tparams.Set(\"method\", method)\n\tparams.Set(\"access_token\", api.token)\n\n\treq, err := http.NewRequest(\"POST\", api.endpoint, params)\n\n\tclient := &http.Client{}\n\trsp, err := client.Do(req)\n\n\tif err != nil {\n \tpanic(err)\n\t}\n\n\tdefer rsp.Body.Close()\n\n\tfmt.Println(\"response Status:\", rsp.Status)\n\tfmt.Println(\"response Headers:\", rsp.Header)\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\treturn body\n}\n\n\n<commit_msg>build but still doesn't quite work<commit_after>package api\n\nimport (\n \"io\/ioutil\"\n \"net\/http\"\n \"net\/url\"\n \"fmt\"\n)\n\ntype API struct {\n token string\n endpoint string\n}\n\nfunc Client(token string) *API {\n\n return &API{\n \t token: token,\n\t endpoint: \"https:\/\/api.collection.cooperhewitt.org\/rest\/\",\n }\n}\n\nfunc (api *API) ExecuteMethod (method string, params *url.Values) ([]byte, error) {\n\n\tparams.Set(\"method\", method)\n\tparams.Set(\"access_token\", api.token)\n\n\treq, err := http.NewRequest(\"POST\", api.endpoint, nil)\n\treq.URL.RawQuery = (*params).Encode()\n\n\tclient := &http.Client{}\n\trsp, err := client.Do(req)\n\n\tif err != nil {\n \tpanic(err)\n\t}\n\n\tdefer rsp.Body.Close()\n\n\tfmt.Println(\"response Status:\", rsp.Status)\n\tfmt.Println(\"response Headers:\", rsp.Header)\n\n\tbody, _ := ioutil.ReadAll(rsp.Body)\n\treturn body, 
nil\n}\n\n\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage awstasks\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\t\"github.com\/golang\/glog\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\/cloudup\/awsup\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\/cloudup\/terraform\"\n)\n\n\/\/go:generate fitask -type=NatGateway\ntype NatGateway struct {\n\tName *string\n\tElasticIP *ElasticIP\n\tSubnet *Subnet\n\tID *string\n\n\tEgressId *string\n\n\t\/\/ Shared is set if this is a shared NatGateway\n\tShared *bool\n\n\t\/\/ We can't tag NatGateways, so we have to find through a surrogate\n\tAssociatedRouteTable *RouteTable\n}\n\nvar _ fi.CompareWithID = &NatGateway{} \/\/ Validate the IDs\n\nfunc (e *NatGateway) CompareWithID() *string {\n\treturn e.ID\n}\n\nfunc (e *NatGateway) Find(c *fi.Context) (*NatGateway, error) {\n\n\tcloud := c.Cloud.(awsup.AWSCloud)\n\tvar ngw *ec2.NatGateway\n\tactual := &NatGateway{}\n\n\tif e.ID != nil && *e.ID != \"\" {\n\t\t\/\/ We have an existing NGW, lets look up the EIP\n\t\tvar ngwIds []*string\n\t\tngwIds = append(ngwIds, e.ID)\n\n\t\trequest := &ec2.DescribeNatGatewaysInput{\n\t\t\tNatGatewayIds: ngwIds,\n\t\t}\n\n\t\tresponse, err := cloud.EC2().DescribeNatGateways(request)\n\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error listing Nat Gateways %v\", err)\n\t\t}\n\n\t\tif len(response.NatGateways) != 1 {\n\t\t\treturn nil, fmt.Errorf(\"found %b NAT Gateways, expected 1\", len(response.NatGateways))\n\t\t}\n\t\tif len(response.NatGateways) == 1 {\n\t\t\tngw = response.NatGateways[0]\n\t\t}\n\n\t\tif len(response.NatGateways[0].NatGatewayAddresses) != 1 {\n\t\t\treturn nil, fmt.Errorf(\"found %b EIP Addresses for 1 NAT Gateway, expected 1\", len(response.NatGateways))\n\t\t}\n\t\tif len(response.NatGateways[0].NatGatewayAddresses) == 1 {\n\n\t\t\tactual.ElasticIP = &ElasticIP{ID: response.NatGateways[0].NatGatewayAddresses[0].AllocationId}\n\n\t\t}\n\t} else {\n\t\t\/\/ This is the normal\/default path\n\t\tngw, err := e.findNatGateway(c)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif ngw == nil {\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\n\tactual.ID = ngw.NatGatewayId\n\n\tactual.Subnet = e.Subnet\n\tif len(ngw.NatGatewayAddresses) == 0 {\n\t\t\/\/ Not sure if this ever happens\n\t\tactual.ElasticIP = nil\n\t} else if len(ngw.NatGatewayAddresses) == 1 {\n\t\tactual.ElasticIP = &ElasticIP{ID: ngw.NatGatewayAddresses[0].AllocationId}\n\t} else {\n\t\treturn nil, fmt.Errorf(\"found multiple elastic IPs attached to NatGateway %q\", aws.StringValue(ngw.NatGatewayId))\n\t}\n\n\t\/\/ NATGateways don't have a Name (no tags), so we set the name to avoid spurious changes\n\tactual.Name = e.Name\n\n\tactual.AssociatedRouteTable = e.AssociatedRouteTable\n\n\te.ID = actual.ID\n\treturn actual, nil\n}\n\nfunc (e *NatGateway) findNatGateway(c *fi.Context) (*ec2.NatGateway, error) {\n\tcloud := 
c.Cloud.(awsup.AWSCloud)\n\n\tid := e.ID\n\n\t\/\/ Find via route on private route table\n\tif id == nil && e.AssociatedRouteTable != nil {\n\t\tngw, err := findNatGatewayFromRouteTable(cloud, e.AssociatedRouteTable)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif ngw != nil {\n\t\t\treturn ngw, nil\n\t\t}\n\t}\n\n\t\/\/ Find via tag on subnet\n\t\/\/ TODO: Obsolete - we can get from the route table instead\n\tif id == nil && e.Subnet != nil {\n\t\tvar filters []*ec2.Filter\n\t\tfilters = append(filters, awsup.NewEC2Filter(\"key\", \"AssociatedNatgateway\"))\n\t\tif e.Subnet.ID == nil {\n\t\t\tglog.V(2).Infof(\"Unable to find subnet, bypassing Find() for NatGateway\")\n\t\t\treturn nil, nil\n\t\t}\n\t\tfilters = append(filters, awsup.NewEC2Filter(\"resource-id\", *e.Subnet.ID))\n\n\t\trequest := &ec2.DescribeTagsInput{\n\t\t\tFilters: filters,\n\t\t}\n\n\t\tresponse, err := cloud.EC2().DescribeTags(request)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error listing tags: %v\", err)\n\t\t}\n\n\t\tif response == nil || len(response.Tags) == 0 {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tif len(response.Tags) != 1 {\n\t\t\treturn nil, fmt.Errorf(\"found multiple tags for: %v\", e)\n\t\t}\n\t\tt := response.Tags[0]\n\t\tid = t.Value\n\t\tglog.V(2).Infof(\"Found NatGateway via subnet tag: %v\", *id)\n\t}\n\n\tif id != nil {\n\t\treturn findNatGatewayById(cloud, id)\n\t}\n\n\treturn nil, nil\n}\n\nfunc findNatGatewayById(cloud awsup.AWSCloud, id *string) (*ec2.NatGateway, error) {\n\trequest := &ec2.DescribeNatGatewaysInput{}\n\trequest.NatGatewayIds = []*string{id}\n\tresponse, err := cloud.EC2().DescribeNatGateways(request)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error listing NatGateway %q: %v\", id, err)\n\t}\n\n\tif response == nil || len(response.NatGateways) == 0 {\n\t\tglog.V(2).Infof(\"Unable to find NatGateway %q\", id)\n\t\treturn nil, nil\n\t}\n\tif len(response.NatGateways) != 1 {\n\t\treturn nil, fmt.Errorf(\"found multiple NatGateways with id %q\", id)\n\t}\n\treturn response.NatGateways[0], nil\n}\n\nfunc findNatGatewayFromRouteTable(cloud awsup.AWSCloud, routeTable *RouteTable) (*ec2.NatGateway, error) {\n\t\/\/ Find via route on private route table\n\tif routeTable.ID != nil {\n\t\tglog.V(2).Infof(\"trying to match NatGateway via RouteTable %s\", routeTable.ID)\n\t\trt, err := routeTable.findEc2RouteTable(cloud)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error finding associated RouteTable to NatGateway: %v\", err)\n\t\t}\n\n\t\tif rt != nil {\n\t\t\tvar natGatewayIDs []*string\n\t\t\tfor _, route := range rt.Routes {\n\t\t\t\tif route.NatGatewayId != nil {\n\t\t\t\t\tnatGatewayIDs = append(natGatewayIDs, route.NatGatewayId)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(natGatewayIDs) == 0 {\n\t\t\t\tglog.V(2).Infof(\"no NatGateway found in route table %s\", *rt.RouteTableId)\n\t\t\t} else if len(natGatewayIDs) > 1 {\n\t\t\t\treturn nil, fmt.Errorf(\"found multiple NatGateways in route table %s\", *rt.RouteTableId)\n\t\t\t} else {\n\t\t\t\treturn findNatGatewayById(cloud, natGatewayIDs[0])\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil, nil\n}\n\nfunc (s *NatGateway) CheckChanges(a, e, changes *NatGateway) error {\n\t\/\/ New\n\tif a == nil {\n\t\tif e.ElasticIP == nil {\n\t\t\treturn fi.RequiredField(\"ElasticIp\")\n\t\t}\n\t\tif e.Subnet == nil {\n\t\t\treturn fi.RequiredField(\"Subnet\")\n\t\t}\n\t}\n\n\t\/\/ Delta\n\tif a != nil {\n\t\tif changes.ElasticIP != nil {\n\t\t\treturn fi.CannotChangeField(\"ElasticIp\")\n\t\t}\n\t\tif changes.Subnet != nil 
{\n\t\t\treturn fi.CannotChangeField(\"Subnet\")\n\t\t}\n\t\tif changes.ID != nil {\n\t\t\treturn fi.CannotChangeField(\"ID\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (e *NatGateway) Run(c *fi.Context) error {\n\treturn fi.DefaultDeltaRunMethod(e, c)\n}\n\nfunc (e *NatGateway) waitAvailable(cloud awsup.AWSCloud) error {\n\t\/\/ It takes 'forever' (up to 5 min...) for a NatGateway to become available after it has been created\n\t\/\/ We have to wait until it is actually up\n\n\t\/\/ TODO: Cache availability status\n\n\tid := aws.StringValue(e.ID)\n\tif id == \"\" {\n\t\treturn fmt.Errorf(\"NAT Gateway %q did not have ID\", e.Name)\n\t}\n\n\tglog.Infof(\"Waiting for NAT Gateway %q to be available\", id)\n\tparams := &ec2.DescribeNatGatewaysInput{\n\t\tNatGatewayIds: []*string{e.ID},\n\t}\n\terr := cloud.EC2().WaitUntilNatGatewayAvailable(params)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error waiting for NAT Gateway %q to be available: %v\", id, err)\n\t}\n\n\treturn nil\n}\n\nfunc (_ *NatGateway) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *NatGateway) error {\n\t\/\/ New NGW\n\n\tvar id *string\n\tif a == nil {\n\t\tglog.V(2).Infof(\"Creating Nat Gateway\")\n\n\t\trequest := &ec2.CreateNatGatewayInput{}\n\t\trequest.AllocationId = e.ElasticIP.ID\n\t\trequest.SubnetId = e.Subnet.ID\n\t\tresponse, err := t.Cloud.EC2().CreateNatGateway(request)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error creating Nat Gateway: %v\", err)\n\t\t}\n\t\te.ID = response.NatGateway.NatGatewayId\n\t\tid = e.ID\n\t} else {\n\t\tid = a.ID\n\t}\n\n\t\/\/ Tag the associated subnet\n\tif e.Subnet == nil {\n\t\treturn fmt.Errorf(\"Subnet not set\")\n\t} else if e.Subnet.ID == nil {\n\t\treturn fmt.Errorf(\"Subnet ID not set\")\n\t}\n\n\t\/\/ TODO: Obsolete - we can get from the route table instead\n\ttags := make(map[string]string)\n\ttags[\"AssociatedNatgateway\"] = *id\n\terr := t.AddAWSTags(*e.Subnet.ID, tags)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to tag subnet %v\", err)\n\t}\n\n\t\/\/ If this is a shared NGW, we need to tag it\n\t\/\/ The tag that implies \"shared\" is `AssociatedNatgateway`=> NGW-ID\n\t\/\/ This is better than just a tag that's shared because this lets us create a whitelist of these NGWs\n\t\/\/ without doing a bunch more work in `kutil\/delete_cluster.go`\n\n\tif *e.Shared == true {\n\t\tglog.V(2).Infof(\"tagging route table %s to track shared NGW\", *e.AssociatedRouteTable.ID)\n\t\terr = t.AddAWSTags(*e.AssociatedRouteTable.ID, tags)\n\t}\n\n\treturn nil\n}\n\ntype terraformNATGateway struct {\n\tAllocationID *terraform.Literal `json:\"allocation_id,omitempty\"`\n\tSubnetID *terraform.Literal `json:\"subnet_id,omitempty\"`\n}\n\nfunc (_ *NatGateway) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *NatGateway) error {\n\ttf := &terraformNATGateway{\n\t\tAllocationID: e.ElasticIP.TerraformLink(),\n\t\tSubnetID: e.Subnet.TerraformLink(),\n\t}\n\n\treturn t.RenderResource(\"aws_nat_gateway\", *e.Name, tf)\n}\n\nfunc (e *NatGateway) TerraformLink() *terraform.Literal {\n\treturn terraform.LiteralProperty(\"aws_nat_gateway\", *e.Name, \"id\")\n}\n<commit_msg>Add message about expected time for NAT gateway creation<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, 
software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage awstasks\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\t\"github.com\/golang\/glog\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\/cloudup\/awsup\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\/cloudup\/terraform\"\n)\n\n\/\/go:generate fitask -type=NatGateway\ntype NatGateway struct {\n\tName *string\n\tElasticIP *ElasticIP\n\tSubnet *Subnet\n\tID *string\n\n\tEgressId *string\n\n\t\/\/ Shared is set if this is a shared NatGateway\n\tShared *bool\n\n\t\/\/ We can't tag NatGateways, so we have to find through a surrogate\n\tAssociatedRouteTable *RouteTable\n}\n\nvar _ fi.CompareWithID = &NatGateway{} \/\/ Validate the IDs\n\nfunc (e *NatGateway) CompareWithID() *string {\n\treturn e.ID\n}\n\nfunc (e *NatGateway) Find(c *fi.Context) (*NatGateway, error) {\n\tcloud := c.Cloud.(awsup.AWSCloud)\n\tvar ngw *ec2.NatGateway\n\tactual := &NatGateway{}\n\n\tif e.ID != nil && *e.ID != \"\" {\n\t\t\/\/ We have an existing NGW, let's look up the EIP\n\t\tvar ngwIds []*string\n\t\tngwIds = append(ngwIds, e.ID)\n\n\t\trequest := &ec2.DescribeNatGatewaysInput{\n\t\t\tNatGatewayIds: ngwIds,\n\t\t}\n\n\t\tresponse, err := cloud.EC2().DescribeNatGateways(request)\n\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error listing Nat Gateways: %v\", err)\n\t\t}\n\n\t\tif len(response.NatGateways) != 1 {\n\t\t\treturn nil, fmt.Errorf(\"found %d NAT Gateways, expected 1\", len(response.NatGateways))\n\t\t}\n\t\tngw = response.NatGateways[0]\n\n\t\tif len(ngw.NatGatewayAddresses) != 1 {\n\t\t\treturn nil, fmt.Errorf(\"found %d EIP Addresses for 1 NAT Gateway, expected 1\", len(ngw.NatGatewayAddresses))\n\t\t}\n\t\tactual.ElasticIP = &ElasticIP{ID: ngw.NatGatewayAddresses[0].AllocationId}\n\t} else {\n\t\t\/\/ This is the normal\/default path\n\t\tvar err error\n\t\t\/\/ assign to the ngw declared above; using := here would shadow it and leave it nil\n\t\tngw, err = e.findNatGateway(c)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif ngw == nil {\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\n\tactual.ID = ngw.NatGatewayId\n\n\tactual.Subnet = e.Subnet\n\tif len(ngw.NatGatewayAddresses) == 0 {\n\t\t\/\/ Not sure if this ever happens\n\t\tactual.ElasticIP = nil\n\t} else if len(ngw.NatGatewayAddresses) == 1 {\n\t\tactual.ElasticIP = &ElasticIP{ID: ngw.NatGatewayAddresses[0].AllocationId}\n\t} else {\n\t\treturn nil, fmt.Errorf(\"found multiple elastic IPs attached to NatGateway %q\", aws.StringValue(ngw.NatGatewayId))\n\t}\n\n\t\/\/ NATGateways don't have a Name (no tags), so we set the name to avoid spurious changes\n\tactual.Name = e.Name\n\n\tactual.AssociatedRouteTable = e.AssociatedRouteTable\n\n\te.ID = actual.ID\n\treturn actual, nil\n}\n\nfunc (e *NatGateway) findNatGateway(c *fi.Context) (*ec2.NatGateway, error) {\n\tcloud := c.Cloud.(awsup.AWSCloud)\n\n\tid := e.ID\n\n\t\/\/ Find via route on private route table\n\tif id == nil && e.AssociatedRouteTable != nil {\n\t\tngw, err := findNatGatewayFromRouteTable(cloud, e.AssociatedRouteTable)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif ngw != nil {\n\t\t\treturn ngw, nil\n\t\t}\n\t}\n\n\t\/\/ Find via tag on subnet\n\t\/\/ TODO: Obsolete - we 
can get from the route table instead\n\tif id == nil && e.Subnet != nil {\n\t\tvar filters []*ec2.Filter\n\t\tfilters = append(filters, awsup.NewEC2Filter(\"key\", \"AssociatedNatgateway\"))\n\t\tif e.Subnet.ID == nil {\n\t\t\tglog.V(2).Infof(\"Unable to find subnet, bypassing Find() for NatGateway\")\n\t\t\treturn nil, nil\n\t\t}\n\t\tfilters = append(filters, awsup.NewEC2Filter(\"resource-id\", *e.Subnet.ID))\n\n\t\trequest := &ec2.DescribeTagsInput{\n\t\t\tFilters: filters,\n\t\t}\n\n\t\tresponse, err := cloud.EC2().DescribeTags(request)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error listing tags: %v\", err)\n\t\t}\n\n\t\tif response == nil || len(response.Tags) == 0 {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tif len(response.Tags) != 1 {\n\t\t\treturn nil, fmt.Errorf(\"found multiple tags for: %v\", e)\n\t\t}\n\t\tt := response.Tags[0]\n\t\tid = t.Value\n\t\tglog.V(2).Infof(\"Found NatGateway via subnet tag: %v\", *id)\n\t}\n\n\tif id != nil {\n\t\treturn findNatGatewayById(cloud, id)\n\t}\n\n\treturn nil, nil\n}\n\nfunc findNatGatewayById(cloud awsup.AWSCloud, id *string) (*ec2.NatGateway, error) {\n\trequest := &ec2.DescribeNatGatewaysInput{}\n\trequest.NatGatewayIds = []*string{id}\n\tresponse, err := cloud.EC2().DescribeNatGateways(request)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error listing NatGateway %q: %v\", aws.StringValue(id), err)\n\t}\n\n\tif response == nil || len(response.NatGateways) == 0 {\n\t\tglog.V(2).Infof(\"Unable to find NatGateway %q\", aws.StringValue(id))\n\t\treturn nil, nil\n\t}\n\tif len(response.NatGateways) != 1 {\n\t\treturn nil, fmt.Errorf(\"found multiple NatGateways with id %q\", aws.StringValue(id))\n\t}\n\treturn response.NatGateways[0], nil\n}\n\nfunc findNatGatewayFromRouteTable(cloud awsup.AWSCloud, routeTable *RouteTable) (*ec2.NatGateway, error) {\n\t\/\/ Find via route on private route table\n\tif routeTable.ID != nil {\n\t\tglog.V(2).Infof(\"trying to match NatGateway via RouteTable %s\", aws.StringValue(routeTable.ID))\n\t\trt, err := routeTable.findEc2RouteTable(cloud)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error finding associated RouteTable to NatGateway: %v\", err)\n\t\t}\n\n\t\tif rt != nil {\n\t\t\tvar natGatewayIDs []*string\n\t\t\tfor _, route := range rt.Routes {\n\t\t\t\tif route.NatGatewayId != nil {\n\t\t\t\t\tnatGatewayIDs = append(natGatewayIDs, route.NatGatewayId)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(natGatewayIDs) == 0 {\n\t\t\t\tglog.V(2).Infof(\"no NatGateway found in route table %s\", *rt.RouteTableId)\n\t\t\t} else if len(natGatewayIDs) > 1 {\n\t\t\t\treturn nil, fmt.Errorf(\"found multiple NatGateways in route table %s\", *rt.RouteTableId)\n\t\t\t} else {\n\t\t\t\treturn findNatGatewayById(cloud, natGatewayIDs[0])\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil, nil\n}\n\nfunc (s *NatGateway) CheckChanges(a, e, changes *NatGateway) error {\n\t\/\/ New\n\tif a == nil {\n\t\tif e.ElasticIP == nil {\n\t\t\treturn fi.RequiredField(\"ElasticIp\")\n\t\t}\n\t\tif e.Subnet == nil {\n\t\t\treturn fi.RequiredField(\"Subnet\")\n\t\t}\n\t}\n\n\t\/\/ Delta\n\tif a != nil {\n\t\tif changes.ElasticIP != nil {\n\t\t\treturn fi.CannotChangeField(\"ElasticIp\")\n\t\t}\n\t\tif changes.Subnet != nil {\n\t\t\treturn fi.CannotChangeField(\"Subnet\")\n\t\t}\n\t\tif changes.ID != nil {\n\t\t\treturn fi.CannotChangeField(\"ID\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (e *NatGateway) Run(c *fi.Context) error {\n\treturn fi.DefaultDeltaRunMethod(e, c)\n}\n\nfunc (e *NatGateway) waitAvailable(cloud awsup.AWSCloud) error {\n\t\/\/ It takes 'forever' (up to 5 min...) 
for a NatGateway to become available after it has been created\n\t\/\/ We have to wait until it is actually up\n\n\t\/\/ TODO: Cache availability status\n\n\tid := aws.StringValue(e.ID)\n\tif id == \"\" {\n\t\treturn fmt.Errorf(\"NAT Gateway %q did not have ID\", aws.StringValue(e.Name))\n\t}\n\n\tglog.Infof(\"Waiting for NAT Gateway %q to be available (this often takes about 5 minutes)\", id)\n\tparams := &ec2.DescribeNatGatewaysInput{\n\t\tNatGatewayIds: []*string{e.ID},\n\t}\n\terr := cloud.EC2().WaitUntilNatGatewayAvailable(params)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error waiting for NAT Gateway %q to be available: %v\", id, err)\n\t}\n\n\treturn nil\n}\n\nfunc (_ *NatGateway) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *NatGateway) error {\n\t\/\/ New NGW\n\n\tvar id *string\n\tif a == nil {\n\t\tglog.V(2).Infof(\"Creating Nat Gateway\")\n\n\t\trequest := &ec2.CreateNatGatewayInput{}\n\t\trequest.AllocationId = e.ElasticIP.ID\n\t\trequest.SubnetId = e.Subnet.ID\n\t\tresponse, err := t.Cloud.EC2().CreateNatGateway(request)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error creating Nat Gateway: %v\", err)\n\t\t}\n\t\te.ID = response.NatGateway.NatGatewayId\n\t\tid = e.ID\n\t} else {\n\t\tid = a.ID\n\t}\n\n\t\/\/ Tag the associated subnet\n\tif e.Subnet == nil {\n\t\treturn fmt.Errorf(\"Subnet not set\")\n\t} else if e.Subnet.ID == nil {\n\t\treturn fmt.Errorf(\"Subnet ID not set\")\n\t}\n\n\t\/\/ TODO: Obsolete - we can get from the route table instead\n\ttags := make(map[string]string)\n\ttags[\"AssociatedNatgateway\"] = *id\n\terr := t.AddAWSTags(*e.Subnet.ID, tags)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to tag subnet %v\", err)\n\t}\n\n\t\/\/ If this is a shared NGW, we need to tag it\n\t\/\/ The tag that implies \"shared\" is `AssociatedNatgateway`=> NGW-ID\n\t\/\/ This is better than just a tag that's shared because this lets us create a whitelist of these NGWs\n\t\/\/ without doing a bunch more work in `kutil\/delete_cluster.go`\n\n\tif e.Shared != nil && *e.Shared {\n\t\tglog.V(2).Infof(\"tagging route table %s to track shared NGW\", *e.AssociatedRouteTable.ID)\n\t\terr = t.AddAWSTags(*e.AssociatedRouteTable.ID, tags)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to tag route table %v\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\ntype terraformNATGateway struct {\n\tAllocationID *terraform.Literal `json:\"allocation_id,omitempty\"`\n\tSubnetID *terraform.Literal `json:\"subnet_id,omitempty\"`\n}\n\nfunc (_ *NatGateway) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *NatGateway) error {\n\ttf := &terraformNATGateway{\n\t\tAllocationID: e.ElasticIP.TerraformLink(),\n\t\tSubnetID: e.Subnet.TerraformLink(),\n\t}\n\n\treturn t.RenderResource(\"aws_nat_gateway\", *e.Name, tf)\n}\n\nfunc (e *NatGateway) TerraformLink() *terraform.Literal {\n\treturn terraform.LiteralProperty(\"aws_nat_gateway\", *e.Name, \"id\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 Prometheus Team\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cli\n\nimport 
(\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/pkg\/errors\"\n\tkingpin \"gopkg.in\/alecthomas\/kingpin.v2\"\n\n\t\"github.com\/prometheus\/alertmanager\/api\/v2\/client\/silence\"\n\t\"github.com\/prometheus\/alertmanager\/api\/v2\/models\"\n)\n\ntype silenceImportCmd struct {\n\tforce bool\n\tworkers int\n\tfile string\n}\n\nconst silenceImportHelp = `Import alertmanager silences from JSON file or stdin\n\nThis command can be used to bulk import silences from a JSON file\ncreated by query command. For example:\n\namtool silence query -o json foo > foo.json\n\namtool silence import foo.json\n\nJSON data can also come from stdin if no param is specified.\n`\n\nfunc configureSilenceImportCmd(cc *kingpin.CmdClause) {\n\tvar (\n\t\tc = &silenceImportCmd{}\n\t\timportCmd = cc.Command(\"import\", silenceImportHelp)\n\t)\n\n\timportCmd.Flag(\"force\", \"Force adding new silences even if it already exists\").Short('f').BoolVar(&c.force)\n\timportCmd.Flag(\"worker\", \"Number of concurrent workers to use for import\").Short('w').Default(\"8\").IntVar(&c.workers)\n\timportCmd.Arg(\"input-file\", \"JSON file with silences\").ExistingFileVar(&c.file)\n\timportCmd.Action(execWithTimeout(c.bulkImport))\n}\n\nfunc addSilenceWorker(ctx context.Context, sclient *silence.Client, silencec <-chan *models.PostableSilence, errc chan<- error) {\n\tfor s := range silencec {\n\t\tsid := s.ID\n\t\tparams := silence.NewPostSilencesParams().WithContext(ctx).WithSilence(s)\n\t\tpostOk, err := sclient.PostSilences(params)\n\t\tif err != nil && strings.Contains(err.Error(), \"not found\") {\n\t\t\t\/\/ silence doesn't exists yet, retry to create as a new one\n\t\t\tparams.Silence.ID = \"\"\n\t\t\tpostOk, err = sclient.PostSilences(params)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Error adding silence id='%v': %v\\n\", sid, err)\n\t\t} else {\n\t\t\tfmt.Println(postOk.Payload.SilenceID)\n\t\t}\n\t\terrc <- err\n\t}\n}\n\nfunc (c *silenceImportCmd) bulkImport(ctx context.Context, _ *kingpin.ParseContext) error {\n\tinput := os.Stdin\n\tvar err error\n\tif c.file != \"\" {\n\t\tinput, err = os.Open(c.file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer input.Close()\n\t}\n\n\tdec := json.NewDecoder(input)\n\t\/\/ read open square bracket\n\t_, err = dec.Token()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"couldn't unmarshal input data, is it JSON?\")\n\t}\n\n\tamclient := NewAlertmanagerClient(alertmanagerURL)\n\tsilencec := make(chan *models.PostableSilence, 100)\n\terrc := make(chan error, 100)\n\tvar wg sync.WaitGroup\n\tfor w := 0; w < c.workers; w++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\taddSilenceWorker(ctx, amclient.Silence, silencec, errc)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\terrCount := 0\n\tgo func() {\n\t\tfor err := range errc {\n\t\t\tif err != nil {\n\t\t\t\terrCount++\n\t\t\t}\n\t\t}\n\t}()\n\n\tcount := 0\n\tfor dec.More() {\n\t\tvar s models.PostableSilence\n\t\terr := dec.Decode(&s)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"couldn't unmarshal input data, is it JSON?\")\n\t\t}\n\n\t\tif c.force {\n\t\t\t\/\/ reset the silence ID so Alertmanager will always create new silence\n\t\t\ts.ID = \"\"\n\t\t}\n\n\t\tsilencec <- &s\n\t\tcount++\n\t}\n\n\tclose(silencec)\n\twg.Wait()\n\tclose(errc)\n\n\tif errCount > 0 {\n\t\treturn fmt.Errorf(\"couldn't import %v out of %v silences\", errCount, count)\n\t}\n\treturn nil\n}\n<commit_msg>cli: check for NotFound error during silence import<commit_after>\/\/ 
Copyright 2018 Prometheus Team\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cli\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\n\t\"github.com\/pkg\/errors\"\n\tkingpin \"gopkg.in\/alecthomas\/kingpin.v2\"\n\n\t\"github.com\/prometheus\/alertmanager\/api\/v2\/client\/silence\"\n\t\"github.com\/prometheus\/alertmanager\/api\/v2\/models\"\n)\n\ntype silenceImportCmd struct {\n\tforce bool\n\tworkers int\n\tfile string\n}\n\nconst silenceImportHelp = `Import alertmanager silences from JSON file or stdin\n\nThis command can be used to bulk import silences from a JSON file\ncreated by query command. For example:\n\namtool silence query -o json foo > foo.json\n\namtool silence import foo.json\n\nJSON data can also come from stdin if no param is specified.\n`\n\nfunc configureSilenceImportCmd(cc *kingpin.CmdClause) {\n\tvar (\n\t\tc = &silenceImportCmd{}\n\t\timportCmd = cc.Command(\"import\", silenceImportHelp)\n\t)\n\n\timportCmd.Flag(\"force\", \"Force adding new silences even if it already exists\").Short('f').BoolVar(&c.force)\n\timportCmd.Flag(\"worker\", \"Number of concurrent workers to use for import\").Short('w').Default(\"8\").IntVar(&c.workers)\n\timportCmd.Arg(\"input-file\", \"JSON file with silences\").ExistingFileVar(&c.file)\n\timportCmd.Action(execWithTimeout(c.bulkImport))\n}\n\nfunc addSilenceWorker(ctx context.Context, sclient *silence.Client, silencec <-chan *models.PostableSilence, errc chan<- error) {\n\tfor s := range silencec {\n\t\tsid := s.ID\n\t\tparams := silence.NewPostSilencesParams().WithContext(ctx).WithSilence(s)\n\t\tpostOk, err := sclient.PostSilences(params)\n\t\tif _, ok := err.(*silence.PostSilencesNotFound); ok {\n\t\t\t\/\/ silence doesn't exists yet, retry to create as a new one\n\t\t\tparams.Silence.ID = \"\"\n\t\t\tpostOk, err = sclient.PostSilences(params)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Error adding silence id='%v': %v\\n\", sid, err)\n\t\t} else {\n\t\t\tfmt.Println(postOk.Payload.SilenceID)\n\t\t}\n\t\terrc <- err\n\t}\n}\n\nfunc (c *silenceImportCmd) bulkImport(ctx context.Context, _ *kingpin.ParseContext) error {\n\tinput := os.Stdin\n\tvar err error\n\tif c.file != \"\" {\n\t\tinput, err = os.Open(c.file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer input.Close()\n\t}\n\n\tdec := json.NewDecoder(input)\n\t\/\/ read open square bracket\n\t_, err = dec.Token()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"couldn't unmarshal input data, is it JSON?\")\n\t}\n\n\tamclient := NewAlertmanagerClient(alertmanagerURL)\n\tsilencec := make(chan *models.PostableSilence, 100)\n\terrc := make(chan error, 100)\n\tvar wg sync.WaitGroup\n\tfor w := 0; w < c.workers; w++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\taddSilenceWorker(ctx, amclient.Silence, silencec, errc)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\terrCount := 0\n\tgo func() {\n\t\tfor err := range errc {\n\t\t\tif err != nil {\n\t\t\t\terrCount++\n\t\t\t}\n\t\t}\n\t}()\n\n\tcount 
:= 0\n\tfor dec.More() {\n\t\tvar s models.PostableSilence\n\t\terr := dec.Decode(&s)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"couldn't unmarshal input data, is it JSON?\")\n\t\t}\n\n\t\tif c.force {\n\t\t\t\/\/ reset the silence ID so Alertmanager will always create new silence\n\t\t\ts.ID = \"\"\n\t\t}\n\n\t\tsilencec <- &s\n\t\tcount++\n\t}\n\n\tclose(silencec)\n\twg.Wait()\n\tclose(errc)\n\n\tif errCount > 0 {\n\t\treturn fmt.Errorf(\"couldn't import %v out of %v silences\", errCount, count)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage http\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar writeSetCookiesTests = []struct {\n\tCookie *Cookie\n\tRaw string\n}{\n\t{\n\t\t&Cookie{Name: \"cookie-1\", Value: \"v$1\"},\n\t\t\"cookie-1=v$1\",\n\t},\n\t{\n\t\t&Cookie{Name: \"cookie-2\", Value: \"two\", MaxAge: 3600},\n\t\t\"cookie-2=two; Max-Age=3600\",\n\t},\n\t{\n\t\t&Cookie{Name: \"cookie-3\", Value: \"three\", Domain: \".example.com\"},\n\t\t\"cookie-3=three; Domain=.example.com\",\n\t},\n\t{\n\t\t&Cookie{Name: \"cookie-4\", Value: \"four\", Path: \"\/restricted\/\"},\n\t\t\"cookie-4=four; Path=\/restricted\/\",\n\t},\n}\n\nfunc TestWriteSetCookies(t *testing.T) {\n\tfor i, tt := range writeSetCookiesTests {\n\t\tif g, e := tt.Cookie.String(), tt.Raw; g != e {\n\t\t\tt.Errorf(\"Test %d, expecting:\\n%s\\nGot:\\n%s\\n\", i, e, g)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\ntype headerOnlyResponseWriter Header\n\nfunc (ho headerOnlyResponseWriter) Header() Header {\n\treturn Header(ho)\n}\n\nfunc (ho headerOnlyResponseWriter) Write([]byte) (int, error) {\n\tpanic(\"NOIMPL\")\n}\n\nfunc (ho headerOnlyResponseWriter) WriteHeader(int) {\n\tpanic(\"NOIMPL\")\n}\n\nfunc TestSetCookie(t *testing.T) {\n\tm := make(Header)\n\tSetCookie(headerOnlyResponseWriter(m), &Cookie{Name: \"cookie-1\", Value: \"one\", Path: \"\/restricted\/\"})\n\tSetCookie(headerOnlyResponseWriter(m), &Cookie{Name: \"cookie-2\", Value: \"two\", MaxAge: 3600})\n\tif l := len(m[\"Set-Cookie\"]); l != 2 {\n\t\tt.Fatalf(\"expected %d cookies, got %d\", 2, l)\n\t}\n\tif g, e := m[\"Set-Cookie\"][0], \"cookie-1=one; Path=\/restricted\/\"; g != e {\n\t\tt.Errorf(\"cookie #1: want %q, got %q\", e, g)\n\t}\n\tif g, e := m[\"Set-Cookie\"][1], \"cookie-2=two; Max-Age=3600\"; g != e {\n\t\tt.Errorf(\"cookie #2: want %q, got %q\", e, g)\n\t}\n}\n\nvar addCookieTests = []struct {\n\tCookies []*Cookie\n\tRaw string\n}{\n\t{\n\t\t[]*Cookie{},\n\t\t\"\",\n\t},\n\t{\n\t\t[]*Cookie{{Name: \"cookie-1\", Value: \"v$1\"}},\n\t\t\"cookie-1=v$1\",\n\t},\n\t{\n\t\t[]*Cookie{\n\t\t\t{Name: \"cookie-1\", Value: \"v$1\"},\n\t\t\t{Name: \"cookie-2\", Value: \"v$2\"},\n\t\t\t{Name: \"cookie-3\", Value: \"v$3\"},\n\t\t},\n\t\t\"cookie-1=v$1; cookie-2=v$2; cookie-3=v$3\",\n\t},\n}\n\nfunc TestAddCookie(t *testing.T) {\n\tfor i, tt := range addCookieTests {\n\t\treq, _ := NewRequest(\"GET\", \"http:\/\/example.com\/\", nil)\n\t\tfor _, c := range tt.Cookies {\n\t\t\treq.AddCookie(c)\n\t\t}\n\t\tif g := req.Header.Get(\"Cookie\"); g != tt.Raw {\n\t\t\tt.Errorf(\"Test %d:\\nwant: %s\\n got: %s\\n\", i, tt.Raw, g)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nvar readSetCookiesTests = []struct {\n\tHeader Header\n\tCookies []*Cookie\n}{\n\t{\n\t\tHeader{\"Set-Cookie\": {\"Cookie-1=v$1\"}},\n\t\t[]*Cookie{{Name: \"Cookie-1\", Value: 
\"v$1\", Raw: \"Cookie-1=v$1\"}},\n\t},\n\t{\n\t\tHeader{\"Set-Cookie\": {\"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=\/; domain=.google.ch; HttpOnly\"}},\n\t\t[]*Cookie{{\n\t\t\tName: \"NID\",\n\t\t\tValue: \"99=YsDT5i3E-CXax-\",\n\t\t\tPath: \"\/\",\n\t\t\tDomain: \".google.ch\",\n\t\t\tHttpOnly: true,\n\t\t\tExpires: time.Date(2011, 11, 23, 1, 5, 3, 0, time.UTC),\n\t\t\tRawExpires: \"Wed, 23-Nov-2011 01:05:03 GMT\",\n\t\t\tRaw: \"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=\/; domain=.google.ch; HttpOnly\",\n\t\t}},\n\t},\n}\n\nfunc toJSON(v interface{}) string {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn fmt.Sprintf(\"%#v\", v)\n\t}\n\treturn string(b)\n}\n\nfunc TestReadSetCookies(t *testing.T) {\n\tfor i, tt := range readSetCookiesTests {\n\t\tfor n := 0; n < 2; n++ { \/\/ to verify readSetCookies doesn't mutate its input\n\t\t\tc := readSetCookies(tt.Header)\n\t\t\tif !reflect.DeepEqual(c, tt.Cookies) {\n\t\t\t\tt.Errorf(\"#%d readSetCookies: have\\n%s\\nwant\\n%s\\n\", i, toJSON(c), toJSON(tt.Cookies))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar readCookiesTests = []struct {\n\tHeader Header\n\tFilter string\n\tCookies []*Cookie\n}{\n\t{\n\t\tHeader{\"Cookie\": {\"Cookie-1=v$1\", \"c2=v2\"}},\n\t\t\"\",\n\t\t[]*Cookie{\n\t\t\t{Name: \"Cookie-1\", Value: \"v$1\"},\n\t\t\t{Name: \"c2\", Value: \"v2\"},\n\t\t},\n\t},\n\t{\n\t\tHeader{\"Cookie\": {\"Cookie-1=v$1\", \"c2=v2\"}},\n\t\t\"c2\",\n\t\t[]*Cookie{\n\t\t\t{Name: \"c2\", Value: \"v2\"},\n\t\t},\n\t},\n\t{\n\t\tHeader{\"Cookie\": {\"Cookie-1=v$1; c2=v2\"}},\n\t\t\"\",\n\t\t[]*Cookie{\n\t\t\t{Name: \"Cookie-1\", Value: \"v$1\"},\n\t\t\t{Name: \"c2\", Value: \"v2\"},\n\t\t},\n\t},\n\t{\n\t\tHeader{\"Cookie\": {\"Cookie-1=v$1; c2=v2\"}},\n\t\t\"c2\",\n\t\t[]*Cookie{\n\t\t\t{Name: \"c2\", Value: \"v2\"},\n\t\t},\n\t},\n}\n\nfunc TestReadCookies(t *testing.T) {\n\tfor i, tt := range readCookiesTests {\n\t\tfor n := 0; n < 2; n++ { \/\/ to verify readCookies doesn't mutate its input \n\t\t\tc := readCookies(tt.Header, tt.Filter)\n\t\t\tif !reflect.DeepEqual(c, tt.Cookies) {\n\t\t\t\tt.Errorf(\"#%d readCookies:\\nhave: %s\\nwant: %s\\n\", i, toJSON(c), toJSON(tt.Cookies))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>net\/http: some more cookie tests<commit_after>\/\/ Copyright 2010 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage http\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar writeSetCookiesTests = []struct {\n\tCookie *Cookie\n\tRaw string\n}{\n\t{\n\t\t&Cookie{Name: \"cookie-1\", Value: \"v$1\"},\n\t\t\"cookie-1=v$1\",\n\t},\n\t{\n\t\t&Cookie{Name: \"cookie-2\", Value: \"two\", MaxAge: 3600},\n\t\t\"cookie-2=two; Max-Age=3600\",\n\t},\n\t{\n\t\t&Cookie{Name: \"cookie-3\", Value: \"three\", Domain: \".example.com\"},\n\t\t\"cookie-3=three; Domain=.example.com\",\n\t},\n\t{\n\t\t&Cookie{Name: \"cookie-4\", Value: \"four\", Path: \"\/restricted\/\"},\n\t\t\"cookie-4=four; Path=\/restricted\/\",\n\t},\n}\n\nfunc TestWriteSetCookies(t *testing.T) {\n\tfor i, tt := range writeSetCookiesTests {\n\t\tif g, e := tt.Cookie.String(), tt.Raw; g != e {\n\t\t\tt.Errorf(\"Test %d, expecting:\\n%s\\nGot:\\n%s\\n\", i, e, g)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\ntype headerOnlyResponseWriter Header\n\nfunc (ho headerOnlyResponseWriter) Header() Header {\n\treturn Header(ho)\n}\n\nfunc (ho headerOnlyResponseWriter) Write([]byte) (int, error) {\n\tpanic(\"NOIMPL\")\n}\n\nfunc (ho headerOnlyResponseWriter) WriteHeader(int) {\n\tpanic(\"NOIMPL\")\n}\n\nfunc TestSetCookie(t *testing.T) {\n\tm := make(Header)\n\tSetCookie(headerOnlyResponseWriter(m), &Cookie{Name: \"cookie-1\", Value: \"one\", Path: \"\/restricted\/\"})\n\tSetCookie(headerOnlyResponseWriter(m), &Cookie{Name: \"cookie-2\", Value: \"two\", MaxAge: 3600})\n\tif l := len(m[\"Set-Cookie\"]); l != 2 {\n\t\tt.Fatalf(\"expected %d cookies, got %d\", 2, l)\n\t}\n\tif g, e := m[\"Set-Cookie\"][0], \"cookie-1=one; Path=\/restricted\/\"; g != e {\n\t\tt.Errorf(\"cookie #1: want %q, got %q\", e, g)\n\t}\n\tif g, e := m[\"Set-Cookie\"][1], \"cookie-2=two; Max-Age=3600\"; g != e {\n\t\tt.Errorf(\"cookie #2: want %q, got %q\", e, g)\n\t}\n}\n\nvar addCookieTests = []struct {\n\tCookies []*Cookie\n\tRaw string\n}{\n\t{\n\t\t[]*Cookie{},\n\t\t\"\",\n\t},\n\t{\n\t\t[]*Cookie{{Name: \"cookie-1\", Value: \"v$1\"}},\n\t\t\"cookie-1=v$1\",\n\t},\n\t{\n\t\t[]*Cookie{\n\t\t\t{Name: \"cookie-1\", Value: \"v$1\"},\n\t\t\t{Name: \"cookie-2\", Value: \"v$2\"},\n\t\t\t{Name: \"cookie-3\", Value: \"v$3\"},\n\t\t},\n\t\t\"cookie-1=v$1; cookie-2=v$2; cookie-3=v$3\",\n\t},\n}\n\nfunc TestAddCookie(t *testing.T) {\n\tfor i, tt := range addCookieTests {\n\t\treq, _ := NewRequest(\"GET\", \"http:\/\/example.com\/\", nil)\n\t\tfor _, c := range tt.Cookies {\n\t\t\treq.AddCookie(c)\n\t\t}\n\t\tif g := req.Header.Get(\"Cookie\"); g != tt.Raw {\n\t\t\tt.Errorf(\"Test %d:\\nwant: %s\\n got: %s\\n\", i, tt.Raw, g)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nvar readSetCookiesTests = []struct {\n\tHeader Header\n\tCookies []*Cookie\n}{\n\t{\n\t\tHeader{\"Set-Cookie\": {\"Cookie-1=v$1\"}},\n\t\t[]*Cookie{{Name: \"Cookie-1\", Value: \"v$1\", Raw: \"Cookie-1=v$1\"}},\n\t},\n\t{\n\t\tHeader{\"Set-Cookie\": {\"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=\/; domain=.google.ch; HttpOnly\"}},\n\t\t[]*Cookie{{\n\t\t\tName: \"NID\",\n\t\t\tValue: \"99=YsDT5i3E-CXax-\",\n\t\t\tPath: \"\/\",\n\t\t\tDomain: \".google.ch\",\n\t\t\tHttpOnly: true,\n\t\t\tExpires: time.Date(2011, 11, 23, 1, 5, 3, 0, time.UTC),\n\t\t\tRawExpires: \"Wed, 23-Nov-2011 01:05:03 GMT\",\n\t\t\tRaw: \"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=\/; domain=.google.ch; HttpOnly\",\n\t\t}},\n\t},\n\t{\n\t\tHeader{\"Set-Cookie\": 
{\".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=\/; HttpOnly\"}},\n\t\t[]*Cookie{{\n\t\t\tName: \".ASPXAUTH\",\n\t\t\tValue: \"7E3AA\",\n\t\t\tPath: \"\/\",\n\t\t\tExpires: time.Date(2012, 3, 7, 14, 25, 6, 0, time.UTC),\n\t\t\tRawExpires: \"Wed, 07-Mar-2012 14:25:06 GMT\",\n\t\t\tHttpOnly: true,\n\t\t\tRaw: \".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=\/; HttpOnly\",\n\t\t}},\n\t},\n\t{\n\t\tHeader{\"Set-Cookie\": {\"ASP.NET_SessionId=foo; path=\/; HttpOnly\"}},\n\t\t[]*Cookie{{\n\t\t\tName: \"ASP.NET_SessionId\",\n\t\t\tValue: \"foo\",\n\t\t\tPath: \"\/\",\n\t\t\tHttpOnly: true,\n\t\t\tRaw: \"ASP.NET_SessionId=foo; path=\/; HttpOnly\",\n\t\t}},\n\t},\n\n\t\/\/ TODO(bradfitz): users have reported seeing this in the\n\t\/\/ wild, but do browsers handle it? RFC 6265 just says \"don't\n\t\/\/ do that\" (section 3) and then never mentions header folding\n\t\/\/ again.\n\t\/\/ Header{\"Set-Cookie\": {\"ASP.NET_SessionId=foo; path=\/; HttpOnly, .ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=\/; HttpOnly\"}},\n}\n\nfunc toJSON(v interface{}) string {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn fmt.Sprintf(\"%#v\", v)\n\t}\n\treturn string(b)\n}\n\nfunc TestReadSetCookies(t *testing.T) {\n\tfor i, tt := range readSetCookiesTests {\n\t\tfor n := 0; n < 2; n++ { \/\/ to verify readSetCookies doesn't mutate its input\n\t\t\tc := readSetCookies(tt.Header)\n\t\t\tif !reflect.DeepEqual(c, tt.Cookies) {\n\t\t\t\tt.Errorf(\"#%d readSetCookies: have\\n%s\\nwant\\n%s\\n\", i, toJSON(c), toJSON(tt.Cookies))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar readCookiesTests = []struct {\n\tHeader Header\n\tFilter string\n\tCookies []*Cookie\n}{\n\t{\n\t\tHeader{\"Cookie\": {\"Cookie-1=v$1\", \"c2=v2\"}},\n\t\t\"\",\n\t\t[]*Cookie{\n\t\t\t{Name: \"Cookie-1\", Value: \"v$1\"},\n\t\t\t{Name: \"c2\", Value: \"v2\"},\n\t\t},\n\t},\n\t{\n\t\tHeader{\"Cookie\": {\"Cookie-1=v$1\", \"c2=v2\"}},\n\t\t\"c2\",\n\t\t[]*Cookie{\n\t\t\t{Name: \"c2\", Value: \"v2\"},\n\t\t},\n\t},\n\t{\n\t\tHeader{\"Cookie\": {\"Cookie-1=v$1; c2=v2\"}},\n\t\t\"\",\n\t\t[]*Cookie{\n\t\t\t{Name: \"Cookie-1\", Value: \"v$1\"},\n\t\t\t{Name: \"c2\", Value: \"v2\"},\n\t\t},\n\t},\n\t{\n\t\tHeader{\"Cookie\": {\"Cookie-1=v$1; c2=v2\"}},\n\t\t\"c2\",\n\t\t[]*Cookie{\n\t\t\t{Name: \"c2\", Value: \"v2\"},\n\t\t},\n\t},\n}\n\nfunc TestReadCookies(t *testing.T) {\n\tfor i, tt := range readCookiesTests {\n\t\tfor n := 0; n < 2; n++ { \/\/ to verify readCookies doesn't mutate its input \n\t\t\tc := readCookies(tt.Header, tt.Filter)\n\t\t\tif !reflect.DeepEqual(c, tt.Cookies) {\n\t\t\t\tt.Errorf(\"#%d readCookies:\\nhave: %s\\nwant: %s\\n\", i, toJSON(c), toJSON(tt.Cookies))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package timer_test\n\nimport (\n\t\"fmt\"\n\t\"github.com\/name5566\/leaf\/timer\"\n)\n\nfunc Example() {\n\td := timer.NewDispatcher(10)\n\n\t\/\/ timer 1\n\td.AfterFunc(1, func() {\n\t\tfmt.Println(\"My name is Leaf\")\n\t})\n\n\t\/\/ timer 2\n\tt := d.AfterFunc(1, func() {\n\t\tfmt.Println(\"will not print\")\n\t})\n\tt.Stop()\n\n\t\/\/ dispatch\n\t(<-d.ChanTimer).Cb()\n\n\t\/\/ Output:\n\t\/\/ My name is Leaf\n}\n<commit_msg>example for CronExpr<commit_after>package timer_test\n\nimport (\n\t\"fmt\"\n\t\"github.com\/name5566\/leaf\/timer\"\n\t\"time\"\n)\n\nfunc ExampleTimer() {\n\td := timer.NewDispatcher(10)\n\n\t\/\/ timer 1\n\td.AfterFunc(1, func() {\n\t\tfmt.Println(\"My name is Leaf\")\n\t})\n\n\t\/\/ timer 2\n\tt := d.AfterFunc(1, 
func() {\n\t\tfmt.Println(\"will not print\")\n\t})\n\tt.Stop()\n\n\t\/\/ dispatch\n\t(<-d.ChanTimer).Cb()\n\n\t\/\/ Output:\n\t\/\/ My name is Leaf\n}\n\nfunc ExampleCronExpr() {\n\tcronExpr, err := timer.NewCronExpr(\"0 * * * *\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfmt.Println(cronExpr.Next(time.Date(\n\t\t2000, 1, 1,\n\t\t20, 10, 5,\n\t\t0, time.UTC,\n\t)))\n\n\t\/\/ Output:\n\t\/\/ 2000-01-01 21:00:00 +0000 UTC\n}\n<|endoftext|>"} {"text":"<commit_before>package client \/\/ import \"github.com\/docker\/docker\/client\"\n\nimport (\n\t\"bytes\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/docker\/docker\/api\"\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/docker\/docker\/internal\/testutil\"\n\t\"github.com\/gotestyourself\/gotestyourself\/skip\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestNewEnvClient(t *testing.T) {\n\tskip.IfCondition(t, runtime.GOOS == \"windows\")\n\n\ttestcases := []struct {\n\t\tdoc string\n\t\tenvs map[string]string\n\t\texpectedError string\n\t\texpectedVersion string\n\t}{\n\t\t{\n\t\t\tdoc: \"default api version\",\n\t\t\tenvs: map[string]string{},\n\t\t\texpectedVersion: api.DefaultVersion,\n\t\t},\n\t\t{\n\t\t\tdoc: \"invalid cert path\",\n\t\t\tenvs: map[string]string{\n\t\t\t\t\"DOCKER_CERT_PATH\": \"invalid\/path\",\n\t\t\t},\n\t\t\texpectedError: \"Could not load X509 key pair: open invalid\/path\/cert.pem: no such file or directory\",\n\t\t},\n\t\t{\n\t\t\tdoc: \"default api version with cert path\",\n\t\t\tenvs: map[string]string{\n\t\t\t\t\"DOCKER_CERT_PATH\": \"testdata\/\",\n\t\t\t},\n\t\t\texpectedVersion: api.DefaultVersion,\n\t\t},\n\t\t{\n\t\t\tdoc: \"default api version with cert path and tls verify\",\n\t\t\tenvs: map[string]string{\n\t\t\t\t\"DOCKER_CERT_PATH\": \"testdata\/\",\n\t\t\t\t\"DOCKER_TLS_VERIFY\": \"1\",\n\t\t\t},\n\t\t\texpectedVersion: api.DefaultVersion,\n\t\t},\n\t\t{\n\t\t\tdoc: \"default api version with cert path and host\",\n\t\t\tenvs: map[string]string{\n\t\t\t\t\"DOCKER_CERT_PATH\": \"testdata\/\",\n\t\t\t\t\"DOCKER_HOST\": \"https:\/\/notaunixsocket\",\n\t\t\t},\n\t\t\texpectedVersion: api.DefaultVersion,\n\t\t},\n\t\t{\n\t\t\tdoc: \"invalid docker host\",\n\t\t\tenvs: map[string]string{\n\t\t\t\t\"DOCKER_HOST\": \"host\",\n\t\t\t},\n\t\t\texpectedError: \"unable to parse docker host `host`\",\n\t\t},\n\t\t{\n\t\t\tdoc: \"invalid docker host, with good format\",\n\t\t\tenvs: map[string]string{\n\t\t\t\t\"DOCKER_HOST\": \"invalid:\/\/url\",\n\t\t\t},\n\t\t\texpectedVersion: api.DefaultVersion,\n\t\t},\n\t\t{\n\t\t\tdoc: \"override api version\",\n\t\t\tenvs: map[string]string{\n\t\t\t\t\"DOCKER_API_VERSION\": \"1.22\",\n\t\t\t},\n\t\t\texpectedVersion: \"1.22\",\n\t\t},\n\t}\n\n\tenv := envToMap()\n\tdefer mapToEnv(env)\n\tfor _, c := range testcases {\n\t\tmapToEnv(c.envs)\n\t\tapiclient, err := NewEnvClient()\n\t\tif c.expectedError != \"\" {\n\t\t\tassert.Error(t, err, c.doc)\n\t\t\tassert.Equal(t, c.expectedError, err.Error(), c.doc)\n\t\t} else {\n\t\t\tassert.NoError(t, err, c.doc)\n\t\t\tversion := apiclient.ClientVersion()\n\t\t\tassert.Equal(t, c.expectedVersion, version, c.doc)\n\t\t}\n\n\t\tif c.envs[\"DOCKER_TLS_VERIFY\"] != \"\" {\n\t\t\t\/\/ pedantic checking that this is handled correctly\n\t\t\ttr := apiclient.client.Transport.(*http.Transport)\n\t\t\tassert.NotNil(t, tr.TLSClientConfig, c.doc)\n\t\t\tassert.Equal(t, tr.TLSClientConfig.InsecureSkipVerify, false, c.doc)\n\t\t}\n\t}\n}\n\nfunc 
TestGetAPIPath(t *testing.T) {\n\ttestcases := []struct {\n\t\tversion string\n\t\tpath string\n\t\tquery url.Values\n\t\texpected string\n\t}{\n\t\t{\"\", \"\/containers\/json\", nil, \"\/containers\/json\"},\n\t\t{\"\", \"\/containers\/json\", url.Values{}, \"\/containers\/json\"},\n\t\t{\"\", \"\/containers\/json\", url.Values{\"s\": []string{\"c\"}}, \"\/containers\/json?s=c\"},\n\t\t{\"1.22\", \"\/containers\/json\", nil, \"\/v1.22\/containers\/json\"},\n\t\t{\"1.22\", \"\/containers\/json\", url.Values{}, \"\/v1.22\/containers\/json\"},\n\t\t{\"1.22\", \"\/containers\/json\", url.Values{\"s\": []string{\"c\"}}, \"\/v1.22\/containers\/json?s=c\"},\n\t\t{\"v1.22\", \"\/containers\/json\", nil, \"\/v1.22\/containers\/json\"},\n\t\t{\"v1.22\", \"\/containers\/json\", url.Values{}, \"\/v1.22\/containers\/json\"},\n\t\t{\"v1.22\", \"\/containers\/json\", url.Values{\"s\": []string{\"c\"}}, \"\/v1.22\/containers\/json?s=c\"},\n\t\t{\"v1.22\", \"\/networks\/kiwl$%^\", nil, \"\/v1.22\/networks\/kiwl$%25%5E\"},\n\t}\n\n\tfor _, testcase := range testcases {\n\t\tc := Client{version: testcase.version, basePath: \"\/\"}\n\t\tactual := c.getAPIPath(testcase.path, testcase.query)\n\t\tassert.Equal(t, actual, testcase.expected)\n\t}\n}\n\nfunc TestParseHost(t *testing.T) {\n\tcases := []struct {\n\t\thost string\n\t\tproto string\n\t\taddr string\n\t\tbase string\n\t\terr bool\n\t}{\n\t\t{\"\", \"\", \"\", \"\", true},\n\t\t{\"foobar\", \"\", \"\", \"\", true},\n\t\t{\"foo:\/\/bar\", \"foo\", \"bar\", \"\", false},\n\t\t{\"tcp:\/\/localhost:2476\", \"tcp\", \"localhost:2476\", \"\", false},\n\t\t{\"tcp:\/\/localhost:2476\/path\", \"tcp\", \"localhost:2476\", \"\/path\", false},\n\t}\n\n\tfor _, cs := range cases {\n\t\tp, a, b, e := ParseHost(cs.host)\n\t\tif cs.err {\n\t\t\tassert.Error(t, e)\n\t\t}\n\t\tassert.Equal(t, cs.proto, p)\n\t\tassert.Equal(t, cs.addr, a)\n\t\tassert.Equal(t, cs.base, b)\n\t}\n}\n\nfunc TestParseHostURL(t *testing.T) {\n\ttestcases := []struct {\n\t\thost string\n\t\texpected *url.URL\n\t\texpectedErr string\n\t}{\n\t\t{\n\t\t\thost: \"\",\n\t\t\texpectedErr: \"unable to parse docker host\",\n\t\t},\n\t\t{\n\t\t\thost: \"foobar\",\n\t\t\texpectedErr: \"unable to parse docker host\",\n\t\t},\n\t\t{\n\t\t\thost: \"foo:\/\/bar\",\n\t\t\texpected: &url.URL{Scheme: \"foo\", Host: \"bar\"},\n\t\t},\n\t\t{\n\t\t\thost: \"tcp:\/\/localhost:2476\",\n\t\t\texpected: &url.URL{Scheme: \"tcp\", Host: \"localhost:2476\"},\n\t\t},\n\t\t{\n\t\t\thost: \"tcp:\/\/localhost:2476\/path\",\n\t\t\texpected: &url.URL{Scheme: \"tcp\", Host: \"localhost:2476\", Path: \"\/path\"},\n\t\t},\n\t}\n\n\tfor _, testcase := range testcases {\n\t\tactual, err := ParseHostURL(testcase.host)\n\t\tif testcase.expectedErr != \"\" {\n\t\t\ttestutil.ErrorContains(t, err, testcase.expectedErr)\n\t\t}\n\t\tassert.Equal(t, testcase.expected, actual)\n\t}\n}\n\nfunc TestNewEnvClientSetsDefaultVersion(t *testing.T) {\n\tenv := envToMap()\n\tdefer mapToEnv(env)\n\n\tenvMap := map[string]string{\n\t\t\"DOCKER_HOST\": \"\",\n\t\t\"DOCKER_API_VERSION\": \"\",\n\t\t\"DOCKER_TLS_VERIFY\": \"\",\n\t\t\"DOCKER_CERT_PATH\": \"\",\n\t}\n\tmapToEnv(envMap)\n\n\tclient, err := NewEnvClient()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tassert.Equal(t, client.version, api.DefaultVersion)\n\n\texpected := \"1.22\"\n\tos.Setenv(\"DOCKER_API_VERSION\", expected)\n\tclient, err = NewEnvClient()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tassert.Equal(t, expected, client.version)\n}\n\n\/\/ TestNegotiateAPIVersionEmpty asserts that 
client.Client can\n\/\/ negotiate a compatible APIVersion when omitted\nfunc TestNegotiateAPIVersionEmpty(t *testing.T) {\n\tenv := envToMap()\n\tdefer mapToEnv(env)\n\n\tenvMap := map[string]string{\n\t\t\"DOCKER_API_VERSION\": \"\",\n\t}\n\tmapToEnv(envMap)\n\n\tclient, err := NewEnvClient()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tping := types.Ping{\n\t\tAPIVersion: \"\",\n\t\tOSType: \"linux\",\n\t\tExperimental: false,\n\t}\n\n\t\/\/ set our version to something new\n\tclient.version = \"1.25\"\n\n\t\/\/ if no version from server, expect the earliest\n\t\/\/ version before APIVersion was implemented\n\texpected := \"1.24\"\n\n\t\/\/ test downgrade\n\tclient.NegotiateAPIVersionPing(ping)\n\tassert.Equal(t, expected, client.version)\n}\n\n\/\/ TestNegotiateAPIVersion asserts that client.Client can\n\/\/ negotiate a compatible APIVersion with the server\nfunc TestNegotiateAPIVersion(t *testing.T) {\n\tclient, err := NewEnvClient()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\texpected := \"1.21\"\n\n\tping := types.Ping{\n\t\tAPIVersion: expected,\n\t\tOSType: \"linux\",\n\t\tExperimental: false,\n\t}\n\n\t\/\/ set our version to something new\n\tclient.version = \"1.22\"\n\n\t\/\/ test downgrade\n\tclient.NegotiateAPIVersionPing(ping)\n\tassert.Equal(t, expected, client.version)\n\n\t\/\/ set the client version to something older, and verify that we keep the\n\t\/\/ original setting.\n\texpected = \"1.20\"\n\tclient.version = expected\n\tclient.NegotiateAPIVersionPing(ping)\n\tassert.Equal(t, expected, client.version)\n\n}\n\n\/\/ TestNegotiateAPIVersionOverride asserts that we honor\n\/\/ the environment variable DOCKER_API_VERSION when negotiating versions\nfunc TestNegotiateAPIVersionOverride(t *testing.T) {\n\tenv := envToMap()\n\tdefer mapToEnv(env)\n\n\tenvMap := map[string]string{\n\t\t\"DOCKER_API_VERSION\": \"9.99\",\n\t}\n\tmapToEnv(envMap)\n\n\tclient, err := NewEnvClient()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tping := types.Ping{\n\t\tAPIVersion: \"1.24\",\n\t\tOSType: \"linux\",\n\t\tExperimental: false,\n\t}\n\n\texpected := envMap[\"DOCKER_API_VERSION\"]\n\n\t\/\/ test that we honored the env var\n\tclient.NegotiateAPIVersionPing(ping)\n\tassert.Equal(t, expected, client.version)\n}\n\n\/\/ mapToEnv takes a map of environment variables and sets them\nfunc mapToEnv(env map[string]string) {\n\tfor k, v := range env {\n\t\tos.Setenv(k, v)\n\t}\n}\n\n\/\/ envToMap returns a map of environment variables\nfunc envToMap() map[string]string {\n\tenv := make(map[string]string)\n\tfor _, e := range os.Environ() {\n\t\tkv := strings.SplitAfterN(e, \"=\", 2)\n\t\tenv[kv[0]] = kv[1]\n\t}\n\n\treturn env\n}\n\ntype roundTripFunc func(*http.Request) (*http.Response, error)\n\nfunc (rtf roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {\n\treturn rtf(req)\n}\n\ntype bytesBufferClose struct {\n\t*bytes.Buffer\n}\n\nfunc (bbc bytesBufferClose) Close() error {\n\treturn nil\n}\n\nfunc TestClientRedirect(t *testing.T) {\n\tclient := &http.Client{\n\t\tCheckRedirect: CheckRedirect,\n\t\tTransport: roundTripFunc(func(req *http.Request) (*http.Response, error) {\n\t\t\tif req.URL.String() == \"\/bla\" {\n\t\t\t\treturn &http.Response{StatusCode: 404}, nil\n\t\t\t}\n\t\t\treturn &http.Response{\n\t\t\t\tStatusCode: 301,\n\t\t\t\tHeader: map[string][]string{\"Location\": {\"\/bla\"}},\n\t\t\t\tBody: bytesBufferClose{bytes.NewBuffer(nil)},\n\t\t\t}, nil\n\t\t}),\n\t}\n\n\tcases := []struct {\n\t\thttpMethod string\n\t\texpectedErr error\n\t\tstatusCode 
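\/\/ [Editor's note] Hedged sketch, not in the original file, of the negotiation flow the\n\t\t\/\/ three tests above exercise. Only NegotiateAPIVersionPing and types.Ping, both shown\n\t\t\/\/ above, are assumed; \"1.30\" is an arbitrary illustrative server version.\n\t\t\/*\n\t\t\tcli, err := NewEnvClient()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\t\/\/ Feed the server's ping response in: the client downgrades itself when the\n\t\t\t\/\/ server is older, keeps its own version when it is already older, and falls\n\t\t\t\/\/ back to \"1.24\" when the server reports no version at all.\n\t\t\tcli.NegotiateAPIVersionPing(types.Ping{APIVersion: \"1.30\"})\n\t\t*\/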
int\n\t}{\n\t\t{http.MethodGet, nil, 301},\n\t\t{http.MethodPost, &url.Error{Op: \"Post\", URL: \"\/bla\", Err: ErrRedirect}, 301},\n\t\t{http.MethodPut, &url.Error{Op: \"Put\", URL: \"\/bla\", Err: ErrRedirect}, 301},\n\t\t{http.MethodDelete, &url.Error{Op: \"Delete\", URL: \"\/bla\", Err: ErrRedirect}, 301},\n\t}\n\n\tfor _, tc := range cases {\n\t\treq, err := http.NewRequest(tc.httpMethod, \"\/redirectme\", nil)\n\t\tassert.NoError(t, err)\n\t\tresp, err := client.Do(req)\n\t\tassert.Equal(t, tc.expectedErr, err)\n\t\tassert.Equal(t, tc.statusCode, resp.StatusCode)\n\t}\n}\n<commit_msg>Use gotestyourself env patching<commit_after>package client \/\/ import \"github.com\/docker\/docker\/client\"\n\nimport (\n\t\"bytes\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"github.com\/docker\/docker\/api\"\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/docker\/docker\/internal\/testutil\"\n\t\"github.com\/gotestyourself\/gotestyourself\/env\"\n\t\"github.com\/gotestyourself\/gotestyourself\/skip\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestNewEnvClient(t *testing.T) {\n\tskip.If(t, runtime.GOOS == \"windows\")\n\n\ttestcases := []struct {\n\t\tdoc string\n\t\tenvs map[string]string\n\t\texpectedError string\n\t\texpectedVersion string\n\t}{\n\t\t{\n\t\t\tdoc: \"default api version\",\n\t\t\tenvs: map[string]string{},\n\t\t\texpectedVersion: api.DefaultVersion,\n\t\t},\n\t\t{\n\t\t\tdoc: \"invalid cert path\",\n\t\t\tenvs: map[string]string{\n\t\t\t\t\"DOCKER_CERT_PATH\": \"invalid\/path\",\n\t\t\t},\n\t\t\texpectedError: \"Could not load X509 key pair: open invalid\/path\/cert.pem: no such file or directory\",\n\t\t},\n\t\t{\n\t\t\tdoc: \"default api version with cert path\",\n\t\t\tenvs: map[string]string{\n\t\t\t\t\"DOCKER_CERT_PATH\": \"testdata\/\",\n\t\t\t},\n\t\t\texpectedVersion: api.DefaultVersion,\n\t\t},\n\t\t{\n\t\t\tdoc: \"default api version with cert path and tls verify\",\n\t\t\tenvs: map[string]string{\n\t\t\t\t\"DOCKER_CERT_PATH\": \"testdata\/\",\n\t\t\t\t\"DOCKER_TLS_VERIFY\": \"1\",\n\t\t\t},\n\t\t\texpectedVersion: api.DefaultVersion,\n\t\t},\n\t\t{\n\t\t\tdoc: \"default api version with cert path and host\",\n\t\t\tenvs: map[string]string{\n\t\t\t\t\"DOCKER_CERT_PATH\": \"testdata\/\",\n\t\t\t\t\"DOCKER_HOST\": \"https:\/\/notaunixsocket\",\n\t\t\t},\n\t\t\texpectedVersion: api.DefaultVersion,\n\t\t},\n\t\t{\n\t\t\tdoc: \"invalid docker host\",\n\t\t\tenvs: map[string]string{\n\t\t\t\t\"DOCKER_HOST\": \"host\",\n\t\t\t},\n\t\t\texpectedError: \"unable to parse docker host `host`\",\n\t\t},\n\t\t{\n\t\t\tdoc: \"invalid docker host, with good format\",\n\t\t\tenvs: map[string]string{\n\t\t\t\t\"DOCKER_HOST\": \"invalid:\/\/url\",\n\t\t\t},\n\t\t\texpectedVersion: api.DefaultVersion,\n\t\t},\n\t\t{\n\t\t\tdoc: \"override api version\",\n\t\t\tenvs: map[string]string{\n\t\t\t\t\"DOCKER_API_VERSION\": \"1.22\",\n\t\t\t},\n\t\t\texpectedVersion: \"1.22\",\n\t\t},\n\t}\n\n\tdefer env.PatchAll(t, nil)()\n\tfor _, c := range testcases {\n\t\tenv.PatchAll(t, c.envs)\n\t\tapiclient, err := NewEnvClient()\n\t\tif c.expectedError != \"\" {\n\t\t\tassert.Error(t, err, c.doc)\n\t\t\tassert.Equal(t, c.expectedError, err.Error(), c.doc)\n\t\t} else {\n\t\t\tassert.NoError(t, err, c.doc)\n\t\t\tversion := apiclient.ClientVersion()\n\t\t\tassert.Equal(t, c.expectedVersion, version, c.doc)\n\t\t}\n\n\t\tif c.envs[\"DOCKER_TLS_VERIFY\"] != \"\" {\n\t\t\t\/\/ pedantic checking that 
this is handled correctly\n\t\t\ttr := apiclient.client.Transport.(*http.Transport)\n\t\t\tassert.NotNil(t, tr.TLSClientConfig, c.doc)\n\t\t\tassert.Equal(t, tr.TLSClientConfig.InsecureSkipVerify, false, c.doc)\n\t\t}\n\t}\n}\n\nfunc TestGetAPIPath(t *testing.T) {\n\ttestcases := []struct {\n\t\tversion string\n\t\tpath string\n\t\tquery url.Values\n\t\texpected string\n\t}{\n\t\t{\"\", \"\/containers\/json\", nil, \"\/containers\/json\"},\n\t\t{\"\", \"\/containers\/json\", url.Values{}, \"\/containers\/json\"},\n\t\t{\"\", \"\/containers\/json\", url.Values{\"s\": []string{\"c\"}}, \"\/containers\/json?s=c\"},\n\t\t{\"1.22\", \"\/containers\/json\", nil, \"\/v1.22\/containers\/json\"},\n\t\t{\"1.22\", \"\/containers\/json\", url.Values{}, \"\/v1.22\/containers\/json\"},\n\t\t{\"1.22\", \"\/containers\/json\", url.Values{\"s\": []string{\"c\"}}, \"\/v1.22\/containers\/json?s=c\"},\n\t\t{\"v1.22\", \"\/containers\/json\", nil, \"\/v1.22\/containers\/json\"},\n\t\t{\"v1.22\", \"\/containers\/json\", url.Values{}, \"\/v1.22\/containers\/json\"},\n\t\t{\"v1.22\", \"\/containers\/json\", url.Values{\"s\": []string{\"c\"}}, \"\/v1.22\/containers\/json?s=c\"},\n\t\t{\"v1.22\", \"\/networks\/kiwl$%^\", nil, \"\/v1.22\/networks\/kiwl$%25%5E\"},\n\t}\n\n\tfor _, testcase := range testcases {\n\t\tc := Client{version: testcase.version, basePath: \"\/\"}\n\t\tactual := c.getAPIPath(testcase.path, testcase.query)\n\t\tassert.Equal(t, actual, testcase.expected)\n\t}\n}\n\nfunc TestParseHost(t *testing.T) {\n\tcases := []struct {\n\t\thost string\n\t\tproto string\n\t\taddr string\n\t\tbase string\n\t\terr bool\n\t}{\n\t\t{\"\", \"\", \"\", \"\", true},\n\t\t{\"foobar\", \"\", \"\", \"\", true},\n\t\t{\"foo:\/\/bar\", \"foo\", \"bar\", \"\", false},\n\t\t{\"tcp:\/\/localhost:2476\", \"tcp\", \"localhost:2476\", \"\", false},\n\t\t{\"tcp:\/\/localhost:2476\/path\", \"tcp\", \"localhost:2476\", \"\/path\", false},\n\t}\n\n\tfor _, cs := range cases {\n\t\tp, a, b, e := ParseHost(cs.host)\n\t\tif cs.err {\n\t\t\tassert.Error(t, e)\n\t\t}\n\t\tassert.Equal(t, cs.proto, p)\n\t\tassert.Equal(t, cs.addr, a)\n\t\tassert.Equal(t, cs.base, b)\n\t}\n}\n\nfunc TestParseHostURL(t *testing.T) {\n\ttestcases := []struct {\n\t\thost string\n\t\texpected *url.URL\n\t\texpectedErr string\n\t}{\n\t\t{\n\t\t\thost: \"\",\n\t\t\texpectedErr: \"unable to parse docker host\",\n\t\t},\n\t\t{\n\t\t\thost: \"foobar\",\n\t\t\texpectedErr: \"unable to parse docker host\",\n\t\t},\n\t\t{\n\t\t\thost: \"foo:\/\/bar\",\n\t\t\texpected: &url.URL{Scheme: \"foo\", Host: \"bar\"},\n\t\t},\n\t\t{\n\t\t\thost: \"tcp:\/\/localhost:2476\",\n\t\t\texpected: &url.URL{Scheme: \"tcp\", Host: \"localhost:2476\"},\n\t\t},\n\t\t{\n\t\t\thost: \"tcp:\/\/localhost:2476\/path\",\n\t\t\texpected: &url.URL{Scheme: \"tcp\", Host: \"localhost:2476\", Path: \"\/path\"},\n\t\t},\n\t}\n\n\tfor _, testcase := range testcases {\n\t\tactual, err := ParseHostURL(testcase.host)\n\t\tif testcase.expectedErr != \"\" {\n\t\t\ttestutil.ErrorContains(t, err, testcase.expectedErr)\n\t\t}\n\t\tassert.Equal(t, testcase.expected, actual)\n\t}\n}\n\nfunc TestNewEnvClientSetsDefaultVersion(t *testing.T) {\n\tdefer env.PatchAll(t, map[string]string{\n\t\t\"DOCKER_HOST\": \"\",\n\t\t\"DOCKER_API_VERSION\": \"\",\n\t\t\"DOCKER_TLS_VERIFY\": \"\",\n\t\t\"DOCKER_CERT_PATH\": \"\",\n\t})()\n\n\tclient, err := NewEnvClient()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tassert.Equal(t, client.version, api.DefaultVersion)\n\n\texpected := 
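\/\/ [Editor's note] An aside, not in the original file, on the env.PatchAll pattern used\n\t\/\/ in this revision of the tests: PatchAll appears to apply the patch immediately and\n\t\/\/ return a restore function, so the trailing () in \"defer env.PatchAll(t, m)()\" is what\n\t\/\/ schedules the cleanup; deferring the bare call would postpone the patch itself until\n\t\/\/ teardown.\n\t\/*\n\t\trestore := env.PatchAll(t, map[string]string{\"DOCKER_HOST\": \"\"}) \/\/ patch now\n\t\tdefer restore() \/\/ restore at test exit; same as: defer env.PatchAll(t, m)()\n\t*\/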
\"1.22\"\n\tos.Setenv(\"DOCKER_API_VERSION\", expected)\n\tclient, err = NewEnvClient()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tassert.Equal(t, expected, client.version)\n}\n\n\/\/ TestNegotiateAPIVersionEmpty asserts that client.Client can\n\/\/ negotiate a compatible APIVersion when omitted\nfunc TestNegotiateAPIVersionEmpty(t *testing.T) {\n\tdefer env.PatchAll(t, map[string]string{\"DOCKER_API_VERSION\": \"\"})\n\n\tclient, err := NewEnvClient()\n\trequire.NoError(t, err)\n\n\tping := types.Ping{\n\t\tAPIVersion: \"\",\n\t\tOSType: \"linux\",\n\t\tExperimental: false,\n\t}\n\n\t\/\/ set our version to something new\n\tclient.version = \"1.25\"\n\n\t\/\/ if no version from server, expect the earliest\n\t\/\/ version before APIVersion was implemented\n\texpected := \"1.24\"\n\n\t\/\/ test downgrade\n\tclient.NegotiateAPIVersionPing(ping)\n\tassert.Equal(t, expected, client.version)\n}\n\n\/\/ TestNegotiateAPIVersion asserts that client.Client can\n\/\/ negotiate a compatible APIVersion with the server\nfunc TestNegotiateAPIVersion(t *testing.T) {\n\tclient, err := NewEnvClient()\n\trequire.NoError(t, err)\n\n\texpected := \"1.21\"\n\tping := types.Ping{\n\t\tAPIVersion: expected,\n\t\tOSType: \"linux\",\n\t\tExperimental: false,\n\t}\n\n\t\/\/ set our version to something new\n\tclient.version = \"1.22\"\n\n\t\/\/ test downgrade\n\tclient.NegotiateAPIVersionPing(ping)\n\tassert.Equal(t, expected, client.version)\n\n\t\/\/ set the client version to something older, and verify that we keep the\n\t\/\/ original setting.\n\texpected = \"1.20\"\n\tclient.version = expected\n\tclient.NegotiateAPIVersionPing(ping)\n\tassert.Equal(t, expected, client.version)\n\n}\n\n\/\/ TestNegotiateAPIVersionOverride asserts that we honor\n\/\/ the environment variable DOCKER_API_VERSION when negotianing versions\nfunc TestNegotiateAPVersionOverride(t *testing.T) {\n\texpected := \"9.99\"\n\tdefer env.PatchAll(t, map[string]string{\"DOCKER_API_VERSION\": expected})()\n\n\tclient, err := NewEnvClient()\n\trequire.NoError(t, err)\n\n\tping := types.Ping{\n\t\tAPIVersion: \"1.24\",\n\t\tOSType: \"linux\",\n\t\tExperimental: false,\n\t}\n\n\t\/\/ test that we honored the env var\n\tclient.NegotiateAPIVersionPing(ping)\n\tassert.Equal(t, expected, client.version)\n}\n\ntype roundTripFunc func(*http.Request) (*http.Response, error)\n\nfunc (rtf roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {\n\treturn rtf(req)\n}\n\ntype bytesBufferClose struct {\n\t*bytes.Buffer\n}\n\nfunc (bbc bytesBufferClose) Close() error {\n\treturn nil\n}\n\nfunc TestClientRedirect(t *testing.T) {\n\tclient := &http.Client{\n\t\tCheckRedirect: CheckRedirect,\n\t\tTransport: roundTripFunc(func(req *http.Request) (*http.Response, error) {\n\t\t\tif req.URL.String() == \"\/bla\" {\n\t\t\t\treturn &http.Response{StatusCode: 404}, nil\n\t\t\t}\n\t\t\treturn &http.Response{\n\t\t\t\tStatusCode: 301,\n\t\t\t\tHeader: map[string][]string{\"Location\": {\"\/bla\"}},\n\t\t\t\tBody: bytesBufferClose{bytes.NewBuffer(nil)},\n\t\t\t}, nil\n\t\t}),\n\t}\n\n\tcases := []struct {\n\t\thttpMethod string\n\t\texpectedErr error\n\t\tstatusCode int\n\t}{\n\t\t{http.MethodGet, nil, 301},\n\t\t{http.MethodPost, &url.Error{Op: \"Post\", URL: \"\/bla\", Err: ErrRedirect}, 301},\n\t\t{http.MethodPut, &url.Error{Op: \"Put\", URL: \"\/bla\", Err: ErrRedirect}, 301},\n\t\t{http.MethodDelete, &url.Error{Op: \"Delete\", URL: \"\/bla\", Err: ErrRedirect}, 301},\n\t}\n\n\tfor _, tc := range cases {\n\t\treq, err := http.NewRequest(tc.httpMethod, 
\"\/redirectme\", nil)\n\t\tassert.NoError(t, err)\n\t\tresp, err := client.Do(req)\n\t\tassert.Equal(t, tc.expectedErr, err)\n\t\tassert.Equal(t, tc.statusCode, resp.StatusCode)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage v1\n\nimport (\n\t\"fmt\"\n\n\tappsv1 \"k8s.io\/api\/apps\/v1\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\n\t\"knative.dev\/pkg\/apis\"\n\tav1alpha1 \"knative.dev\/serving\/pkg\/apis\/autoscaling\/v1alpha1\"\n\t\"knative.dev\/serving\/pkg\/apis\/config\"\n\t\"knative.dev\/serving\/pkg\/apis\/serving\"\n)\n\nconst (\n\t\/\/ ReasonContainerMissing defines the reason for marking container healthiness status\n\t\/\/ as false if the a container image for the revision is missing.\n\tReasonContainerMissing = \"ContainerMissing\"\n\n\t\/\/ ReasonResolvingDigests defines the reason for marking container healthiness status\n\t\/\/ as unknown if the digests for the container images are being resolved.\n\tReasonResolvingDigests = \"ResolvingDigests\"\n\n\t\/\/ ReasonDeploying defines the reason for marking revision availability status as\n\t\/\/ unknown if the revision is still deploying.\n\tReasonDeploying = \"Deploying\"\n\n\t\/\/ ReasonNotOwned defines the reason for marking revision availability status as\n\t\/\/ false due to resource ownership issues.\n\tReasonNotOwned = \"NotOwned\"\n\n\t\/\/ ReasonProgressDeadlineExceeded defines the reason for marking revision availability\n\t\/\/ status as false if progress has exceeded the deadline.\n\tReasonProgressDeadlineExceeded = \"ProgressDeadlineExceeded\"\n)\n\nvar revisionCondSet = apis.NewLivingConditionSet(\n\tRevisionConditionResourcesAvailable,\n\tRevisionConditionContainerHealthy,\n)\n\n\/\/ GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.\nfunc (*Revision) GetConditionSet() apis.ConditionSet {\n\treturn revisionCondSet\n}\n\n\/\/ GetGroupVersionKind returns the GroupVersionKind.\nfunc (r *Revision) GetGroupVersionKind() schema.GroupVersionKind {\n\treturn SchemeGroupVersion.WithKind(\"Revision\")\n}\n\n\/\/ IsReady returns true if the Status condition RevisionConditionReady\n\/\/ is true and the latest spec has been observed.\nfunc (r *Revision) IsReady() bool {\n\trs := r.Status\n\treturn rs.ObservedGeneration == r.Generation &&\n\t\trs.GetCondition(RevisionConditionReady).IsTrue()\n}\n\n\/\/ IsFailed returns true if the resource has observed the latest generation\n\/\/ and ready is false.\nfunc (r *Revision) IsFailed() bool {\n\trs := r.Status\n\treturn rs.ObservedGeneration == r.Generation &&\n\t\trs.GetCondition(RevisionConditionReady).IsFalse()\n}\n\n\/\/ GetContainerConcurrency returns the container concurrency. 
If\n\/\/ container concurrency is not set, the default value will be returned.\n\/\/ We use the original default (0) here for backwards compatibility.\n\/\/ Previous versions of Knative equated unspecified and zero, so to avoid\n\/\/ changing the value used by Revisions with unspecified values when a different\n\/\/ default is configured, we use the original default instead of the configured\n\/\/ default to remain safe across upgrades.\nfunc (rs *RevisionSpec) GetContainerConcurrency() int64 {\n\tif rs.ContainerConcurrency == nil {\n\t\treturn config.DefaultContainerConcurrency\n\t}\n\treturn *rs.ContainerConcurrency\n}\n\n\/\/ InitializeConditions sets the initial values to the conditions.\nfunc (rs *RevisionStatus) InitializeConditions() {\n\trevisionCondSet.Manage(rs).InitializeConditions()\n}\n\n\/\/ MarkActiveTrue marks Active status on revision as True\nfunc (rs *RevisionStatus) MarkActiveTrue() {\n\trevisionCondSet.Manage(rs).MarkTrue(RevisionConditionActive)\n}\n\n\/\/ MarkActiveFalse marks Active status on revision as False\nfunc (rs *RevisionStatus) MarkActiveFalse(reason, message string) {\n\trevisionCondSet.Manage(rs).MarkFalse(RevisionConditionActive, reason, message)\n}\n\n\/\/ MarkActiveUnknown marks Active status on revision as Unknown\nfunc (rs *RevisionStatus) MarkActiveUnknown(reason, message string) {\n\trevisionCondSet.Manage(rs).MarkUnknown(RevisionConditionActive, reason, message)\n}\n\n\/\/ MarkContainerHealthyTrue marks ContainerHealthy status on revision as True\nfunc (rs *RevisionStatus) MarkContainerHealthyTrue() {\n\trevisionCondSet.Manage(rs).MarkTrue(RevisionConditionContainerHealthy)\n}\n\n\/\/ MarkContainerHealthyFalse marks ContainerHealthy status on revision as False\nfunc (rs *RevisionStatus) MarkContainerHealthyFalse(reason, message string) {\n\trevisionCondSet.Manage(rs).MarkFalse(RevisionConditionContainerHealthy, reason, message)\n}\n\n\/\/ MarkContainerHealthyUnknown marks ContainerHealthy status on revision as Unknown\nfunc (rs *RevisionStatus) MarkContainerHealthyUnknown(reason, message string) {\n\trevisionCondSet.Manage(rs).MarkUnknown(RevisionConditionContainerHealthy, reason, message)\n}\n\n\/\/ MarkResourcesAvailableTrue marks ResourcesAvailable status on revision as True\nfunc (rs *RevisionStatus) MarkResourcesAvailableTrue() {\n\trevisionCondSet.Manage(rs).MarkTrue(RevisionConditionResourcesAvailable)\n}\n\n\/\/ MarkResourcesAvailableFalse marks ResourcesAvailable status on revision as False\nfunc (rs *RevisionStatus) MarkResourcesAvailableFalse(reason, message string) {\n\trevisionCondSet.Manage(rs).MarkFalse(RevisionConditionResourcesAvailable, reason, message)\n}\n\n\/\/ MarkResourcesAvailableUnknown marks ResourcesAvailable status on revision as Unknown\nfunc (rs *RevisionStatus) MarkResourcesAvailableUnknown(reason, message string) {\n\trevisionCondSet.Manage(rs).MarkUnknown(RevisionConditionResourcesAvailable, reason, message)\n}\n\n\/\/ PropagateDeploymentStatus takes the Deployment status and applies its values\n\/\/ to the Revision status.\nfunc (rs *RevisionStatus) PropagateDeploymentStatus(original *appsv1.DeploymentStatus) {\n\tds := serving.TransformDeploymentStatus(original)\n\tcond := ds.GetCondition(serving.DeploymentConditionReady)\n\n\tm := revisionCondSet.Manage(rs)\n\tswitch cond.Status {\n\tcase corev1.ConditionTrue:\n\t\tm.MarkTrue(RevisionConditionResourcesAvailable)\n\tcase corev1.ConditionFalse:\n\t\tm.MarkFalse(RevisionConditionResourcesAvailable, cond.Reason, cond.Message)\n\tcase 
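\/\/ [Editor's note] Hedged usage sketch, not part of the original file: how the Mark*\n\t\/\/ helpers and message constructors in this file compose inside a reconciler. Only\n\t\/\/ identifiers declared here are used; \"user-deployment\" is an illustrative name.\n\t\/*\n\t\tvar rs RevisionStatus\n\t\trs.InitializeConditions()\n\t\trs.MarkResourcesAvailableFalse(\n\t\t\tReasonNotOwned,\n\t\t\tResourceNotOwnedMessage(\"Deployment\", \"user-deployment\"),\n\t\t)\n\t*\/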
corev1.ConditionUnknown:\n\t\tm.MarkUnknown(RevisionConditionResourcesAvailable, cond.Reason, cond.Message)\n\t}\n}\n\n\/\/ PropagateAutoscalerStatus propagates autoscaler's status to the revision's status.\nfunc (rs *RevisionStatus) PropagateAutoscalerStatus(ps *av1alpha1.PodAutoscalerStatus) {\n\t\/\/ Propagate the service name from the PA.\n\trs.ServiceName = ps.ServiceName\n\n\t\/\/ Reflect the PA status in our own.\n\tcond := ps.GetCondition(av1alpha1.PodAutoscalerConditionReady)\n\tif cond == nil {\n\t\trs.MarkActiveUnknown(\"Deploying\", \"\")\n\t\treturn\n\t}\n\n\t\/\/ Don't mark the resources available, if deployment status already determined\n\t\/\/ it isn't so.\n\tresUnavailable := !rs.GetCondition(RevisionConditionResourcesAvailable).IsFalse()\n\tif ps.IsScaleTargetInitialized() && resUnavailable {\n\t\t\/\/ Precondition for PA being initialized is SKS being active and\n\t\t\/\/ that implies that |service.endpoints| > 0.\n\t\trs.MarkResourcesAvailableTrue()\n\t\trs.MarkContainerHealthyTrue()\n\t}\n\n\tswitch cond.Status {\n\tcase corev1.ConditionUnknown:\n\t\trs.MarkActiveUnknown(cond.Reason, cond.Message)\n\tcase corev1.ConditionFalse:\n\t\t\/\/ Here we have 2 things coming together at the same time:\n\t\t\/\/ 1. The ready is False, meaning the revision is scaled to 0\n\t\t\/\/ 2. Initial scale was never achieved, which means we failed to progress\n\t\t\/\/ towards initial scale during the progress deadline period and scaled to 0\n\t\t\/\/\t\tfailing to activate.\n\t\t\/\/ So mark the revision as failed at that point.\n\t\t\/\/ See #8922 for details. When we try to scale to 0, we force the Deployment's\n\t\t\/\/ Progress status to become `true`, since successful scale down means\n\t\t\/\/ progress has been achieved.\n\t\t\/\/ There's the possibility of the revision reconciler reconciling PA before\n\t\t\/\/ the ServiceName is populated, and therefore even though we will mark\n\t\t\/\/ ScaleTargetInitialized down the road, we would have marked resources\n\t\t\/\/ unavailable here, and have no way of recovering later.\n\t\t\/\/ If the ResourcesAvailable is already false, don't override the message.\n\t\tif !ps.IsScaleTargetInitialized() && resUnavailable && ps.ServiceName != \"\" {\n\t\t\trs.MarkResourcesAvailableFalse(ReasonProgressDeadlineExceeded,\n\t\t\t\t\"Initial scale was never achieved\")\n\t\t}\n\t\trs.MarkActiveFalse(cond.Reason, cond.Message)\n\tcase corev1.ConditionTrue:\n\t\trs.MarkActiveTrue()\n\n\t\t\/\/ Precondition for PA being active is SKS being active and\n\t\t\/\/ that implies that |service.endpoints| > 0.\n\t\t\/\/\n\t\t\/\/ Note: This is needed for backwards compatibility as we're adding the new\n\t\t\/\/ ScaleTargetInitialized condition to gate readiness.\n\t\trs.MarkResourcesAvailableTrue()\n\t\trs.MarkContainerHealthyTrue()\n\t}\n}\n\n\/\/ ResourceNotOwnedMessage constructs the status message if ownership on the\n\/\/ resource is not right.\nfunc ResourceNotOwnedMessage(kind, name string) string {\n\treturn fmt.Sprintf(\"There is an existing %s %q that we do not own.\", kind, name)\n}\n\n\/\/ ExitCodeReason constructs the status message from an exit code\nfunc ExitCodeReason(exitCode int32) string {\n\treturn fmt.Sprint(\"ExitCode\", exitCode)\n}\n\n\/\/ RevisionContainerExitingMessage constructs the status message if a container\n\/\/ fails to come up.\nfunc RevisionContainerExitingMessage(message string) string {\n\treturn fmt.Sprint(\"Container failed with: \", message)\n}\n\n\/\/ RevisionContainerMissingMessage constructs the status message if a 
given image\n\/\/ cannot be pulled correctly.\nfunc RevisionContainerMissingMessage(image string, message string) string {\n\treturn fmt.Sprintf(\"Unable to fetch image %q: %s\", image, message)\n}\n<commit_msg>Move negation of resource available condition (#9798)<commit_after>\/*\nCopyright 2019 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage v1\n\nimport (\n\t\"fmt\"\n\n\tappsv1 \"k8s.io\/api\/apps\/v1\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\n\t\"knative.dev\/pkg\/apis\"\n\tav1alpha1 \"knative.dev\/serving\/pkg\/apis\/autoscaling\/v1alpha1\"\n\t\"knative.dev\/serving\/pkg\/apis\/config\"\n\t\"knative.dev\/serving\/pkg\/apis\/serving\"\n)\n\nconst (\n\t\/\/ ReasonContainerMissing defines the reason for marking container healthiness status\n\t\/\/ as false if the a container image for the revision is missing.\n\tReasonContainerMissing = \"ContainerMissing\"\n\n\t\/\/ ReasonResolvingDigests defines the reason for marking container healthiness status\n\t\/\/ as unknown if the digests for the container images are being resolved.\n\tReasonResolvingDigests = \"ResolvingDigests\"\n\n\t\/\/ ReasonDeploying defines the reason for marking revision availability status as\n\t\/\/ unknown if the revision is still deploying.\n\tReasonDeploying = \"Deploying\"\n\n\t\/\/ ReasonNotOwned defines the reason for marking revision availability status as\n\t\/\/ false due to resource ownership issues.\n\tReasonNotOwned = \"NotOwned\"\n\n\t\/\/ ReasonProgressDeadlineExceeded defines the reason for marking revision availability\n\t\/\/ status as false if progress has exceeded the deadline.\n\tReasonProgressDeadlineExceeded = \"ProgressDeadlineExceeded\"\n)\n\nvar revisionCondSet = apis.NewLivingConditionSet(\n\tRevisionConditionResourcesAvailable,\n\tRevisionConditionContainerHealthy,\n)\n\n\/\/ GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.\nfunc (*Revision) GetConditionSet() apis.ConditionSet {\n\treturn revisionCondSet\n}\n\n\/\/ GetGroupVersionKind returns the GroupVersionKind.\nfunc (r *Revision) GetGroupVersionKind() schema.GroupVersionKind {\n\treturn SchemeGroupVersion.WithKind(\"Revision\")\n}\n\n\/\/ IsReady returns true if the Status condition RevisionConditionReady\n\/\/ is true and the latest spec has been observed.\nfunc (r *Revision) IsReady() bool {\n\trs := r.Status\n\treturn rs.ObservedGeneration == r.Generation &&\n\t\trs.GetCondition(RevisionConditionReady).IsTrue()\n}\n\n\/\/ IsFailed returns true if the resource has observed the latest generation\n\/\/ and ready is false.\nfunc (r *Revision) IsFailed() bool {\n\trs := r.Status\n\treturn rs.ObservedGeneration == r.Generation &&\n\t\trs.GetCondition(RevisionConditionReady).IsFalse()\n}\n\n\/\/ GetContainerConcurrency returns the container concurrency. 
If\n\/\/ container concurrency is not set, the default value will be returned.\n\/\/ We use the original default (0) here for backwards compatibility.\n\/\/ Previous versions of Knative equated unspecified and zero, so to avoid\n\/\/ changing the value used by Revisions with unspecified values when a different\n\/\/ default is configured, we use the original default instead of the configured\n\/\/ default to remain safe across upgrades.\nfunc (rs *RevisionSpec) GetContainerConcurrency() int64 {\n\tif rs.ContainerConcurrency == nil {\n\t\treturn config.DefaultContainerConcurrency\n\t}\n\treturn *rs.ContainerConcurrency\n}\n\n\/\/ InitializeConditions sets the initial values to the conditions.\nfunc (rs *RevisionStatus) InitializeConditions() {\n\trevisionCondSet.Manage(rs).InitializeConditions()\n}\n\n\/\/ MarkActiveTrue marks Active status on revision as True\nfunc (rs *RevisionStatus) MarkActiveTrue() {\n\trevisionCondSet.Manage(rs).MarkTrue(RevisionConditionActive)\n}\n\n\/\/ MarkActiveFalse marks Active status on revision as False\nfunc (rs *RevisionStatus) MarkActiveFalse(reason, message string) {\n\trevisionCondSet.Manage(rs).MarkFalse(RevisionConditionActive, reason, message)\n}\n\n\/\/ MarkActiveUnknown marks Active status on revision as Unknown\nfunc (rs *RevisionStatus) MarkActiveUnknown(reason, message string) {\n\trevisionCondSet.Manage(rs).MarkUnknown(RevisionConditionActive, reason, message)\n}\n\n\/\/ MarkContainerHealthyTrue marks ContainerHealthy status on revision as True\nfunc (rs *RevisionStatus) MarkContainerHealthyTrue() {\n\trevisionCondSet.Manage(rs).MarkTrue(RevisionConditionContainerHealthy)\n}\n\n\/\/ MarkContainerHealthyFalse marks ContainerHealthy status on revision as False\nfunc (rs *RevisionStatus) MarkContainerHealthyFalse(reason, message string) {\n\trevisionCondSet.Manage(rs).MarkFalse(RevisionConditionContainerHealthy, reason, message)\n}\n\n\/\/ MarkContainerHealthyUnknown marks ContainerHealthy status on revision as Unknown\nfunc (rs *RevisionStatus) MarkContainerHealthyUnknown(reason, message string) {\n\trevisionCondSet.Manage(rs).MarkUnknown(RevisionConditionContainerHealthy, reason, message)\n}\n\n\/\/ MarkResourcesAvailableTrue marks ResourcesAvailable status on revision as True\nfunc (rs *RevisionStatus) MarkResourcesAvailableTrue() {\n\trevisionCondSet.Manage(rs).MarkTrue(RevisionConditionResourcesAvailable)\n}\n\n\/\/ MarkResourcesAvailableFalse marks ResourcesAvailable status on revision as False\nfunc (rs *RevisionStatus) MarkResourcesAvailableFalse(reason, message string) {\n\trevisionCondSet.Manage(rs).MarkFalse(RevisionConditionResourcesAvailable, reason, message)\n}\n\n\/\/ MarkResourcesAvailableUnknown marks ResourcesAvailable status on revision as Unknown\nfunc (rs *RevisionStatus) MarkResourcesAvailableUnknown(reason, message string) {\n\trevisionCondSet.Manage(rs).MarkUnknown(RevisionConditionResourcesAvailable, reason, message)\n}\n\n\/\/ PropagateDeploymentStatus takes the Deployment status and applies its values\n\/\/ to the Revision status.\nfunc (rs *RevisionStatus) PropagateDeploymentStatus(original *appsv1.DeploymentStatus) {\n\tds := serving.TransformDeploymentStatus(original)\n\tcond := ds.GetCondition(serving.DeploymentConditionReady)\n\n\tm := revisionCondSet.Manage(rs)\n\tswitch cond.Status {\n\tcase corev1.ConditionTrue:\n\t\tm.MarkTrue(RevisionConditionResourcesAvailable)\n\tcase corev1.ConditionFalse:\n\t\tm.MarkFalse(RevisionConditionResourcesAvailable, cond.Reason, cond.Message)\n\tcase 
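\/\/ [Editor's note] Commentary on this commit, not in the original source: the change is a\n\t\/\/ pure refactor. Before, resUnavailable was assigned !IsFalse(), so the variable really\n\t\/\/ held \"resources are not marked unavailable\"; the commit assigns IsFalse() and negates\n\t\/\/ at the use sites (&& !resUnavailable) instead, leaving the logic identical while\n\t\/\/ making the name match the value.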
corev1.ConditionUnknown:\n\t\tm.MarkUnknown(RevisionConditionResourcesAvailable, cond.Reason, cond.Message)\n\t}\n}\n\n\/\/ PropagateAutoscalerStatus propagates autoscaler's status to the revision's status.\nfunc (rs *RevisionStatus) PropagateAutoscalerStatus(ps *av1alpha1.PodAutoscalerStatus) {\n\t\/\/ Propagate the service name from the PA.\n\trs.ServiceName = ps.ServiceName\n\n\t\/\/ Reflect the PA status in our own.\n\tcond := ps.GetCondition(av1alpha1.PodAutoscalerConditionReady)\n\tif cond == nil {\n\t\trs.MarkActiveUnknown(\"Deploying\", \"\")\n\t\treturn\n\t}\n\n\t\/\/ Don't mark the resources available, if deployment status already determined\n\t\/\/ it isn't so.\n\tresUnavailable := rs.GetCondition(RevisionConditionResourcesAvailable).IsFalse()\n\tif ps.IsScaleTargetInitialized() && !resUnavailable {\n\t\t\/\/ Precondition for PA being initialized is SKS being active and\n\t\t\/\/ that implies that |service.endpoints| > 0.\n\t\trs.MarkResourcesAvailableTrue()\n\t\trs.MarkContainerHealthyTrue()\n\t}\n\n\tswitch cond.Status {\n\tcase corev1.ConditionUnknown:\n\t\trs.MarkActiveUnknown(cond.Reason, cond.Message)\n\tcase corev1.ConditionFalse:\n\t\t\/\/ Here we have 2 things coming together at the same time:\n\t\t\/\/ 1. The ready is False, meaning the revision is scaled to 0\n\t\t\/\/ 2. Initial scale was never achieved, which means we failed to progress\n\t\t\/\/ towards initial scale during the progress deadline period and scaled to 0\n\t\t\/\/\t\tfailing to activate.\n\t\t\/\/ So mark the revision as failed at that point.\n\t\t\/\/ See #8922 for details. When we try to scale to 0, we force the Deployment's\n\t\t\/\/ Progress status to become `true`, since successful scale down means\n\t\t\/\/ progress has been achieved.\n\t\t\/\/ There's the possibility of the revision reconciler reconciling PA before\n\t\t\/\/ the ServiceName is populated, and therefore even though we will mark\n\t\t\/\/ ScaleTargetInitialized down the road, we would have marked resources\n\t\t\/\/ unavailable here, and have no way of recovering later.\n\t\t\/\/ If the ResourcesAvailable is already false, don't override the message.\n\t\tif !ps.IsScaleTargetInitialized() && !resUnavailable && ps.ServiceName != \"\" {\n\t\t\trs.MarkResourcesAvailableFalse(ReasonProgressDeadlineExceeded,\n\t\t\t\t\"Initial scale was never achieved\")\n\t\t}\n\t\trs.MarkActiveFalse(cond.Reason, cond.Message)\n\tcase corev1.ConditionTrue:\n\t\trs.MarkActiveTrue()\n\n\t\t\/\/ Precondition for PA being active is SKS being active and\n\t\t\/\/ that implies that |service.endpoints| > 0.\n\t\t\/\/\n\t\t\/\/ Note: This is needed for backwards compatibility as we're adding the new\n\t\t\/\/ ScaleTargetInitialized condition to gate readiness.\n\t\trs.MarkResourcesAvailableTrue()\n\t\trs.MarkContainerHealthyTrue()\n\t}\n}\n\n\/\/ ResourceNotOwnedMessage constructs the status message if ownership on the\n\/\/ resource is not right.\nfunc ResourceNotOwnedMessage(kind, name string) string {\n\treturn fmt.Sprintf(\"There is an existing %s %q that we do not own.\", kind, name)\n}\n\n\/\/ ExitCodeReason constructs the status message from an exit code\nfunc ExitCodeReason(exitCode int32) string {\n\treturn fmt.Sprint(\"ExitCode\", exitCode)\n}\n\n\/\/ RevisionContainerExitingMessage constructs the status message if a container\n\/\/ fails to come up.\nfunc RevisionContainerExitingMessage(message string) string {\n\treturn fmt.Sprint(\"Container failed with: \", message)\n}\n\n\/\/ RevisionContainerMissingMessage constructs the status message if a 
given image\n\/\/ cannot be pulled correctly.\nfunc RevisionContainerMissingMessage(image string, message string) string {\n\treturn fmt.Sprintf(\"Unable to fetch image %q: %s\", image, message)\n}\n<|endoftext|>"} {"text":"<commit_before>package resource\n\nimport (\n\t\"testing\"\n\n\t\"reflect\"\n\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/kubectl\/pkg\/loader\"\n)\n\nvar encoded = []byte(`apiVersion: v1\nkind: Deployment\nmetadata:\n name: dply1\n---\napiVersion: v1\nkind: Deployment\nmetadata:\n name: dply2\n`)\n\ntype fakeLoader struct {\n}\n\nfunc (l fakeLoader) New(newRoot string) (loader.Loader, error) {\n\treturn l, nil\n}\n\nfunc (l fakeLoader) Load(location string) ([]byte, error) {\n\treturn encoded, nil\n}\n\nfunc makeUnconstructed(name string) *unstructured.Unstructured {\n\treturn &unstructured.Unstructured{\n\t\tObject: map[string]interface{}{\n\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\"kind\": \"Deployment\",\n\t\t\t\"metadata\": map[string]interface{}{\n\t\t\t\t\"name\": name,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc TestAppResourceList_Resources(t *testing.T) {\n\tl := fakeLoader{}\n\texpected := []*Resource{\n\t\t{Data: makeUnconstructed(\"dply1\")},\n\t\t{Data: makeUnconstructed(\"dply2\")},\n\t}\n\n\tresources, _ := ResourcesFromPath(\"fake\/path\", l)\n\tif len(resources) != 2 {\n\t\tt.Fatalf(\"%#v should contain 2 appResource, but got %d\", resources, len(resources))\n\t}\n\n\tfor i, r := range resources {\n\t\tif !reflect.DeepEqual(r.Data, expected[i].Data) {\n\t\t\tt.Fatalf(\"expected %v, but got %v\", expected[i].Data, r.Data)\n\t\t}\n\t}\n}\n<commit_msg>Fix change to Loader interface; Root() method added to fake loader<commit_after>package resource\n\nimport (\n\t\"testing\"\n\n\t\"reflect\"\n\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/kubectl\/pkg\/loader\"\n)\n\nvar encoded = []byte(`apiVersion: v1\nkind: Deployment\nmetadata:\n name: dply1\n---\napiVersion: v1\nkind: Deployment\nmetadata:\n name: dply2\n`)\n\ntype fakeLoader struct {\n}\n\nfunc (l fakeLoader) Root() string {\n\treturn \"unused\"\n}\n\nfunc (l fakeLoader) New(newRoot string) (loader.Loader, error) {\n\treturn l, nil\n}\n\nfunc (l fakeLoader) Load(location string) ([]byte, error) {\n\treturn encoded, nil\n}\n\nfunc makeUnconstructed(name string) *unstructured.Unstructured {\n\treturn &unstructured.Unstructured{\n\t\tObject: map[string]interface{}{\n\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\"kind\": \"Deployment\",\n\t\t\t\"metadata\": map[string]interface{}{\n\t\t\t\t\"name\": name,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc TestAppResourceList_Resources(t *testing.T) {\n\tl := fakeLoader{}\n\texpected := []*Resource{\n\t\t{Data: makeUnconstructed(\"dply1\")},\n\t\t{Data: makeUnconstructed(\"dply2\")},\n\t}\n\n\tresources, _ := ResourcesFromPath(\"fake\/path\", l)\n\tif len(resources) != 2 {\n\t\tt.Fatalf(\"%#v should contain 2 appResource, but got %d\", resources, len(resources))\n\t}\n\n\tfor i, r := range resources {\n\t\tif !reflect.DeepEqual(r.Data, expected[i].Data) {\n\t\t\tt.Fatalf(\"expected %v, but got %v\", expected[i].Data, r.Data)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under 
the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage server\n\nimport (\n\t\"net\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\n\t\"k8s.io\/client-go\/rest\"\n)\n\n\/\/ DeprecatedInsecureServingInfo is the main context object for the insecure http server.\ntype DeprecatedInsecureServingInfo struct {\n\t\/\/ Listener is the secure server network listener.\n\tListener net.Listener\n\t\/\/ optional server name for log messages\n\tName string\n}\n\n\/\/ Serve starts an insecure http server with the given handler. It fails only if\n\/\/ the initial listen call fails. It does not block.\nfunc (s *DeprecatedInsecureServingInfo) Serve(handler http.Handler, shutdownTimeout time.Duration, stopCh <-chan struct{}) error {\n\tinsecureServer := &http.Server{\n\t\tAddr: s.Listener.Addr().String(),\n\t\tHandler: handler,\n\t\tMaxHeaderBytes: 1 << 20,\n\t}\n\n\tif len(s.Name) > 0 {\n\t\tglog.Infof(\"Serving %s insecurely on %s\", s.Name, s.Listener.Addr())\n\t} else {\n\t\tglog.Infof(\"Serving insecurely on %s\", s.Listener.Addr())\n\t}\n\treturn RunServer(insecureServer, s.Listener, shutdownTimeout, stopCh)\n}\n\nfunc (s *DeprecatedInsecureServingInfo) NewLoopbackClientConfig() (*rest.Config, error) {\n\tif s == nil {\n\t\treturn nil, nil\n\t}\n\n\thost, port, err := LoopbackHostPort(s.Listener.Addr().String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &rest.Config{\n\t\tHost: \"http:\/\/\" + net.JoinHostPort(host, port),\n\t\t\/\/ Increase QPS limits. The client is currently passed to all admission plugins,\n\t\t\/\/ and those can be throttled in case of higher load on apiserver - see #22340 and #22422\n\t\t\/\/ for more details. Once #22422 is fixed, we may want to remove it.\n\t\tQPS: 50,\n\t\tBurst: 100,\n\t}, nil\n}\n<commit_msg>kube-controller-manager: disable authn\/z on insecure port<commit_after>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage server\n\nimport (\n\t\"net\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\n\t\"k8s.io\/apiserver\/pkg\/authentication\/user\"\n\t\"k8s.io\/client-go\/rest\"\n)\n\n\/\/ DeprecatedInsecureServingInfo is the main context object for the insecure http server.\ntype DeprecatedInsecureServingInfo struct {\n\t\/\/ Listener is the secure server network listener.\n\tListener net.Listener\n\t\/\/ optional server name for log messages\n\tName string\n}\n\n\/\/ Serve starts an insecure http server with the given handler. It fails only if\n\/\/ the initial listen call fails. 
It does not block.\nfunc (s *DeprecatedInsecureServingInfo) Serve(handler http.Handler, shutdownTimeout time.Duration, stopCh <-chan struct{}) error {\n\tinsecureServer := &http.Server{\n\t\tAddr: s.Listener.Addr().String(),\n\t\tHandler: handler,\n\t\tMaxHeaderBytes: 1 << 20,\n\t}\n\n\tif len(s.Name) > 0 {\n\t\tglog.Infof(\"Serving %s insecurely on %s\", s.Name, s.Listener.Addr())\n\t} else {\n\t\tglog.Infof(\"Serving insecurely on %s\", s.Listener.Addr())\n\t}\n\treturn RunServer(insecureServer, s.Listener, shutdownTimeout, stopCh)\n}\n\nfunc (s *DeprecatedInsecureServingInfo) NewLoopbackClientConfig() (*rest.Config, error) {\n\tif s == nil {\n\t\treturn nil, nil\n\t}\n\n\thost, port, err := LoopbackHostPort(s.Listener.Addr().String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &rest.Config{\n\t\tHost: \"http:\/\/\" + net.JoinHostPort(host, port),\n\t\t\/\/ Increase QPS limits. The client is currently passed to all admission plugins,\n\t\t\/\/ and those can be throttled in case of higher load on apiserver - see #22340 and #22422\n\t\t\/\/ for more details. Once #22422 is fixed, we may want to remove it.\n\t\tQPS: 50,\n\t\tBurst: 100,\n\t}, nil\n}\n\n\/\/ InsecureSuperuser implements authenticator.Request to always return a superuser.\n\/\/ This is functionally equivalent to skipping authentication and authorization,\n\/\/ but allows apiserver code to stop special-casing a nil user to skip authorization checks.\ntype InsecureSuperuser struct{}\n\nfunc (InsecureSuperuser) AuthenticateRequest(req *http.Request) (user.Info, bool, error) {\n\treturn &user.DefaultInfo{\n\t\tName: \"system:unsecured\",\n\t\tGroups: []string{user.SystemPrivilegedGroup, user.AllAuthenticated},\n\t}, true, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ this provides a simple datastore for kites just with get\/set methods.\n\/\/ mongodb has 24k number of collection limit in a single database\n\/\/ http:\/\/stackoverflow.com\/questions\/9858393\/limits-of-number-of-collections-in-databases\n\/\/ thats why we have a single collection and use single index\n\/\/ though instead of using a single collection we can use different strategies, like\n\/\/ multiple database, single collections\n\/\/ multiple database, multiple collections\n\/\/ etc... to make it a bit more performant.\n\/\/ though mongodb has an auto sharding setup, http:\/\/docs.mongodb.org\/manual\/sharding\/\n\/\/ which should be considered first. 
or use another datastore like elasticsearch, cassandra etc.\n\/\/ to handle the sharding on database level.\n\/\/ thats why we only have one strategy only for now, to get the ball rolling.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"koding\/newkite\/kite\"\n\t\"koding\/db\/mongodb\/modelhelper\"\n)\n\nvar port = flag.String(\"port\", \"\", \"port to bind itself\")\n\nfunc main() {\n\tflag.Parse()\n\n\toptions := &kite.Options{\n\t\tKitename: \"datastore\",\n\t\tVersion: \"1\",\n\t\tPort: *port,\n\t\tRegion: \"localhost\",\n\t\tEnvironment: \"development\",\n\t\tPublicIP: \"127.0.0.1\",\n\t}\n\n\tk := kite.New(options)\n\n\tk.HandleFunc(\"set\", Set)\n\tk.HandleFunc(\"get\", Get)\n\n\tk.Run()\n}\n\n\nfunc Set(r *kite.Request) (interface{}, error) {\n\tkv, err := r.Args.Array()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkeyValue := modelhelper.NewKeyValue(r.Username, r.RemoteKite.ID, kv[0].(string), kv[1].(string))\n\terr = modelhelper.UpsertKeyValue(keyValue)\n\tfmt.Println(\"set called with - \", kv, keyValue)\n\tresult := true\n\tif err != nil {\n\t\tresult = false\n\t}\n\treturn result, err\n}\n\nfunc Get(r *kite.Request) (interface{}, error) {\n\tkey, err := r.Args.String()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfmt.Println(\"requesting user :\", r.Username, \" kite:\", r.RemoteKite)\n\tfmt.Println(\"get called with - \", key)\n\n\tkv, err := modelhelper.GetKeyValue(r.Username, r.RemoteKite.ID, key)\n\tif err != nil{\n\t\treturn err, nil\n\t}\n\n\treturn kv.Value, nil\n}\n<commit_msg>kite: datastore kite: kiteid is not usable, using username, kitename, environment, key instead<commit_after>\/\/ this provides a simple datastore for kites just with get\/set methods.\n\/\/ mongodb has 24k number of collection limit in a single database\n\/\/ http:\/\/stackoverflow.com\/questions\/9858393\/limits-of-number-of-collections-in-databases\n\/\/ thats why we have a single collection and use single index\n\/\/ though instead of using a single collection we can use different strategies, like\n\/\/ multiple database, single collections\n\/\/ multiple database, multiple collections\n\/\/ etc... to make it a bit more performant.\n\/\/ though mongodb has an auto sharding setup, http:\/\/docs.mongodb.org\/manual\/sharding\/\n\/\/ which should be considered first. 
or use another datastore like elasticsearch, cassandra etc.\n\/\/ to handle the sharding on database level.\n\/\/ thats why we only have one strategy only for now, to get the ball rolling.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"koding\/newkite\/kite\"\n\t\"koding\/db\/mongodb\/modelhelper\"\n)\n\nvar port = flag.String(\"port\", \"\", \"port to bind itself\")\n\nfunc main() {\n\tflag.Parse()\n\n\toptions := &kite.Options{\n\t\tKitename: \"datastore\",\n\t\tVersion: \"1\",\n\t\tPort: *port,\n\t\tRegion: \"localhost\",\n\t\tEnvironment: \"development\",\n\t\tPublicIP: \"127.0.0.1\",\n\t}\n\n\tk := kite.New(options)\n\n\tk.HandleFunc(\"set\", Set)\n\tk.HandleFunc(\"get\", Get)\n\n\tk.Run()\n\tmodelhelper.EnsureKeyValueIndexes()\n}\n\n\nfunc Set(r *kite.Request) (interface{}, error) {\n\tkv, err := r.Args.Array()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkeyValue := modelhelper.NewKeyValue(r.Username, r.RemoteKite.Name, r.RemoteKite.Environment, kv[0].(string))\n\tkeyValue.Value = kv[1].(string)\n\terr = modelhelper.UpsertKeyValue(keyValue)\n\n\tresult := true\n\tif err != nil {\n\t\tresult = false\n\t}\n\treturn result, err\n}\n\nfunc Get(r *kite.Request) (interface{}, error) {\n\tkey, err := r.Args.String()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkv, err := modelhelper.GetKeyValue(r.Username, r.RemoteKite.Name, r.RemoteKite.Environment, key)\n\tif err != nil{\n\t\treturn err, nil\n\t}\n\n\treturn kv.Value, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/dokku\/dokku\/plugins\/common\"\n\t\"github.com\/dokku\/dokku\/plugins\/ps\"\n)\n\n\/\/ main entrypoint to all subcommands\nfunc main() {\n\tparts := strings.Split(os.Args[0], \"\/\")\n\tsubcommand := parts[len(parts)-1]\n\n\tvar err error\n\tswitch subcommand {\n\tcase \"inspect\":\n\t\targs := flag.NewFlagSet(\"ps:inspect\", flag.ExitOnError)\n\t\targs.Parse(os.Args[2:])\n\t\tappName := args.Arg(0)\n\t\terr = ps.CommandInspect(appName)\n\tcase \"rebuild\":\n\t\targs := flag.NewFlagSet(\"ps:rebuild\", flag.ExitOnError)\n\t\tallApps := args.Bool(\"all\", false, \"--all: restart all apps\")\n\t\tparallelCount := args.Int(\"parallel\", ps.RunInSerial, \"--parallel: number of apps to restart in parallel, -1 to match cpu count\")\n\t\targs.Parse(os.Args[2:])\n\t\tappName := args.Arg(0)\n\t\terr = ps.CommandRebuild(appName, *allApps, *parallelCount)\n\tcase \"report\":\n\t\targs := flag.NewFlagSet(\"ps:report\", flag.ExitOnError)\n\t\targs.Parse(os.Args[2:])\n\t\tappName := args.Arg(0)\n\t\tinfoFlag := args.Arg(1)\n\t\terr = ps.CommandReport(appName, infoFlag)\n\tcase \"restart\":\n\t\targs := flag.NewFlagSet(\"ps:restart\", flag.ExitOnError)\n\t\tallApps := args.Bool(\"all\", false, \"--all: restart all apps\")\n\t\tparallelCount := args.Int(\"parallel\", ps.RunInSerial, \"--parallel: number of apps to restart in parallel, -1 to match cpu count\")\n\t\targs.Parse(os.Args[2:])\n\t\tappName := args.Arg(0)\n\t\terr = ps.CommandRestart(appName, *allApps, *parallelCount)\n\tcase \"restore\":\n\t\targs := flag.NewFlagSet(\"ps:restore\", flag.ExitOnError)\n\t\targs.Parse(os.Args[2:])\n\t\tappName := args.Arg(0)\n\t\terr = ps.CommandRestore(appName)\n\tcase \"retire\":\n\t\targs := flag.NewFlagSet(\"ps:retire\", flag.ExitOnError)\n\t\targs.Parse(os.Args[2:])\n\t\terr = ps.CommandRetire()\n\tcase \"scale\":\n\t\targs := flag.NewFlagSet(\"ps:scale\", flag.ExitOnError)\n\t\tskipDeploy := args.Bool(\"skip-deploy\", false, 
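\/\/ [Editor's note] Hedged sketch, not in the original file, of the per-subcommand\n\t\t\/\/ flag.FlagSet pattern this dispatcher uses: each case builds its own flag set, parses\n\t\t\/\/ everything after the subcommand, and reads what remains as positional arguments.\n\t\t\/\/ \"ps:example\" and the flag names are illustrative.\n\t\t\/*\n\t\t\targs := flag.NewFlagSet(\"ps:example\", flag.ExitOnError)\n\t\t\tverbose := args.Bool(\"verbose\", false, \"--verbose: print extra output\")\n\t\t\targs.Parse(os.Args[2:]) \/\/ os.Args[1] is the subcommand name itself\n\t\t\tappName := args.Arg(0) \/\/ first positional after the flags\n\t\t\t_, _ = verbose, appName\n\t\t*\/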
\"--skip-deploy: skip deploy of the app\")\n\t\targs.Parse(os.Args[2:])\n\t\tappName := args.Arg(0)\n\t\t_, processTuples := common.ShiftString(args.Args())\n\t\terr = ps.CommandScale(appName, *skipDeploy, processTuples)\n\tcase \"set\":\n\t\targs := flag.NewFlagSet(\"ps:set\", flag.ExitOnError)\n\t\targs.Parse(os.Args[2:])\n\t\tappName := args.Arg(0)\n\t\tproperty := args.Arg(1)\n\t\tvalue := args.Arg(2)\n\t\terr = ps.CommandSet(appName, property, value)\n\tcase \"start\":\n\t\targs := flag.NewFlagSet(\"ps:start\", flag.ExitOnError)\n\t\tallApps := args.Bool(\"all\", false, \"--all: restart all apps\")\n\t\tparallelCount := args.Int(\"parallel\", ps.RunInSerial, \"--parallel: number of apps to restart in parallel, -1 to match cpu count\")\n\t\targs.Parse(os.Args[2:])\n\t\tappName := args.Arg(0)\n\t\terr = ps.CommandStart(appName, *allApps, *parallelCount)\n\tcase \"stop\":\n\t\targs := flag.NewFlagSet(\"ps:stop\", flag.ExitOnError)\n\t\tallApps := args.Bool(\"all\", false, \"--all: restart all apps\")\n\t\tparallelCount := args.Int(\"parallel\", ps.RunInSerial, \"--parallel: number of apps to restart in parallel, -1 to match cpu count\")\n\t\targs.Parse(os.Args[2:])\n\t\tappName := args.Arg(0)\n\t\terr = ps.CommandStop(appName, *allApps, *parallelCount)\n\tdefault:\n\t\tcommon.LogFail(fmt.Sprintf(\"Invalid plugin subcommand call: %s\", subcommand))\n\t}\n\n\tif err != nil {\n\t\tcommon.LogFail(err.Error())\n\t}\n}\n<commit_msg>fix: correct help output<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/dokku\/dokku\/plugins\/common\"\n\t\"github.com\/dokku\/dokku\/plugins\/ps\"\n)\n\n\/\/ main entrypoint to all subcommands\nfunc main() {\n\tparts := strings.Split(os.Args[0], \"\/\")\n\tsubcommand := parts[len(parts)-1]\n\n\tvar err error\n\tswitch subcommand {\n\tcase \"inspect\":\n\t\targs := flag.NewFlagSet(\"ps:inspect\", flag.ExitOnError)\n\t\targs.Parse(os.Args[2:])\n\t\tappName := args.Arg(0)\n\t\terr = ps.CommandInspect(appName)\n\tcase \"rebuild\":\n\t\targs := flag.NewFlagSet(\"ps:rebuild\", flag.ExitOnError)\n\t\tallApps := args.Bool(\"all\", false, \"--all: rebuild all apps\")\n\t\tparallelCount := args.Int(\"parallel\", ps.RunInSerial, \"--parallel: number of apps to rebuild in parallel, -1 to match cpu count\")\n\t\targs.Parse(os.Args[2:])\n\t\tappName := args.Arg(0)\n\t\terr = ps.CommandRebuild(appName, *allApps, *parallelCount)\n\tcase \"report\":\n\t\targs := flag.NewFlagSet(\"ps:report\", flag.ExitOnError)\n\t\targs.Parse(os.Args[2:])\n\t\tappName := args.Arg(0)\n\t\tinfoFlag := args.Arg(1)\n\t\terr = ps.CommandReport(appName, infoFlag)\n\tcase \"restart\":\n\t\targs := flag.NewFlagSet(\"ps:restart\", flag.ExitOnError)\n\t\tallApps := args.Bool(\"all\", false, \"--all: restart all apps\")\n\t\tparallelCount := args.Int(\"parallel\", ps.RunInSerial, \"--parallel: number of apps to restart in parallel, -1 to match cpu count\")\n\t\targs.Parse(os.Args[2:])\n\t\tappName := args.Arg(0)\n\t\terr = ps.CommandRestart(appName, *allApps, *parallelCount)\n\tcase \"restore\":\n\t\targs := flag.NewFlagSet(\"ps:restore\", flag.ExitOnError)\n\t\targs.Parse(os.Args[2:])\n\t\tappName := args.Arg(0)\n\t\terr = ps.CommandRestore(appName)\n\tcase \"retire\":\n\t\targs := flag.NewFlagSet(\"ps:retire\", flag.ExitOnError)\n\t\targs.Parse(os.Args[2:])\n\t\terr = ps.CommandRetire()\n\tcase \"scale\":\n\t\targs := flag.NewFlagSet(\"ps:scale\", flag.ExitOnError)\n\t\tskipDeploy := args.Bool(\"skip-deploy\", false, \"--skip-deploy: skip deploy of 
the app\")\n\t\targs.Parse(os.Args[2:])\n\t\tappName := args.Arg(0)\n\t\t_, processTuples := common.ShiftString(args.Args())\n\t\terr = ps.CommandScale(appName, *skipDeploy, processTuples)\n\tcase \"set\":\n\t\targs := flag.NewFlagSet(\"ps:set\", flag.ExitOnError)\n\t\targs.Parse(os.Args[2:])\n\t\tappName := args.Arg(0)\n\t\tproperty := args.Arg(1)\n\t\tvalue := args.Arg(2)\n\t\terr = ps.CommandSet(appName, property, value)\n\tcase \"start\":\n\t\targs := flag.NewFlagSet(\"ps:start\", flag.ExitOnError)\n\t\tallApps := args.Bool(\"all\", false, \"--all: start all apps\")\n\t\tparallelCount := args.Int(\"parallel\", ps.RunInSerial, \"--parallel: number of apps to start in parallel, -1 to match cpu count\")\n\t\targs.Parse(os.Args[2:])\n\t\tappName := args.Arg(0)\n\t\terr = ps.CommandStart(appName, *allApps, *parallelCount)\n\tcase \"stop\":\n\t\targs := flag.NewFlagSet(\"ps:stop\", flag.ExitOnError)\n\t\tallApps := args.Bool(\"all\", false, \"--all: stop all apps\")\n\t\tparallelCount := args.Int(\"parallel\", ps.RunInSerial, \"--parallel: number of apps to stop in parallel, -1 to match cpu count\")\n\t\targs.Parse(os.Args[2:])\n\t\tappName := args.Arg(0)\n\t\terr = ps.CommandStop(appName, *allApps, *parallelCount)\n\tdefault:\n\t\tcommon.LogFail(fmt.Sprintf(\"Invalid plugin subcommand call: %s\", subcommand))\n\t}\n\n\tif err != nil {\n\t\tcommon.LogFail(err.Error())\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package rand implements pseudo-random number generators.\n\/\/\n\/\/ Random numbers are generated by a Source. Top-level functions, such as\n\/\/ Float64 and Int, use a default shared Source that produces a deterministic\n\/\/ sequence of values each time a program is run. 
Use the Seed function to\n\/\/ initialize the default Source if different behavior is required for each run.\n\/\/ The default Source is safe for concurrent use by multiple goroutines.\n\/\/\n\/\/ For random numbers suitable for security-sensitive work, see the crypto\/rand\n\/\/ package.\npackage rand\n\nimport \"sync\"\n\n\/\/ A Source represents a source of uniformly-distributed\n\/\/ pseudo-random int64 values in the range [0, 1<<63).\ntype Source interface {\n\tInt63() int64\n\tSeed(seed int64)\n}\n\n\/\/ NewSource returns a new pseudo-random Source seeded with the given value.\nfunc NewSource(seed int64) Source {\n\tvar rng rngSource\n\trng.Seed(seed)\n\treturn &rng\n}\n\n\/\/ A Rand is a source of random numbers.\ntype Rand struct {\n\tsrc Source\n}\n\n\/\/ New returns a new Rand that uses random values from src\n\/\/ to generate other random values.\nfunc New(src Source) *Rand { return &Rand{src} }\n\n\/\/ Seed uses the provided seed value to initialize the generator to a deterministic state.\nfunc (r *Rand) Seed(seed int64) { r.src.Seed(seed) }\n\n\/\/ Int63 returns a non-negative pseudo-random 63-bit integer as an int64.\nfunc (r *Rand) Int63() int64 { return r.src.Int63() }\n\n\/\/ Uint32 returns a pseudo-random 32-bit value as a uint32.\nfunc (r *Rand) Uint32() uint32 { return uint32(r.Int63() >> 31) }\n\n\/\/ Int31 returns a non-negative pseudo-random 31-bit integer as an int32.\nfunc (r *Rand) Int31() int32 { return int32(r.Int63() >> 32) }\n\n\/\/ Int returns a non-negative pseudo-random int.\nfunc (r *Rand) Int() int {\n\tu := uint(r.Int63())\n\treturn int(u << 1 >> 1) \/\/ clear sign bit if int == int32\n}\n\n\/\/ Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).\n\/\/ It panics if n <= 0.\nfunc (r *Rand) Int63n(n int64) int64 {\n\tif n <= 0 {\n\t\tpanic(\"invalid argument to Int63n\")\n\t}\n\tif n&(n-1) == 0 { \/\/ n is power of two, can mask\n\t\treturn r.Int63() & (n - 1)\n\t}\n\tmax := int64((1 << 63) - 1 - (1<<63)%uint64(n))\n\tv := r.Int63()\n\tfor v > max {\n\t\tv = r.Int63()\n\t}\n\treturn v % n\n}\n\n\/\/ Int31n returns, as an int32, a non-negative pseudo-random number in [0,n).\n\/\/ It panics if n <= 0.\nfunc (r *Rand) Int31n(n int32) int32 {\n\tif n <= 0 {\n\t\tpanic(\"invalid argument to Int31n\")\n\t}\n\tif n&(n-1) == 0 { \/\/ n is power of two, can mask\n\t\treturn r.Int31() & (n - 1)\n\t}\n\tmax := int32((1 << 31) - 1 - (1<<31)%uint32(n))\n\tv := r.Int31()\n\tfor v > max {\n\t\tv = r.Int31()\n\t}\n\treturn v % n\n}\n\n\/\/ Intn returns, as an int, a non-negative pseudo-random number in [0,n).\n\/\/ It panics if n <= 0.\nfunc (r *Rand) Intn(n int) int {\n\tif n <= 0 {\n\t\tpanic(\"invalid argument to Intn\")\n\t}\n\tif n <= 1<<31-1 {\n\t\treturn int(r.Int31n(int32(n)))\n\t}\n\treturn int(r.Int63n(int64(n)))\n}\n\n\/\/ Float64 returns, as a float64, a pseudo-random number in [0.0,1.0).\nfunc (r *Rand) Float64() float64 {\n\t\/\/ A clearer, simpler implementation would be:\n\t\/\/\treturn float64(r.Int63n(1<<53)) \/ (1<<53)\n\t\/\/ However, Go 1 shipped with\n\t\/\/\treturn float64(r.Int63()) \/ (1 << 63)\n\t\/\/ and we want to preserve that value stream.\n\t\/\/\n\t\/\/ There is one bug in the value stream: r.Int63() may be so close\n\t\/\/ to 1<<63 that the division rounds up to 1.0, and we've guaranteed\n\t\/\/ that the result is always less than 1.0.\n\t\/\/\n\t\/\/ We tried to fix this by mapping 1.0 back to 0.0, but since float64\n\t\/\/ values near 0 are much denser than near 1, mapping 1 to 0 caused\n\t\/\/ a theoretically 
significant overshoot in the probability of returning 0.\n\t\/\/ Instead of that, if we round up to 1, just try again.\n\t\/\/ Getting 1 only happens 1\/2⁵³ of the time, so most clients\n\t\/\/ will not observe it anyway.\nagain:\n\tf := float64(r.Int63()) \/ (1 << 63)\n\tif f == 1 {\n\t\tgoto again \/\/ resample; this branch is taken O(never)\n\t}\n\treturn f\n}\n\n\/\/ Float32 returns, as a float32, a pseudo-random number in [0.0,1.0).\nfunc (r *Rand) Float32() float32 {\n\t\/\/ Same rationale as in Float64: we want to preserve the Go 1 value\n\t\/\/ stream except we want to fix it not to return 1.0\n\t\/\/ This only happens 1\/2²⁴ of the time (plus the 1\/2⁵³ of the time in Float64).\nagain:\n\tf := float32(r.Float64())\n\tif f == 1 {\n\t\tgoto again \/\/ resample; this branch is taken O(very rarely)\n\t}\n\treturn f\n}\n\n\/\/ Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).\nfunc (r *Rand) Perm(n int) []int {\n\tm := make([]int, n)\n\t\/\/ In the following loop, the iteration when i=0 always swaps m[0] with m[0].\n\t\/\/ A change to remove this useless iteration is to assign 1 to i in the init\n\t\/\/ statement. But Perm also effects r. Making this change will affect\n\t\/\/ the final state of r. So this change can't be made for compatibility\n\t\/\/ reasons for Go 1.\n\tfor i := 0; i < n; i++ {\n\t\tj := r.Intn(i + 1)\n\t\tm[i] = m[j]\n\t\tm[j] = i\n\t}\n\treturn m\n}\n\n\/\/ Read generates len(p) random bytes and writes them into p. It\n\/\/ always returns len(p) and a nil error.\nfunc (r *Rand) Read(p []byte) (n int, err error) {\n\tfor i := 0; i < len(p); i += 7 {\n\t\tval := r.src.Int63()\n\t\tfor j := 0; i+j < len(p) && j < 7; j++ {\n\t\t\tp[i+j] = byte(val)\n\t\t\tval >>= 8\n\t\t}\n\t}\n\treturn len(p), nil\n}\n\n\/*\n * Top-level convenience functions\n *\/\n\nvar globalRand = New(&lockedSource{src: NewSource(1)})\n\n\/\/ Seed uses the provided seed value to initialize the default Source to a\n\/\/ deterministic state. 
If Seed is not called, the generator behaves as\n\/\/ if seeded by Seed(1).\nfunc Seed(seed int64) { globalRand.Seed(seed) }\n\n\/\/ Int63 returns a non-negative pseudo-random 63-bit integer as an int64\n\/\/ from the default Source.\nfunc Int63() int64 { return globalRand.Int63() }\n\n\/\/ Uint32 returns a pseudo-random 32-bit value as a uint32\n\/\/ from the default Source.\nfunc Uint32() uint32 { return globalRand.Uint32() }\n\n\/\/ Int31 returns a non-negative pseudo-random 31-bit integer as an int32\n\/\/ from the default Source.\nfunc Int31() int32 { return globalRand.Int31() }\n\n\/\/ Int returns a non-negative pseudo-random int from the default Source.\nfunc Int() int { return globalRand.Int() }\n\n\/\/ Int63n returns, as an int64, a non-negative pseudo-random number in [0,n)\n\/\/ from the default Source.\n\/\/ It panics if n <= 0.\nfunc Int63n(n int64) int64 { return globalRand.Int63n(n) }\n\n\/\/ Int31n returns, as an int32, a non-negative pseudo-random number in [0,n)\n\/\/ from the default Source.\n\/\/ It panics if n <= 0.\nfunc Int31n(n int32) int32 { return globalRand.Int31n(n) }\n\n\/\/ Intn returns, as an int, a non-negative pseudo-random number in [0,n)\n\/\/ from the default Source.\n\/\/ It panics if n <= 0.\nfunc Intn(n int) int { return globalRand.Intn(n) }\n\n\/\/ Float64 returns, as a float64, a pseudo-random number in [0.0,1.0)\n\/\/ from the default Source.\nfunc Float64() float64 { return globalRand.Float64() }\n\n\/\/ Float32 returns, as a float32, a pseudo-random number in [0.0,1.0)\n\/\/ from the default Source.\nfunc Float32() float32 { return globalRand.Float32() }\n\n\/\/ Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n)\n\/\/ from the default Source.\nfunc Perm(n int) []int { return globalRand.Perm(n) }\n\n\/\/ Read generates len(p) random bytes from the default Source and\n\/\/ writes them into p. It always returns len(p) and a nil error.\nfunc Read(p []byte) (n int, err error) { return globalRand.Read(p) }\n\n\/\/ NormFloat64 returns a normally distributed float64 in the range\n\/\/ [-math.MaxFloat64, +math.MaxFloat64] with\n\/\/ standard normal distribution (mean = 0, stddev = 1)\n\/\/ from the default Source.\n\/\/ To produce a different normal distribution, callers can\n\/\/ adjust the output using:\n\/\/\n\/\/ sample = NormFloat64() * desiredStdDev + desiredMean\n\/\/\nfunc NormFloat64() float64 { return globalRand.NormFloat64() }\n\n\/\/ ExpFloat64 returns an exponentially distributed float64 in the range\n\/\/ (0, +math.MaxFloat64] with an exponential distribution whose rate parameter\n\/\/ (lambda) is 1 and whose mean is 1\/lambda (1) from the default Source.\n\/\/ To produce a distribution with a different rate parameter,\n\/\/ callers can adjust the output using:\n\/\/\n\/\/ sample = ExpFloat64() \/ desiredRateParameter\n\/\/\nfunc ExpFloat64() float64 { return globalRand.ExpFloat64() }\n\ntype lockedSource struct {\n\tlk sync.Mutex\n\tsrc Source\n}\n\nfunc (r *lockedSource) Int63() (n int64) {\n\tr.lk.Lock()\n\tn = r.src.Int63()\n\tr.lk.Unlock()\n\treturn\n}\n\nfunc (r *lockedSource) Seed(seed int64) {\n\tr.lk.Lock()\n\tr.src.Seed(seed)\n\tr.lk.Unlock()\n}\n<commit_msg>math\/rand: Doc fix for how many bits Seed uses<commit_after>\/\/ Copyright 2009 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package rand implements pseudo-random number generators.\n\/\/\n\/\/ Random numbers are generated by a Source. Top-level functions, such as\n\/\/ Float64 and Int, use a default shared Source that produces a deterministic\n\/\/ sequence of values each time a program is run. Use the Seed function to\n\/\/ initialize the default Source if different behavior is required for each run.\n\/\/ The default Source is safe for concurrent use by multiple goroutines.\n\/\/\n\/\/ For random numbers suitable for security-sensitive work, see the crypto\/rand\n\/\/ package.\npackage rand\n\nimport \"sync\"\n\n\/\/ A Source represents a source of uniformly-distributed\n\/\/ pseudo-random int64 values in the range [0, 1<<63).\ntype Source interface {\n\tInt63() int64\n\tSeed(seed int64)\n}\n\n\/\/ NewSource returns a new pseudo-random Source seeded with the given value.\nfunc NewSource(seed int64) Source {\n\tvar rng rngSource\n\trng.Seed(seed)\n\treturn &rng\n}\n\n\/\/ A Rand is a source of random numbers.\ntype Rand struct {\n\tsrc Source\n}\n\n\/\/ New returns a new Rand that uses random values from src\n\/\/ to generate other random values.\nfunc New(src Source) *Rand { return &Rand{src} }\n\n\/\/ Seed uses the provided seed value to initialize the generator to a deterministic state.\nfunc (r *Rand) Seed(seed int64) { r.src.Seed(seed) }\n\n\/\/ Int63 returns a non-negative pseudo-random 63-bit integer as an int64.\nfunc (r *Rand) Int63() int64 { return r.src.Int63() }\n\n\/\/ Uint32 returns a pseudo-random 32-bit value as a uint32.\nfunc (r *Rand) Uint32() uint32 { return uint32(r.Int63() >> 31) }\n\n\/\/ Int31 returns a non-negative pseudo-random 31-bit integer as an int32.\nfunc (r *Rand) Int31() int32 { return int32(r.Int63() >> 32) }\n\n\/\/ Int returns a non-negative pseudo-random int.\nfunc (r *Rand) Int() int {\n\tu := uint(r.Int63())\n\treturn int(u << 1 >> 1) \/\/ clear sign bit if int == int32\n}\n\n\/\/ Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).\n\/\/ It panics if n <= 0.\nfunc (r *Rand) Int63n(n int64) int64 {\n\tif n <= 0 {\n\t\tpanic(\"invalid argument to Int63n\")\n\t}\n\tif n&(n-1) == 0 { \/\/ n is power of two, can mask\n\t\treturn r.Int63() & (n - 1)\n\t}\n\tmax := int64((1 << 63) - 1 - (1<<63)%uint64(n))\n\tv := r.Int63()\n\tfor v > max {\n\t\tv = r.Int63()\n\t}\n\treturn v % n\n}\n\n\/\/ Int31n returns, as an int32, a non-negative pseudo-random number in [0,n).\n\/\/ It panics if n <= 0.\nfunc (r *Rand) Int31n(n int32) int32 {\n\tif n <= 0 {\n\t\tpanic(\"invalid argument to Int31n\")\n\t}\n\tif n&(n-1) == 0 { \/\/ n is power of two, can mask\n\t\treturn r.Int31() & (n - 1)\n\t}\n\tmax := int32((1 << 31) - 1 - (1<<31)%uint32(n))\n\tv := r.Int31()\n\tfor v > max {\n\t\tv = r.Int31()\n\t}\n\treturn v % n\n}\n\n\/\/ Intn returns, as an int, a non-negative pseudo-random number in [0,n).\n\/\/ It panics if n <= 0.\nfunc (r *Rand) Intn(n int) int {\n\tif n <= 0 {\n\t\tpanic(\"invalid argument to Intn\")\n\t}\n\tif n <= 1<<31-1 {\n\t\treturn int(r.Int31n(int32(n)))\n\t}\n\treturn int(r.Int63n(int64(n)))\n}\n\n\/\/ Float64 returns, as a float64, a pseudo-random number in [0.0,1.0).\nfunc (r *Rand) Float64() float64 {\n\t\/\/ A clearer, simpler implementation would be:\n\t\/\/\treturn float64(r.Int63n(1<<53)) \/ (1<<53)\n\t\/\/ However, Go 1 shipped with\n\t\/\/\treturn float64(r.Int63()) \/ (1 << 63)\n\t\/\/ and we want to 
preserve that value stream.\n\t\/\/\n\t\/\/ There is one bug in the value stream: r.Int63() may be so close\n\t\/\/ to 1<<63 that the division rounds up to 1.0, and we've guaranteed\n\t\/\/ that the result is always less than 1.0.\n\t\/\/\n\t\/\/ We tried to fix this by mapping 1.0 back to 0.0, but since float64\n\t\/\/ values near 0 are much denser than near 1, mapping 1 to 0 caused\n\t\/\/ a theoretically significant overshoot in the probability of returning 0.\n\t\/\/ Instead of that, if we round up to 1, just try again.\n\t\/\/ Getting 1 only happens 1\/2⁵³ of the time, so most clients\n\t\/\/ will not observe it anyway.\nagain:\n\tf := float64(r.Int63()) \/ (1 << 63)\n\tif f == 1 {\n\t\tgoto again \/\/ resample; this branch is taken O(never)\n\t}\n\treturn f\n}\n\n\/\/ Float32 returns, as a float32, a pseudo-random number in [0.0,1.0).\nfunc (r *Rand) Float32() float32 {\n\t\/\/ Same rationale as in Float64: we want to preserve the Go 1 value\n\t\/\/ stream except we want to fix it not to return 1.0\n\t\/\/ This only happens 1\/2²⁴ of the time (plus the 1\/2⁵³ of the time in Float64).\nagain:\n\tf := float32(r.Float64())\n\tif f == 1 {\n\t\tgoto again \/\/ resample; this branch is taken O(very rarely)\n\t}\n\treturn f\n}\n\n\/\/ Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).\nfunc (r *Rand) Perm(n int) []int {\n\tm := make([]int, n)\n\t\/\/ In the following loop, the iteration when i=0 always swaps m[0] with m[0].\n\t\/\/ A change to remove this useless iteration is to assign 1 to i in the init\n\t\/\/ statement. But Perm also effects r. Making this change will affect\n\t\/\/ the final state of r. So this change can't be made for compatibility\n\t\/\/ reasons for Go 1.\n\tfor i := 0; i < n; i++ {\n\t\tj := r.Intn(i + 1)\n\t\tm[i] = m[j]\n\t\tm[j] = i\n\t}\n\treturn m\n}\n\n\/\/ Read generates len(p) random bytes and writes them into p. It\n\/\/ always returns len(p) and a nil error.\nfunc (r *Rand) Read(p []byte) (n int, err error) {\n\tfor i := 0; i < len(p); i += 7 {\n\t\tval := r.src.Int63()\n\t\tfor j := 0; i+j < len(p) && j < 7; j++ {\n\t\t\tp[i+j] = byte(val)\n\t\t\tval >>= 8\n\t\t}\n\t}\n\treturn len(p), nil\n}\n\n\/*\n * Top-level convenience functions\n *\/\n\nvar globalRand = New(&lockedSource{src: NewSource(1)})\n\n\/\/ Seed uses the provided seed value to initialize the default Source to a\n\/\/ deterministic state. If Seed is not called, the generator behaves as\n\/\/ if seeded by Seed(1). 
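A common idiom for varying the sequence between runs (an\n\/\/ illustration, not part of the original doc) is:\n\/\/\n\/\/\trand.Seed(time.Now().UnixNano())\n\/\/\n\/\/ 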
Only uses the bottom 31 bits of seed; the top 33\n\/\/ bits are ignored.\nfunc Seed(seed int64) { globalRand.Seed(seed) }\n\n\/\/ Int63 returns a non-negative pseudo-random 63-bit integer as an int64\n\/\/ from the default Source.\nfunc Int63() int64 { return globalRand.Int63() }\n\n\/\/ Uint32 returns a pseudo-random 32-bit value as a uint32\n\/\/ from the default Source.\nfunc Uint32() uint32 { return globalRand.Uint32() }\n\n\/\/ Int31 returns a non-negative pseudo-random 31-bit integer as an int32\n\/\/ from the default Source.\nfunc Int31() int32 { return globalRand.Int31() }\n\n\/\/ Int returns a non-negative pseudo-random int from the default Source.\nfunc Int() int { return globalRand.Int() }\n\n\/\/ Int63n returns, as an int64, a non-negative pseudo-random number in [0,n)\n\/\/ from the default Source.\n\/\/ It panics if n <= 0.\nfunc Int63n(n int64) int64 { return globalRand.Int63n(n) }\n\n\/\/ Int31n returns, as an int32, a non-negative pseudo-random number in [0,n)\n\/\/ from the default Source.\n\/\/ It panics if n <= 0.\nfunc Int31n(n int32) int32 { return globalRand.Int31n(n) }\n\n\/\/ Intn returns, as an int, a non-negative pseudo-random number in [0,n)\n\/\/ from the default Source.\n\/\/ It panics if n <= 0.\nfunc Intn(n int) int { return globalRand.Intn(n) }\n\n\/\/ Float64 returns, as a float64, a pseudo-random number in [0.0,1.0)\n\/\/ from the default Source.\nfunc Float64() float64 { return globalRand.Float64() }\n\n\/\/ Float32 returns, as a float32, a pseudo-random number in [0.0,1.0)\n\/\/ from the default Source.\nfunc Float32() float32 { return globalRand.Float32() }\n\n\/\/ Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n)\n\/\/ from the default Source.\nfunc Perm(n int) []int { return globalRand.Perm(n) }\n\n\/\/ Read generates len(p) random bytes from the default Source and\n\/\/ writes them into p. 
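(Internally each call to the source's Int63 supplies up to\n\/\/ seven of those bytes; see the loop in Rand.Read above.)\n\/\/ 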
It always returns len(p) and a nil error.\nfunc Read(p []byte) (n int, err error) { return globalRand.Read(p) }\n\n\/\/ NormFloat64 returns a normally distributed float64 in the range\n\/\/ [-math.MaxFloat64, +math.MaxFloat64] with\n\/\/ standard normal distribution (mean = 0, stddev = 1)\n\/\/ from the default Source.\n\/\/ To produce a different normal distribution, callers can\n\/\/ adjust the output using:\n\/\/\n\/\/ sample = NormFloat64() * desiredStdDev + desiredMean\n\/\/\nfunc NormFloat64() float64 { return globalRand.NormFloat64() }\n\n\/\/ ExpFloat64 returns an exponentially distributed float64 in the range\n\/\/ (0, +math.MaxFloat64] with an exponential distribution whose rate parameter\n\/\/ (lambda) is 1 and whose mean is 1\/lambda (1) from the default Source.\n\/\/ To produce a distribution with a different rate parameter,\n\/\/ callers can adjust the output using:\n\/\/\n\/\/ sample = ExpFloat64() \/ desiredRateParameter\n\/\/\nfunc ExpFloat64() float64 { return globalRand.ExpFloat64() }\n\ntype lockedSource struct {\n\tlk sync.Mutex\n\tsrc Source\n}\n\nfunc (r *lockedSource) Int63() (n int64) {\n\tr.lk.Lock()\n\tn = r.src.Int63()\n\tr.lk.Unlock()\n\treturn\n}\n\nfunc (r *lockedSource) Seed(seed int64) {\n\tr.lk.Lock()\n\tr.src.Seed(seed)\n\tr.lk.Unlock()\n}\n<|endoftext|>"} {"text":"<commit_before>package cluster\n\nimport (\n\t\"encoding\/json\"\n\n\t\"github.com\/SparrowDb\/sparrowdb\/db\"\n\t\"github.com\/SparrowDb\/sparrowdb\/errors\"\n\t\"github.com\/SparrowDb\/sparrowdb\/model\"\n\t\"github.com\/SparrowDb\/sparrowdb\/slog\"\n\t\"github.com\/SparrowDb\/sparrowdb\/spql\"\n\t\"github.com\/SparrowDb\/sparrowdb\/util\"\n\t\"github.com\/SparrowDb\/sparrowdb\/util\/uuid\"\n\t\"github.com\/nats-io\/nats\"\n)\n\nvar (\n\t_config *db.SparrowConfig\n\t_dbm *db.DBManager\n\t_queryExecutor *spql.QueryExecutor\n\t_connection *nats.Conn\n\t_encon *nats.EncodedConn\n\t_enconData *nats.EncodedConn\n\n\t_sparrowQuerySub = \"sparrow.query\"\n\t_sparrowDataSub = \"sparrow.data\"\n\n\tchRecvData chan string\n\tchSendData chan string\n)\n\ntype message struct {\n\tName string\n\tVars map[string]string\n\tContent interface{}\n}\n\nfunc connect() {\n\tvar err error\n\t_connection, err = nats.Connect(_config.PublisherServers)\n\tif err != nil {\n\t\tslog.Fatalf(err.Error())\n\t}\n\n\t_encon, err = nats.NewEncodedConn(_connection, nats.JSON_ENCODER)\n\tif err != nil {\n\t\tslog.Fatalf(err.Error())\n\t}\n\n\t_enconData, err = nats.NewEncodedConn(_connection, nats.GOB_ENCODER)\n\tif err != nil {\n\t\tslog.Fatalf(err.Error())\n\t}\n\n\tregisterReceiverBinder()\n}\n\nfunc registerReceiverBinder() {\n\t_encon.Subscribe(_sparrowQuerySub, func(m *message) {\n\t\tif m.Name != _config.NodeName {\n\t\t\tvar qr spql.QueryRequest\n\t\t\tstr := m.Content.(string)\n\t\t\tjson.Unmarshal([]byte(str), &qr)\n\n\t\t\tq, _ := qr.ParseQuery()\n\t\t\tresults := <-_queryExecutor.ExecuteQuery(&q)\n\t\t\tif results == nil {\n\t\t\t\tslog.Errorf(errors.ErrEmptyQueryResult.Error())\n\t\t\t}\n\t\t}\n\t})\n\n\t_enconData.Subscribe(_sparrowDataSub, func(m *message) {\n\t\tif m.Name != _config.NodeName {\n\t\t\tdbname := m.Vars[\"database\"]\n\n\t\t\tif db, ok := _dbm.GetDatabase(dbname); ok == true {\n\t\t\t\tbs := util.NewByteStreamFromBytes(m.Content.([]byte))\n\t\t\t\tdf := model.NewDataDefinitionFromByteStream(bs)\n\n\t\t\t\tstoredDf, found := db.GetDataByKey(df.Key)\n\n\t\t\t\tif found {\n\t\t\t\t\ttm, _ := uuid.ParseUUID(df.Token)\n\t\t\t\t\tstm, _ := uuid.ParseUUID(storedDf.Token)\n\n\t\t\t\t\tif 
tm.Time().Before(stm.Time()) || tm.Time().Equal(stm.Time()) {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tdf.Revision = storedDf.Revision\n\t\t\t\t\tdf.Revision++\n\t\t\t\t}\n\n\t\t\t\tdb.InsertCheckUpsert(df, true)\n\t\t\t}\n\t\t}\n\t})\n}\n\n\/\/ PublishQuery publishes query\nfunc PublishQuery(query spql.QueryRequest) {\n\tif query.Action == \"select\" {\n\t\treturn\n\t}\n\n\tb, _ := json.Marshal(query)\n\tm := message{\n\t\t_config.NodeName,\n\t\tmap[string]string{},\n\t\tstring(b),\n\t}\n\t_encon.Publish(_sparrowQuerySub, m)\n}\n\n\/\/ PublishData plushes data\nfunc PublishData(df model.DataDefinition, dbname string) {\n\tm := message{\n\t\t_config.NodeName,\n\t\tmap[string]string{\n\t\t\t\"database\": dbname,\n\t\t},\n\t\tdf.ToByteStream().Bytes(),\n\t}\n\t_enconData.Publish(_sparrowDataSub, m)\n}\n\n\/\/ Close finishes cluster service\nfunc Close() {\n\tslog.Infof(\"Stopping Cluster service\")\n\t_encon.Close()\n\t_enconData.Close()\n\t_connection.Close()\n}\n\n\/\/ Start Starts cluster service\nfunc Start(config *db.SparrowConfig, dbm *db.DBManager) {\n\tslog.Infof(\"Starting Cluster service\")\n\t_config = config\n\t_dbm = dbm\n\t_queryExecutor = spql.NewQueryExecutor(dbm)\n\tconnect()\n}\n<commit_msg>nats log<commit_after>package cluster\n\nimport (\n\t\"encoding\/json\"\n\n\t\"github.com\/SparrowDb\/sparrowdb\/db\"\n\t\"github.com\/SparrowDb\/sparrowdb\/errors\"\n\t\"github.com\/SparrowDb\/sparrowdb\/model\"\n\t\"github.com\/SparrowDb\/sparrowdb\/slog\"\n\t\"github.com\/SparrowDb\/sparrowdb\/spql\"\n\t\"github.com\/SparrowDb\/sparrowdb\/util\"\n\t\"github.com\/SparrowDb\/sparrowdb\/util\/uuid\"\n\t\"github.com\/nats-io\/nats\"\n)\n\nvar (\n\t_config *db.SparrowConfig\n\t_dbm *db.DBManager\n\t_queryExecutor *spql.QueryExecutor\n\t_connection *nats.Conn\n\t_encon *nats.EncodedConn\n\t_enconData *nats.EncodedConn\n\n\t_sparrowQuerySub = \"sparrow.query\"\n\t_sparrowDataSub = \"sparrow.data\"\n\n\tchRecvData chan string\n\tchSendData chan string\n)\n\ntype message struct {\n\tName string\n\tVars map[string]string\n\tContent interface{}\n}\n\nfunc onreconnect(nc *nats.Conn) {\n\tslog.Warnf(\"Reconnected to %v\\n\", nc.ConnectedUrl())\n}\n\nfunc ondisconnect(nc *nats.Conn) {\n\tslog.Warnf(\"Disconnected from cluster\\n\")\n}\n\nfunc onclose(nc *nats.Conn) {\n\tslog.Warnf(\"Connection closed. 
Reason: %q\\n\", nc.LastError())\n}\n\nfunc onerror(nc *nats.Conn, sub *nats.Subscription, err error) {\n\tslog.Errorf(\"Cluster error: %v\\n\", err.Error())\n}\n\nfunc connect() {\n\tvar err error\n\t_connection, err = nats.Connect(_config.PublisherServers)\n\tif err != nil {\n\t\tslog.Fatalf(err.Error())\n\t}\n\n\t_connection.SetReconnectHandler(onreconnect)\n\t_connection.SetDisconnectHandler(ondisconnect)\n\t_connection.SetClosedHandler(onclose)\n\t_connection.SetErrorHandler(onerror)\n\n\t_encon, err = nats.NewEncodedConn(_connection, nats.JSON_ENCODER)\n\tif err != nil {\n\t\tslog.Fatalf(err.Error())\n\t}\n\n\t_enconData, err = nats.NewEncodedConn(_connection, nats.GOB_ENCODER)\n\tif err != nil {\n\t\tslog.Fatalf(err.Error())\n\t}\n\n\tregisterReceiverBinder()\n}\n\nfunc registerReceiverBinder() {\n\t_encon.Subscribe(_sparrowQuerySub, func(m *message) {\n\t\tif m.Name != _config.NodeName {\n\t\t\tvar qr spql.QueryRequest\n\t\t\tstr := m.Content.(string)\n\t\t\tjson.Unmarshal([]byte(str), &qr)\n\n\t\t\tq, _ := qr.ParseQuery()\n\t\t\tresults := <-_queryExecutor.ExecuteQuery(&q)\n\t\t\tif results == nil {\n\t\t\t\tslog.Errorf(errors.ErrEmptyQueryResult.Error())\n\t\t\t}\n\t\t}\n\t})\n\n\t_enconData.Subscribe(_sparrowDataSub, func(m *message) {\n\t\tif m.Name != _config.NodeName {\n\t\t\tdbname := m.Vars[\"database\"]\n\n\t\t\tif db, ok := _dbm.GetDatabase(dbname); ok == true {\n\t\t\t\tbs := util.NewByteStreamFromBytes(m.Content.([]byte))\n\t\t\t\tdf := model.NewDataDefinitionFromByteStream(bs)\n\n\t\t\t\tstoredDf, found := db.GetDataByKey(df.Key)\n\n\t\t\t\tif found {\n\t\t\t\t\ttm, _ := uuid.ParseUUID(df.Token)\n\t\t\t\t\tstm, _ := uuid.ParseUUID(storedDf.Token)\n\n\t\t\t\t\tif tm.Time().Before(stm.Time()) || tm.Time().Equal(stm.Time()) {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tdf.Revision = storedDf.Revision\n\t\t\t\t\tdf.Revision++\n\t\t\t\t}\n\n\t\t\t\tdb.InsertCheckUpsert(df, true)\n\t\t\t}\n\t\t}\n\t})\n}\n\n\/\/ PublishQuery publishes query\nfunc PublishQuery(query spql.QueryRequest) {\n\tif query.Action == \"select\" {\n\t\treturn\n\t}\n\n\tb, _ := json.Marshal(query)\n\tm := message{\n\t\t_config.NodeName,\n\t\tmap[string]string{},\n\t\tstring(b),\n\t}\n\t_encon.Publish(_sparrowQuerySub, m)\n}\n\n\/\/ PublishData publishes data\nfunc PublishData(df model.DataDefinition, dbname string) {\n\tm := message{\n\t\t_config.NodeName,\n\t\tmap[string]string{\n\t\t\t\"database\": dbname,\n\t\t},\n\t\tdf.ToByteStream().Bytes(),\n\t}\n\t_enconData.Publish(_sparrowDataSub, m)\n}\n\n\/\/ Close finishes cluster service\nfunc Close() {\n\tslog.Infof(\"Stopping Cluster service\")\n\t_encon.Close()\n\t_enconData.Close()\n\t_connection.Close()\n}\n\n\/\/ Start starts cluster service\nfunc Start(config *db.SparrowConfig, dbm *db.DBManager) {\n\tslog.Infof(\"Starting Cluster service\")\n\t_config = config\n\t_dbm = dbm\n\t_queryExecutor = spql.NewQueryExecutor(dbm)\n\tconnect()\n}\n<|endoftext|>"} {"text":"<commit_before>\npackage main\n\nimport(\n \"fmt\"\n \"path\/filepath\"\n \"os\/exec\"\n \"strings\"\n)\n\nfunc main() {\n files, _ := filepath.Glob(\"..\/mapgen\/map-10[0]*.xml\")\n fmt.Println(files)\n for _,element := range files {\n out := strings.Split(element, \"\/\")[2]\n out = strings.Split(out, \".\")[0] + \".gexf\"\n fmt.Println(out)\n command := exec.Command(\"prun\", \"-no-panda\", \"..\/src\/cgc\", \"1\", \"-G\", \"-m\",\n element, \"-g\", out)\n command.Run()\n }\n}\n<commit_msg>Updated make gexf script.<commit_after>\npackage main\n\nimport(\n \"fmt\"\n 
\"path\/filepath\"\n \"os\/exec\"\n \"strings\"\n)\n\nfunc main() {\n files, _ := filepath.Glob(\"..\/mapgen\/map-*.xml\")\n \/\/ fmt.Println(files)\n for i := 0; i < len(files); i++ {\n element := files[i]\n out := strings.Split(element, \"\/\")[2]\n out = strings.Split(out, \".\")[0] + \".gexf\"\n var size int\n fmt.Sscanf(out, \"map-%d.gexf\", &size)\n if size > 10000 {\n fmt.Println(\"Skipping gexf with\", size, \"nodes.\")\n continue\n }\n fmt.Println(out)\n\n command := exec.Command(\"prun\", \"-no-panda\", \"..\/src\/cgc\", \"1\", \"-G\", \"-t\",\n \"50\", \"-m\", element, \"-g\", out)\n command.Run()\n }\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build js\n\npackage audio\n\nimport (\n\t\"io\"\n\t\"io\/ioutil\"\n\n\t\"github.com\/gopherjs\/gopherjs\/js\"\n)\n\nvar context *js.Object\n\ntype player struct {\n\tsrc io.ReadSeeker\n\tsampleRate int\n\tposition float64\n\tbufferSource *js.Object\n}\n\nfunc initialize() bool {\n\t\/\/ Do nothing in node.js.\n\tif js.Global.Get(\"require\") != js.Undefined {\n\t\treturn false\n\t}\n\n\tclass := js.Global.Get(\"AudioContext\")\n\tif class == js.Undefined {\n\t\tclass = js.Global.Get(\"webkitAudioContext\")\n\t}\n\tif class == js.Undefined {\n\t\treturn false\n\t}\n\tcontext = class.New()\n\treturn true\n}\n\nfunc newPlayer(src io.ReadSeeker, sampleRate int) *Player {\n\tif context == nil {\n\t\tif !initialize() {\n\t\t\tpanic(\"audio couldn't be initialized\")\n\t\t}\n\t}\n\n\tp := &player{\n\t\tsrc: src,\n\t\tsampleRate: sampleRate,\n\t\tposition: context.Get(\"currentTime\").Float(),\n\t\tbufferSource: nil,\n\t}\n\treturn &Player{p}\n}\n\nfunc toLR(data []byte) ([]int16, []int16) {\n\tl := make([]int16, len(data)\/4)\n\tr := make([]int16, len(data)\/4)\n\tfor i := 0; i < len(data)\/4; i++ {\n\t\tl[i] = int16(data[4*i]) | int16(data[4*i+1])<<8\n\t\tr[i] = int16(data[4*i+2]) | int16(data[4*i+3])<<8\n\t}\n\treturn l, r\n}\n\nfunc (p *player) play() error {\n\t\/\/ TODO: Reading all data at once is temporary implemntation. 
Treat this as stream.\n\tbuf, err := ioutil.ReadAll(p.src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(buf) == 0 {\n\t\treturn nil\n\t}\n\t\/\/ TODO: p.position should be updated\n\tif p.bufferSource != nil {\n\t\tp.bufferSource.Call(\"start\", p.position)\n\t\treturn nil\n\t}\n\tconst channelNum = 2\n\tconst bytesPerSample = channelNum * 16 \/ 8\n\tb := context.Call(\"createBuffer\", channelNum, len(buf)\/bytesPerSample, p.sampleRate)\n\tl := b.Call(\"getChannelData\", 0)\n\tr := b.Call(\"getChannelData\", 1)\n\til, ir := toLR(buf)\n\tconst max = 1 << 15\n\tfor i := 0; i < len(il); i++ {\n\t\tl.SetIndex(i, float64(il[i])\/max)\n\t\tr.SetIndex(i, float64(ir[i])\/max)\n\t}\n\tp.bufferSource = context.Call(\"createBufferSource\")\n\tp.bufferSource.Set(\"buffer\", b)\n\tp.bufferSource.Call(\"connect\", context.Get(\"destination\"))\n\tp.bufferSource.Call(\"start\", p.position)\n\tp.position += b.Get(\"duration\").Float()\n\treturn nil\n}\n\nfunc (p *player) close() error {\n\tp.bufferSource.Call(\"stop\")\n\tp.bufferSource.Call(\"disconnect\")\n\treturn nil\n}\n<commit_msg>audio: Bug fix: newPlayer should return error in JS<commit_after>\/\/ Copyright 2015 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build js\n\npackage audio\n\nimport (\n\t\"io\"\n\t\"io\/ioutil\"\n\n\t\"github.com\/gopherjs\/gopherjs\/js\"\n)\n\nvar context *js.Object\n\ntype player struct {\n\tsrc io.ReadSeeker\n\tsampleRate int\n\tposition float64\n\tbufferSource *js.Object\n}\n\nfunc initialize() bool {\n\t\/\/ Do nothing in node.js.\n\tif js.Global.Get(\"require\") != js.Undefined {\n\t\treturn false\n\t}\n\n\tclass := js.Global.Get(\"AudioContext\")\n\tif class == js.Undefined {\n\t\tclass = js.Global.Get(\"webkitAudioContext\")\n\t}\n\tif class == js.Undefined {\n\t\treturn false\n\t}\n\tcontext = class.New()\n\treturn true\n}\n\nfunc newPlayer(src io.ReadSeeker, sampleRate int) (*Player, error) {\n\tif context == nil {\n\t\tif !initialize() {\n\t\t\tpanic(\"audio couldn't be initialized\")\n\t\t}\n\t}\n\n\tp := &player{\n\t\tsrc: src,\n\t\tsampleRate: sampleRate,\n\t\tposition: context.Get(\"currentTime\").Float(),\n\t\tbufferSource: nil,\n\t}\n\treturn &Player{p}, nil\n}\n\nfunc toLR(data []byte) ([]int16, []int16) {\n\tl := make([]int16, len(data)\/4)\n\tr := make([]int16, len(data)\/4)\n\tfor i := 0; i < len(data)\/4; i++ {\n\t\tl[i] = int16(data[4*i]) | int16(data[4*i+1])<<8\n\t\tr[i] = int16(data[4*i+2]) | int16(data[4*i+3])<<8\n\t}\n\treturn l, r\n}\n\nfunc (p *player) play() error {\n\t\/\/ TODO: Reading all data at once is temporary implemntation. 
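(A streaming\n\t\/\/ approach would instead read fixed-size chunks, e.g. 4096 bytes, per\n\t\/\/ iteration, so playback could start before the whole source is decoded.)\n\t\/\/ 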
Treat this as stream.\n\tbuf, err := ioutil.ReadAll(p.src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(buf) == 0 {\n\t\treturn nil\n\t}\n\t\/\/ TODO: p.position should be updated\n\tif p.bufferSource != nil {\n\t\tp.bufferSource.Call(\"start\", p.position)\n\t\treturn nil\n\t}\n\tconst channelNum = 2\n\tconst bytesPerSample = channelNum * 16 \/ 8\n\tb := context.Call(\"createBuffer\", channelNum, len(buf)\/bytesPerSample, p.sampleRate)\n\tl := b.Call(\"getChannelData\", 0)\n\tr := b.Call(\"getChannelData\", 1)\n\til, ir := toLR(buf)\n\tconst max = 1 << 15\n\tfor i := 0; i < len(il); i++ {\n\t\tl.SetIndex(i, float64(il[i])\/max)\n\t\tr.SetIndex(i, float64(ir[i])\/max)\n\t}\n\tp.bufferSource = context.Call(\"createBufferSource\")\n\tp.bufferSource.Set(\"buffer\", b)\n\tp.bufferSource.Call(\"connect\", context.Get(\"destination\"))\n\tp.bufferSource.Call(\"start\", p.position)\n\tp.position += b.Get(\"duration\").Float()\n\treturn nil\n}\n\nfunc (p *player) close() error {\n\tp.bufferSource.Call(\"stop\")\n\tp.bufferSource.Call(\"disconnect\")\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build js\n\npackage audio\n\nimport (\n\t\"io\"\n\t\"runtime\"\n\n\t\"github.com\/gopherjs\/gopherjs\/js\"\n)\n\ntype player struct {\n\tsrc io.Reader\n\tsampleRate int\n\tpositionInSamples int64\n\tcontext *js.Object\n\tbufferSource *js.Object\n}\n\nfunc startPlaying(src io.Reader, sampleRate int) (*player, error) {\n\t\/\/ Do nothing in node.js.\n\tif js.Global.Get(\"require\") != js.Undefined {\n\t\treturn nil, nil\n\t}\n\n\tclass := js.Global.Get(\"AudioContext\")\n\tif class == js.Undefined {\n\t\tclass = js.Global.Get(\"webkitAudioContext\")\n\t}\n\tif class == js.Undefined {\n\t\tpanic(\"audio: audio couldn't be initialized\")\n\t}\n\tp := &player{\n\t\tsrc: src,\n\t\tsampleRate: sampleRate,\n\t\tbufferSource: nil,\n\t\tcontext: class.New(),\n\t}\n\tp.positionInSamples = int64(p.context.Get(\"currentTime\").Float() * float64(p.sampleRate))\n\tif err := p.start(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}\n\nfunc toLR(data []byte) ([]int16, []int16) {\n\tl := make([]int16, len(data)\/4)\n\tr := make([]int16, len(data)\/4)\n\tfor i := 0; i < len(data)\/4; i++ {\n\t\tl[i] = int16(data[4*i]) | int16(data[4*i+1])<<8\n\t\tr[i] = int16(data[4*i+2]) | int16(data[4*i+3])<<8\n\t}\n\treturn l, r\n}\n\nfunc max64(a, b int64) int64 {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc (p *player) proceed() error {\n\tconst bufferSize = 4096\n\tc := int64(p.context.Get(\"currentTime\").Float() * float64(p.sampleRate))\n\tif p.positionInSamples < c {\n\t\tp.positionInSamples = c\n\t}\n\tb := make([]byte, bufferSize)\n\tfor 0 < len(b) {\n\t\tn, err := p.src.Read(b)\n\t\tif 0 < n {\n\t\t\tconst channelNum = 2\n\t\t\tconst bytesPerSample = channelNum * 16 \/ 8\n\t\t\tbuf := p.context.Call(\"createBuffer\", channelNum, n\/bytesPerSample, 
p.sampleRate)\n\t\t\tl := buf.Call(\"getChannelData\", 0)\n\t\t\tr := buf.Call(\"getChannelData\", 1)\n\t\t\til, ir := toLR(b[:n])\n\t\t\tconst max = 1 << 15\n\t\t\tfor i := 0; i < len(il); i++ {\n\t\t\t\tl.SetIndex(i, float64(il[i])\/max)\n\t\t\t\tr.SetIndex(i, float64(ir[i])\/max)\n\t\t\t}\n\t\t\tp.bufferSource = p.context.Call(\"createBufferSource\")\n\t\t\tp.bufferSource.Set(\"buffer\", buf)\n\t\t\tp.bufferSource.Call(\"connect\", p.context.Get(\"destination\"))\n\t\t\tp.bufferSource.Call(\"start\", float64(max64(p.positionInSamples, c))\/float64(p.sampleRate))\n\t\t\tp.positionInSamples += int64(len(il))\n\t\t\tb = b[n:]\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\truntime.Gosched()\n\t}\n\treturn nil\n}\n\nfunc (p *player) start() error {\n\t\/\/ TODO: What if play is already called?\n\tgo func() {\n\t\tdefer p.close()\n\t\tfor {\n\t\t\terr := p.proceed()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\t\/\/ TODO: Record the last error\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\truntime.Gosched()\n\t\t}\n\t}()\n\treturn nil\n}\n\nfunc (p *player) close() error {\n\tif p.bufferSource == nil {\n\t\treturn nil\n\t}\n\tp.bufferSource.Call(\"stop\")\n\tp.bufferSource.Call(\"disconnect\")\n\tp.bufferSource = nil\n\treturn nil\n}\n<commit_msg>audio: Refactoring: Remove unneeded for loop<commit_after>\/\/ Copyright 2015 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build js\n\npackage audio\n\nimport (\n\t\"io\"\n\t\"runtime\"\n\n\t\"github.com\/gopherjs\/gopherjs\/js\"\n)\n\ntype player struct {\n\tsrc io.Reader\n\tsampleRate int\n\tpositionInSamples int64\n\tcontext *js.Object\n\tbufferSource *js.Object\n}\n\nfunc startPlaying(src io.Reader, sampleRate int) (*player, error) {\n\t\/\/ Do nothing in node.js.\n\tif js.Global.Get(\"require\") != js.Undefined {\n\t\treturn nil, nil\n\t}\n\n\tclass := js.Global.Get(\"AudioContext\")\n\tif class == js.Undefined {\n\t\tclass = js.Global.Get(\"webkitAudioContext\")\n\t}\n\tif class == js.Undefined {\n\t\tpanic(\"audio: audio couldn't be initialized\")\n\t}\n\tp := &player{\n\t\tsrc: src,\n\t\tsampleRate: sampleRate,\n\t\tbufferSource: nil,\n\t\tcontext: class.New(),\n\t}\n\tp.positionInSamples = int64(p.context.Get(\"currentTime\").Float() * float64(p.sampleRate))\n\tif err := p.start(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}\n\nfunc toLR(data []byte) ([]int16, []int16) {\n\tl := make([]int16, len(data)\/4)\n\tr := make([]int16, len(data)\/4)\n\tfor i := 0; i < len(data)\/4; i++ {\n\t\tl[i] = int16(data[4*i]) | int16(data[4*i+1])<<8\n\t\tr[i] = int16(data[4*i+2]) | int16(data[4*i+3])<<8\n\t}\n\treturn l, r\n}\n\nfunc max64(a, b int64) int64 {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc (p *player) proceed() error {\n\tconst bufferSize = 4096\n\tc := int64(p.context.Get(\"currentTime\").Float() * float64(p.sampleRate))\n\tif p.positionInSamples < c {\n\t\tp.positionInSamples = c\n\t}\n\tb := make([]byte, 
bufferSize)\n\tn, err := p.src.Read(b)\n\tif 0 < n {\n\t\tconst channelNum = 2\n\t\tconst bytesPerSample = channelNum * 16 \/ 8\n\t\tbuf := p.context.Call(\"createBuffer\", channelNum, n\/bytesPerSample, p.sampleRate)\n\t\tl := buf.Call(\"getChannelData\", 0)\n\t\tr := buf.Call(\"getChannelData\", 1)\n\t\til, ir := toLR(b[:n])\n\t\tconst max = 1 << 15\n\t\tfor i := 0; i < len(il); i++ {\n\t\t\tl.SetIndex(i, float64(il[i])\/max)\n\t\t\tr.SetIndex(i, float64(ir[i])\/max)\n\t\t}\n\t\tp.bufferSource = p.context.Call(\"createBufferSource\")\n\t\tp.bufferSource.Set(\"buffer\", buf)\n\t\tp.bufferSource.Call(\"connect\", p.context.Get(\"destination\"))\n\t\tp.bufferSource.Call(\"start\", float64(p.positionInSamples)\/float64(p.sampleRate))\n\t\tp.positionInSamples += int64(len(il))\n\t}\n\treturn err\n}\n\nfunc (p *player) start() error {\n\t\/\/ TODO: What if play is already called?\n\tgo func() {\n\t\tdefer p.close()\n\t\tfor {\n\t\t\terr := p.proceed()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\t\/\/ TODO: Record the last error\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\truntime.Gosched()\n\t\t}\n\t}()\n\treturn nil\n}\n\nfunc (p *player) close() error {\n\tif p.bufferSource == nil {\n\t\treturn nil\n\t}\n\tp.bufferSource.Call(\"stop\")\n\tp.bufferSource.Call(\"disconnect\")\n\tp.bufferSource = nil\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package weed_server\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"mime\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"encoding\/json\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/images\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/operation\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/util\"\n)\n\nvar fileNameEscaper = strings.NewReplacer(\"\\\\\", \"\\\\\\\\\", \"\\\"\", \"\\\\\\\"\")\n\nfunc (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) {\n\tn := new(storage.Needle)\n\tvid, fid, filename, ext, _ := parseURLPath(r.URL.Path)\n\tvolumeId, err := storage.NewVolumeId(vid)\n\tif err != nil {\n\t\tglog.V(2).Infoln(\"parsing error:\", err, r.URL.Path)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\terr = n.ParsePath(fid)\n\tif err != nil {\n\t\tglog.V(2).Infoln(\"parsing fid error:\", err, r.URL.Path)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tglog.V(4).Infoln(\"volume\", volumeId, \"reading\", n)\n\tif !vs.store.HasVolume(volumeId) {\n\t\tif !vs.ReadRedirect {\n\t\t\tglog.V(2).Infoln(\"volume is not local:\", err, r.URL.Path)\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\tlookupResult, err := operation.Lookup(vs.GetMaster(), volumeId.String())\n\t\tglog.V(2).Infoln(\"volume\", volumeId, \"found on\", lookupResult, \"error\", err)\n\t\tif err == nil && len(lookupResult.Locations) > 0 {\n\t\t\tu, _ := url.Parse(util.NormalizeUrl(lookupResult.Locations[0].PublicUrl))\n\t\t\tu.Path = r.URL.Path\n\t\t\targ := url.Values{}\n\t\t\tif c := r.FormValue(\"collection\"); c != \"\" {\n\t\t\t\targ.Set(\"collection\", c)\n\t\t\t}\n\t\t\tu.RawQuery = arg.Encode()\n\t\t\thttp.Redirect(w, r, u.String(), http.StatusMovedPermanently)\n\n\t\t} else {\n\t\t\tglog.V(2).Infoln(\"lookup error:\", err, r.URL.Path)\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t}\n\t\treturn\n\t}\n\tcookie := n.Cookie\n\tcount, e := vs.store.ReadVolumeNeedle(volumeId, n)\n\tglog.V(4).Infoln(\"read 
bytes\", count, \"error\", e)\n\tif e != nil || count < 0 {\n\t\tglog.V(0).Infof(\"read %s error:\", r.URL.Path, e)\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\tif n.Cookie != cookie {\n\t\tglog.V(0).Infoln(\"request\", r.URL.Path, \"with unmaching cookie seen:\", cookie, \"expected:\", n.Cookie, \"from\", r.RemoteAddr, \"agent\", r.UserAgent())\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\tif n.LastModified != 0 {\n\t\tw.Header().Set(\"Last-Modified\", time.Unix(int64(n.LastModified), 0).UTC().Format(http.TimeFormat))\n\t\tif r.Header.Get(\"If-Modified-Since\") != \"\" {\n\t\t\tif t, parseError := time.Parse(http.TimeFormat, r.Header.Get(\"If-Modified-Since\")); parseError == nil {\n\t\t\t\tif t.Unix() >= int64(n.LastModified) {\n\t\t\t\t\tw.WriteHeader(http.StatusNotModified)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tetag := n.Etag()\n\tif inm := r.Header.Get(\"If-None-Match\"); inm == etag {\n\t\tw.WriteHeader(http.StatusNotModified)\n\t\treturn\n\t}\n\tw.Header().Set(\"Etag\", etag)\n\n\tif n.HasPairs() {\n\t\tpairMap := make(map[string]string)\n\t\terr = json.Unmarshal(n.Pairs, &pairMap)\n\t\tif err != nil {\n\t\t\tglog.V(0).Infoln(\"Unmarshal pairs error:\", err)\n\t\t}\n\t\tfor k, v := range pairMap {\n\t\t\tw.Header().Set(k, v)\n\t\t}\n\t}\n\n\tif vs.tryHandleChunkedFile(n, filename, w, r) {\n\t\treturn\n\t}\n\n\tif n.NameSize > 0 && filename == \"\" {\n\t\tfilename = string(n.Name)\n\t\tif ext == \"\" {\n\t\t\text = path.Ext(filename)\n\t\t}\n\t}\n\tmtype := \"\"\n\tif n.MimeSize > 0 {\n\t\tmt := string(n.Mime)\n\t\tif !strings.HasPrefix(mt, \"application\/octet-stream\") {\n\t\t\tmtype = mt\n\t\t}\n\t}\n\n\tif ext != \".gz\" {\n\t\tif n.IsGzipped() {\n\t\t\tif strings.Contains(r.Header.Get(\"Accept-Encoding\"), \"gzip\") {\n\t\t\t\tw.Header().Set(\"Content-Encoding\", \"gzip\")\n\t\t\t} else {\n\t\t\t\tif n.Data, err = operation.UnGzipData(n.Data); err != nil {\n\t\t\t\t\tglog.V(0).Infoln(\"ungzip error:\", err, r.URL.Path)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\trs := conditionallyResizeImages(bytes.NewReader(n.Data), ext, r)\n\n\tif e := writeResponseContent(filename, mtype, rs, w, r); e != nil {\n\t\tglog.V(2).Infoln(\"response write error:\", e)\n\t}\n}\n\nfunc (vs *VolumeServer) tryHandleChunkedFile(n *storage.Needle, fileName string, w http.ResponseWriter, r *http.Request) (processed bool) {\n\tif !n.IsChunkedManifest() || r.URL.Query().Get(\"cm\") == \"false\" {\n\t\treturn false\n\t}\n\n\tchunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsGzipped())\n\tif e != nil {\n\t\tglog.V(0).Infof(\"load chunked manifest (%s) error: %v\", r.URL.Path, e)\n\t\treturn false\n\t}\n\tif fileName == \"\" && chunkManifest.Name != \"\" {\n\t\tfileName = chunkManifest.Name\n\t}\n\n\text := path.Ext(fileName)\n\n\tmType := \"\"\n\tif chunkManifest.Mime != \"\" {\n\t\tmt := chunkManifest.Mime\n\t\tif !strings.HasPrefix(mt, \"application\/octet-stream\") {\n\t\t\tmType = mt\n\t\t}\n\t}\n\n\tw.Header().Set(\"X-File-Store\", \"chunked\")\n\n\tchunkedFileReader := &operation.ChunkedFileReader{\n\t\tManifest: chunkManifest,\n\t\tMaster: vs.GetMaster(),\n\t}\n\tdefer chunkedFileReader.Close()\n\n\trs := conditionallyResizeImages(chunkedFileReader, ext, r)\n\n\tif e := writeResponseContent(fileName, mType, rs, w, r); e != nil {\n\t\tglog.V(2).Infoln(\"response write error:\", e)\n\t}\n\treturn true\n}\n\nfunc conditionallyResizeImages(originalDataReaderSeeker io.ReadSeeker, ext string, r *http.Request) io.ReadSeeker {\n\trs := 
originalDataReaderSeeker\n\tif len(ext) > 0 {\n\t\text = strings.ToLower(ext)\n\t}\n\tif ext == \".png\" || ext == \".jpg\" || ext == \".jpeg\" || ext == \".gif\" {\n\t\twidth, height := 0, 0\n\t\tif r.FormValue(\"width\") != \"\" {\n\t\t\twidth, _ = strconv.Atoi(r.FormValue(\"width\"))\n\t\t}\n\t\tif r.FormValue(\"height\") != \"\" {\n\t\t\theight, _ = strconv.Atoi(r.FormValue(\"height\"))\n\t\t}\n\t\trs, _, _ = images.Resized(ext, originalDataReaderSeeker, width, height, r.FormValue(\"mode\"))\n\t}\n\treturn rs\n}\n\nfunc writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.ResponseWriter, r *http.Request) error {\n\ttotalSize, e := rs.Seek(0, 2)\n\tif mimeType == \"\" {\n\t\tif ext := path.Ext(filename); ext != \"\" {\n\t\t\tmimeType = mime.TypeByExtension(ext)\n\t\t}\n\t}\n\tif mimeType != \"\" {\n\t\tw.Header().Set(\"Content-Type\", mimeType)\n\t}\n\tif filename != \"\" {\n\t\tcontentDisposition := \"inline\"\n\t\tif r.FormValue(\"dl\") != \"\" {\n\t\t\tif dl, _ := strconv.ParseBool(r.FormValue(\"dl\")); dl {\n\t\t\t\tcontentDisposition = \"attachment\"\n\t\t\t}\n\t\t}\n\t\tw.Header().Set(\"Content-Disposition\", contentDisposition+`; filename=\"`+fileNameEscaper.Replace(filename)+`\"`)\n\t}\n\tw.Header().Set(\"Accept-Ranges\", \"bytes\")\n\tif r.Method == \"HEAD\" {\n\t\tw.Header().Set(\"Content-Length\", strconv.FormatInt(totalSize, 10))\n\t\treturn nil\n\t}\n\trangeReq := r.Header.Get(\"Range\")\n\tif rangeReq == \"\" {\n\t\tw.Header().Set(\"Content-Length\", strconv.FormatInt(totalSize, 10))\n\t\tif _, e = rs.Seek(0, 0); e != nil {\n\t\t\treturn e\n\t\t}\n\t\t_, e = io.Copy(w, rs)\n\t\treturn e\n\t}\n\n\t\/\/the rest is dealing with partial content request\n\t\/\/mostly copy from src\/pkg\/net\/http\/fs.go\n\tranges, err := parseRange(rangeReq, totalSize)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)\n\t\treturn nil\n\t}\n\tif sumRangesSize(ranges) > totalSize {\n\t\t\/\/ The total number of bytes in all the ranges\n\t\t\/\/ is larger than the size of the file by\n\t\t\/\/ itself, so this is probably an attack, or a\n\t\t\/\/ dumb client. 
Ignore the range request.\n\t\treturn nil\n\t}\n\tif len(ranges) == 0 {\n\t\treturn nil\n\t}\n\tif len(ranges) == 1 {\n\t\t\/\/ RFC 2616, Section 14.16:\n\t\t\/\/ \"When an HTTP message includes the content of a single\n\t\t\/\/ range (for example, a response to a request for a\n\t\t\/\/ single range, or to a request for a set of ranges\n\t\t\/\/ that overlap without any holes), this content is\n\t\t\/\/ transmitted with a Content-Range header, and a\n\t\t\/\/ Content-Length header showing the number of bytes\n\t\t\/\/ actually transferred.\n\t\t\/\/ ...\n\t\t\/\/ A response to a request for a single range MUST NOT\n\t\t\/\/ be sent using the multipart\/byteranges media type.\"\n\t\tra := ranges[0]\n\t\tw.Header().Set(\"Content-Length\", strconv.FormatInt(ra.length, 10))\n\t\tw.Header().Set(\"Content-Range\", ra.contentRange(totalSize))\n\t\tw.WriteHeader(http.StatusPartialContent)\n\t\tif _, e = rs.Seek(ra.start, 0); e != nil {\n\t\t\treturn e\n\t\t}\n\n\t\t_, e = io.CopyN(w, rs, ra.length)\n\t\treturn e\n\t}\n\t\/\/ process multiple ranges\n\tfor _, ra := range ranges {\n\t\tif ra.start > totalSize {\n\t\t\thttp.Error(w, \"Out of Range\", http.StatusRequestedRangeNotSatisfiable)\n\t\t\treturn nil\n\t\t}\n\t}\n\tsendSize := rangesMIMESize(ranges, mimeType, totalSize)\n\tpr, pw := io.Pipe()\n\tmw := multipart.NewWriter(pw)\n\tw.Header().Set(\"Content-Type\", \"multipart\/byteranges; boundary=\"+mw.Boundary())\n\tsendContent := pr\n\tdefer pr.Close() \/\/ cause writing goroutine to fail and exit if CopyN doesn't finish.\n\tgo func() {\n\t\tfor _, ra := range ranges {\n\t\t\tpart, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize))\n\t\t\tif e != nil {\n\t\t\t\tpw.CloseWithError(e)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, e = rs.Seek(ra.start, 0); e != nil {\n\t\t\t\tpw.CloseWithError(e)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, e = io.CopyN(part, rs, ra.length); e != nil {\n\t\t\t\tpw.CloseWithError(e)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tmw.Close()\n\t\tpw.Close()\n\t}()\n\tif w.Header().Get(\"Content-Encoding\") == \"\" {\n\t\tw.Header().Set(\"Content-Length\", strconv.FormatInt(sendSize, 10))\n\t}\n\tw.WriteHeader(http.StatusPartialContent)\n\t_, e = io.CopyN(w, sendContent, sendSize)\n\treturn e\n}\n<commit_msg>fix build error<commit_after>package weed_server\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"mime\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"encoding\/json\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/images\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/operation\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/util\"\n)\n\nvar fileNameEscaper = strings.NewReplacer(\"\\\\\", \"\\\\\\\\\", \"\\\"\", \"\\\\\\\"\")\n\nfunc (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) {\n\tn := new(storage.Needle)\n\tvid, fid, filename, ext, _ := parseURLPath(r.URL.Path)\n\tvolumeId, err := storage.NewVolumeId(vid)\n\tif err != nil {\n\t\tglog.V(2).Infoln(\"parsing error:\", err, r.URL.Path)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\terr = n.ParsePath(fid)\n\tif err != nil {\n\t\tglog.V(2).Infoln(\"parsing fid error:\", err, r.URL.Path)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tglog.V(4).Infoln(\"volume\", volumeId, \"reading\", n)\n\tif !vs.store.HasVolume(volumeId) {\n\t\tif !vs.ReadRedirect {\n\t\t\tglog.V(2).Infoln(\"volume is not local:\", err, 
r.URL.Path)\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\tlookupResult, err := operation.Lookup(vs.GetMaster(), volumeId.String())\n\t\tglog.V(2).Infoln(\"volume\", volumeId, \"found on\", lookupResult, \"error\", err)\n\t\tif err == nil && len(lookupResult.Locations) > 0 {\n\t\t\tu, _ := url.Parse(util.NormalizeUrl(lookupResult.Locations[0].PublicUrl))\n\t\t\tu.Path = r.URL.Path\n\t\t\targ := url.Values{}\n\t\t\tif c := r.FormValue(\"collection\"); c != \"\" {\n\t\t\t\targ.Set(\"collection\", c)\n\t\t\t}\n\t\t\tu.RawQuery = arg.Encode()\n\t\t\thttp.Redirect(w, r, u.String(), http.StatusMovedPermanently)\n\n\t\t} else {\n\t\t\tglog.V(2).Infoln(\"lookup error:\", err, r.URL.Path)\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t}\n\t\treturn\n\t}\n\tcookie := n.Cookie\n\tcount, e := vs.store.ReadVolumeNeedle(volumeId, n)\n\tglog.V(4).Infoln(\"read bytes\", count, \"error\", e)\n\tif e != nil || count < 0 {\n\t\tglog.V(0).Infof(\"read %s error: %v\", r.URL.Path, e)\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\tif n.Cookie != cookie {\n\t\tglog.V(0).Infoln(\"request\", r.URL.Path, \"with unmatching cookie seen:\", cookie, \"expected:\", n.Cookie, \"from\", r.RemoteAddr, \"agent\", r.UserAgent())\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\tif n.LastModified != 0 {\n\t\tw.Header().Set(\"Last-Modified\", time.Unix(int64(n.LastModified), 0).UTC().Format(http.TimeFormat))\n\t\tif r.Header.Get(\"If-Modified-Since\") != \"\" {\n\t\t\tif t, parseError := time.Parse(http.TimeFormat, r.Header.Get(\"If-Modified-Since\")); parseError == nil {\n\t\t\t\tif t.Unix() >= int64(n.LastModified) {\n\t\t\t\t\tw.WriteHeader(http.StatusNotModified)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tetag := n.Etag()\n\tif inm := r.Header.Get(\"If-None-Match\"); inm == etag {\n\t\tw.WriteHeader(http.StatusNotModified)\n\t\treturn\n\t}\n\tw.Header().Set(\"Etag\", etag)\n\n\tif n.HasPairs() {\n\t\tpairMap := make(map[string]string)\n\t\terr = json.Unmarshal(n.Pairs, &pairMap)\n\t\tif err != nil {\n\t\t\tglog.V(0).Infoln(\"Unmarshal pairs error:\", err)\n\t\t}\n\t\tfor k, v := range pairMap {\n\t\t\tw.Header().Set(k, v)\n\t\t}\n\t}\n\n\tif vs.tryHandleChunkedFile(n, filename, w, r) {\n\t\treturn\n\t}\n\n\tif n.NameSize > 0 && filename == \"\" {\n\t\tfilename = string(n.Name)\n\t\tif ext == \"\" {\n\t\t\text = path.Ext(filename)\n\t\t}\n\t}\n\tmtype := \"\"\n\tif n.MimeSize > 0 {\n\t\tmt := string(n.Mime)\n\t\tif !strings.HasPrefix(mt, \"application\/octet-stream\") {\n\t\t\tmtype = mt\n\t\t}\n\t}\n\n\tif ext != \".gz\" {\n\t\tif n.IsGzipped() {\n\t\t\tif strings.Contains(r.Header.Get(\"Accept-Encoding\"), \"gzip\") {\n\t\t\t\tw.Header().Set(\"Content-Encoding\", \"gzip\")\n\t\t\t} else {\n\t\t\t\tif n.Data, err = operation.UnGzipData(n.Data); err != nil {\n\t\t\t\t\tglog.V(0).Infoln(\"ungzip error:\", err, r.URL.Path)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\trs := conditionallyResizeImages(bytes.NewReader(n.Data), ext, r)\n\n\tif e := writeResponseContent(filename, mtype, rs, w, r); e != nil {\n\t\tglog.V(2).Infoln(\"response write error:\", e)\n\t}\n}\n\nfunc (vs *VolumeServer) tryHandleChunkedFile(n *storage.Needle, fileName string, w http.ResponseWriter, r *http.Request) (processed bool) {\n\tif !n.IsChunkedManifest() || r.URL.Query().Get(\"cm\") == \"false\" {\n\t\treturn false\n\t}\n\n\tchunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsGzipped())\n\tif e != nil {\n\t\tglog.V(0).Infof(\"load chunked manifest (%s) error: %v\", r.URL.Path, e)\n\t\treturn 
false\n\t}\n\tif fileName == \"\" && chunkManifest.Name != \"\" {\n\t\tfileName = chunkManifest.Name\n\t}\n\n\text := path.Ext(fileName)\n\n\tmType := \"\"\n\tif chunkManifest.Mime != \"\" {\n\t\tmt := chunkManifest.Mime\n\t\tif !strings.HasPrefix(mt, \"application\/octet-stream\") {\n\t\t\tmType = mt\n\t\t}\n\t}\n\n\tw.Header().Set(\"X-File-Store\", \"chunked\")\n\n\tchunkedFileReader := &operation.ChunkedFileReader{\n\t\tManifest: chunkManifest,\n\t\tMaster: vs.GetMaster(),\n\t}\n\tdefer chunkedFileReader.Close()\n\n\trs := conditionallyResizeImages(chunkedFileReader, ext, r)\n\n\tif e := writeResponseContent(fileName, mType, rs, w, r); e != nil {\n\t\tglog.V(2).Infoln(\"response write error:\", e)\n\t}\n\treturn true\n}\n\nfunc conditionallyResizeImages(originalDataReaderSeeker io.ReadSeeker, ext string, r *http.Request) io.ReadSeeker {\n\trs := originalDataReaderSeeker\n\tif len(ext) > 0 {\n\t\text = strings.ToLower(ext)\n\t}\n\tif ext == \".png\" || ext == \".jpg\" || ext == \".jpeg\" || ext == \".gif\" {\n\t\twidth, height := 0, 0\n\t\tif r.FormValue(\"width\") != \"\" {\n\t\t\twidth, _ = strconv.Atoi(r.FormValue(\"width\"))\n\t\t}\n\t\tif r.FormValue(\"height\") != \"\" {\n\t\t\theight, _ = strconv.Atoi(r.FormValue(\"height\"))\n\t\t}\n\t\trs, _, _ = images.Resized(ext, originalDataReaderSeeker, width, height, r.FormValue(\"mode\"))\n\t}\n\treturn rs\n}\n\nfunc writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.ResponseWriter, r *http.Request) error {\n\ttotalSize, e := rs.Seek(0, 2)\n\tif mimeType == \"\" {\n\t\tif ext := path.Ext(filename); ext != \"\" {\n\t\t\tmimeType = mime.TypeByExtension(ext)\n\t\t}\n\t}\n\tif mimeType != \"\" {\n\t\tw.Header().Set(\"Content-Type\", mimeType)\n\t}\n\tif filename != \"\" {\n\t\tcontentDisposition := \"inline\"\n\t\tif r.FormValue(\"dl\") != \"\" {\n\t\t\tif dl, _ := strconv.ParseBool(r.FormValue(\"dl\")); dl {\n\t\t\t\tcontentDisposition = \"attachment\"\n\t\t\t}\n\t\t}\n\t\tw.Header().Set(\"Content-Disposition\", contentDisposition+`; filename=\"`+fileNameEscaper.Replace(filename)+`\"`)\n\t}\n\tw.Header().Set(\"Accept-Ranges\", \"bytes\")\n\tif r.Method == \"HEAD\" {\n\t\tw.Header().Set(\"Content-Length\", strconv.FormatInt(totalSize, 10))\n\t\treturn nil\n\t}\n\trangeReq := r.Header.Get(\"Range\")\n\tif rangeReq == \"\" {\n\t\tw.Header().Set(\"Content-Length\", strconv.FormatInt(totalSize, 10))\n\t\tif _, e = rs.Seek(0, 0); e != nil {\n\t\t\treturn e\n\t\t}\n\t\t_, e = io.Copy(w, rs)\n\t\treturn e\n\t}\n\n\t\/\/the rest is dealing with partial content request\n\t\/\/mostly copy from src\/pkg\/net\/http\/fs.go\n\tranges, err := parseRange(rangeReq, totalSize)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)\n\t\treturn nil\n\t}\n\tif sumRangesSize(ranges) > totalSize {\n\t\t\/\/ The total number of bytes in all the ranges\n\t\t\/\/ is larger than the size of the file by\n\t\t\/\/ itself, so this is probably an attack, or a\n\t\t\/\/ dumb client. 
Ignore the range request.\n\t\treturn nil\n\t}\n\tif len(ranges) == 0 {\n\t\treturn nil\n\t}\n\tif len(ranges) == 1 {\n\t\t\/\/ RFC 2616, Section 14.16:\n\t\t\/\/ \"When an HTTP message includes the content of a single\n\t\t\/\/ range (for example, a response to a request for a\n\t\t\/\/ single range, or to a request for a set of ranges\n\t\t\/\/ that overlap without any holes), this content is\n\t\t\/\/ transmitted with a Content-Range header, and a\n\t\t\/\/ Content-Length header showing the number of bytes\n\t\t\/\/ actually transferred.\n\t\t\/\/ ...\n\t\t\/\/ A response to a request for a single range MUST NOT\n\t\t\/\/ be sent using the multipart\/byteranges media type.\"\n\t\tra := ranges[0]\n\t\tw.Header().Set(\"Content-Length\", strconv.FormatInt(ra.length, 10))\n\t\tw.Header().Set(\"Content-Range\", ra.contentRange(totalSize))\n\t\tw.WriteHeader(http.StatusPartialContent)\n\t\tif _, e = rs.Seek(ra.start, 0); e != nil {\n\t\t\treturn e\n\t\t}\n\n\t\t_, e = io.CopyN(w, rs, ra.length)\n\t\treturn e\n\t}\n\t\/\/ process multiple ranges\n\tfor _, ra := range ranges {\n\t\tif ra.start > totalSize {\n\t\t\thttp.Error(w, \"Out of Range\", http.StatusRequestedRangeNotSatisfiable)\n\t\t\treturn nil\n\t\t}\n\t}\n\tsendSize := rangesMIMESize(ranges, mimeType, totalSize)\n\tpr, pw := io.Pipe()\n\tmw := multipart.NewWriter(pw)\n\tw.Header().Set(\"Content-Type\", \"multipart\/byteranges; boundary=\"+mw.Boundary())\n\tsendContent := pr\n\tdefer pr.Close() \/\/ cause writing goroutine to fail and exit if CopyN doesn't finish.\n\tgo func() {\n\t\tfor _, ra := range ranges {\n\t\t\tpart, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize))\n\t\t\tif e != nil {\n\t\t\t\tpw.CloseWithError(e)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, e = rs.Seek(ra.start, 0); e != nil {\n\t\t\t\tpw.CloseWithError(e)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, e = io.CopyN(part, rs, ra.length); e != nil {\n\t\t\t\tpw.CloseWithError(e)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tmw.Close()\n\t\tpw.Close()\n\t}()\n\tif w.Header().Get(\"Content-Encoding\") == \"\" {\n\t\tw.Header().Set(\"Content-Length\", strconv.FormatInt(sendSize, 10))\n\t}\n\tw.WriteHeader(http.StatusPartialContent)\n\t_, e = io.CopyN(w, sendContent, sendSize)\n\treturn e\n}\n<|endoftext|>"} {"text":"<commit_before>package storage\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"path\"\n)\n\nconst TAG_PREFIX = \"tag_\"\n\ntype Storage interface {\n\tinit() error\n\n\tGet(string) ([]byte, error)\n\tPut(string, []byte) error\n\tGetReader(string) (io.ReadCloser, error)\n\tPutReader(string, io.Reader, func(io.Reader)) error\n\tList(string) ([]string, error)\n\tExists(string) (bool, error)\n\tSize(string) (int64, error)\n\tRemove(string) error\n\tRemoveAll(string) error\n}\n\ntype Config struct {\n\tType string\n\tLocal *Local\n\tS3 *S3\n}\n\nfunc New(cfg *Config) (Storage, error) {\n\tswitch cfg.Type {\n\tcase \"local\":\n\t\tif cfg.Local != nil {\n\t\t\treturn cfg.Local, cfg.Local.init()\n\t\t}\n\t\treturn nil, errors.New(\"No config for storage type 'local' found\")\n\tcase \"s3\":\n\t\tif cfg.S3 != nil {\n\t\t\treturn cfg.S3, cfg.S3.init()\n\t\t}\n\t\treturn nil, errors.New(\"No config for storage type 's3' found\")\n\tdefault:\n\t\treturn nil, errors.New(\"Invalid storage type: \" + cfg.Type)\n\t}\n}\n\nfunc ImageJsonPath(id string) string {\n\treturn fmt.Sprintf(\"images\/%s\/json\", id)\n}\n\nfunc ImageMarkPath(id string) string {\n\treturn fmt.Sprintf(\"images\/%s\/_inprogress\", id)\n}\n\nfunc ImageChecksumPath(id string) string {\n\treturn 
fmt.Sprintf(\"images\/%s\/_checksum\", id)\n}\n\nfunc ImageLayerPath(id string) string {\n\treturn fmt.Sprintf(\"images\/%s\/layer\", id)\n}\n\nfunc ImageAncestryPath(id string) string {\n\treturn fmt.Sprintf(\"images\/%s\/ancestry\", id)\n}\n\nfunc ImageFilesPath(id string) string {\n\treturn fmt.Sprintf(\"images\/%s\/_files\", id)\n}\n\nfunc ImageDiffPath(id string) string {\n\treturn fmt.Sprintf(\"images\/%s\/_diff\", id)\n}\n\nfunc RepoImagesListPath(namespace, repo string) string {\n\treturn fmt.Sprintf(\"repositories\/%s\/_images_list\", path.Join(namespace, repo))\n}\n\nfunc RepoTagPath(namespace, repo, tag string) string {\n\tif tag == \"\" {\n\t\treturn fmt.Sprintf(\"repositories\/%s\", path.Join(namespace, repo))\n\t}\n\treturn fmt.Sprintf(\"repositories\/%s\/%s\", path.Join(namespace, repo), TAG_PREFIX+tag)\n}\n\nfunc RepoJsonPath(namespace, repo string) string {\n\treturn fmt.Sprintf(\"repositories\/%s\/json\", path.Join(namespace, repo))\n}\n\nfunc RepoIndexImagesPath(namespace, repo string) string {\n\treturn fmt.Sprintf(\"repositories\/%s\/_index_images\", path.Join(namespace, repo))\n}\n\nfunc RepoPrivatePath(namespace, repo string) string {\n\treturn fmt.Sprintf(\"repositories\/%s\/_private\", path.Join(namespace, repo))\n}\n<commit_msg>lowercase storage config json keys<commit_after>package storage\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"path\"\n)\n\nconst TAG_PREFIX = \"tag_\"\n\ntype Storage interface {\n\tinit() error\n\n\tGet(string) ([]byte, error)\n\tPut(string, []byte) error\n\tGetReader(string) (io.ReadCloser, error)\n\tPutReader(string, io.Reader, func(io.Reader)) error\n\tList(string) ([]string, error)\n\tExists(string) (bool, error)\n\tSize(string) (int64, error)\n\tRemove(string) error\n\tRemoveAll(string) error\n}\n\ntype Config struct {\n\tType string `json:\"type\"`\n\tLocal *Local `json:\"local\"`\n\tS3 *S3 `json:\"s3\"`\n}\n\nfunc New(cfg *Config) (Storage, error) {\n\tswitch cfg.Type {\n\tcase \"local\":\n\t\tif cfg.Local != nil {\n\t\t\treturn cfg.Local, cfg.Local.init()\n\t\t}\n\t\treturn nil, errors.New(\"No config for storage type 'local' found\")\n\tcase \"s3\":\n\t\tif cfg.S3 != nil {\n\t\t\treturn cfg.S3, cfg.S3.init()\n\t\t}\n\t\treturn nil, errors.New(\"No config for storage type 's3' found\")\n\tdefault:\n\t\treturn nil, errors.New(\"Invalid storage type: \" + cfg.Type)\n\t}\n}\n\nfunc ImageJsonPath(id string) string {\n\treturn fmt.Sprintf(\"images\/%s\/json\", id)\n}\n\nfunc ImageMarkPath(id string) string {\n\treturn fmt.Sprintf(\"images\/%s\/_inprogress\", id)\n}\n\nfunc ImageChecksumPath(id string) string {\n\treturn fmt.Sprintf(\"images\/%s\/_checksum\", id)\n}\n\nfunc ImageLayerPath(id string) string {\n\treturn fmt.Sprintf(\"images\/%s\/layer\", id)\n}\n\nfunc ImageAncestryPath(id string) string {\n\treturn fmt.Sprintf(\"images\/%s\/ancestry\", id)\n}\n\nfunc ImageFilesPath(id string) string {\n\treturn fmt.Sprintf(\"images\/%s\/_files\", id)\n}\n\nfunc ImageDiffPath(id string) string {\n\treturn fmt.Sprintf(\"images\/%s\/_diff\", id)\n}\n\nfunc RepoImagesListPath(namespace, repo string) string {\n\treturn fmt.Sprintf(\"repositories\/%s\/_images_list\", path.Join(namespace, repo))\n}\n\nfunc RepoTagPath(namespace, repo, tag string) string {\n\tif tag == \"\" {\n\t\treturn fmt.Sprintf(\"repositories\/%s\", path.Join(namespace, repo))\n\t}\n\treturn fmt.Sprintf(\"repositories\/%s\/%s\", path.Join(namespace, repo), TAG_PREFIX+tag)\n}\n\nfunc RepoJsonPath(namespace, repo string) string {\n\treturn 
fmt.Sprintf(\"repositories\/%s\/json\", path.Join(namespace, repo))\n}\n\nfunc RepoIndexImagesPath(namespace, repo string) string {\n\treturn fmt.Sprintf(\"repositories\/%s\/_index_images\", path.Join(namespace, repo))\n}\n\nfunc RepoPrivatePath(namespace, repo string) string {\n\treturn fmt.Sprintf(\"repositories\/%s\/_private\", path.Join(namespace, repo))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"os\"\n\t\"os\/signal\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t_ \"github.com\/eliothedeman\/bangarang\/alarm\/console\"\n\t_ \"github.com\/eliothedeman\/bangarang\/alarm\/email\"\n\t_ \"github.com\/eliothedeman\/bangarang\/alarm\/influxdb\"\n\t_ \"github.com\/eliothedeman\/bangarang\/alarm\/pd\"\n\t\"github.com\/eliothedeman\/bangarang\/api\"\n\t\"github.com\/eliothedeman\/bangarang\/config\"\n\t\"github.com\/eliothedeman\/bangarang\/pipeline\"\n\t_ \"github.com\/eliothedeman\/bangarang\/provider\/http\"\n\t_ \"github.com\/eliothedeman\/bangarang\/provider\/tcp\"\n)\n\nvar (\n\tconfFile = flag.String(\"conf\", \"\/etc\/bangarang\/conf.json\", \"path main config file\")\n)\n\nfunc init() {\n\tlogrus.SetLevel(logrus.WarnLevel)\n\ttf := &logrus.TextFormatter{}\n\ttf.FullTimestamp = true\n\tlogrus.SetFormatter(tf)\n}\n\nfunc handleSigs() {\n\tstop := make(chan os.Signal)\n\tsignal.Notify(stop, os.Kill, os.Interrupt)\n\n\tdone := <-stop\n\tlogrus.Fatal(done.String())\n}\n\nfunc main() {\n\tflag.Parse()\n\tlogrus.Infof(\"Loading config file %s\", *confFile)\n\tac, err := config.LoadConfigFile(*confFile)\n\tif err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n\n\tif ac.LogLevel == \"\" {\n\t\tac.LogLevel = config.DEFAULT_LOG_LEVEL\n\t}\n\n\tll, err := logrus.ParseLevel(ac.LogLevel)\n\tif err != nil {\n\t\tlogrus.Error(err)\n\t} else {\n\t\tlogrus.SetLevel(ll)\n\t}\n\n\tlogrus.Infof(\"Starting processing pipeline with %d policie(s)\", len(ac.Policies))\n\t\/\/ create and start up a new pipeline\n\tp := pipeline.NewPipeline(ac)\n\tp.Start()\n\n\tlogrus.Infof(\"Serving the http api on port %d\", 8081)\n\t\/\/ create and start a new api server\n\tapiServer := api.NewServer(ac.ApiPort, p, ac.Auths)\n\tapiServer.Serve()\n\thandleSigs()\n}\n<commit_msg>remove support for influxdb for the time being<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"os\"\n\t\"os\/signal\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t_ \"github.com\/eliothedeman\/bangarang\/alarm\/console\"\n\t_ \"github.com\/eliothedeman\/bangarang\/alarm\/email\"\n\t\/\/ _ \"github.com\/eliothedeman\/bangarang\/alarm\/influxdb\"\n\t_ \"github.com\/eliothedeman\/bangarang\/alarm\/pd\"\n\t\"github.com\/eliothedeman\/bangarang\/api\"\n\t\"github.com\/eliothedeman\/bangarang\/config\"\n\t\"github.com\/eliothedeman\/bangarang\/pipeline\"\n\t_ \"github.com\/eliothedeman\/bangarang\/provider\/http\"\n\t_ \"github.com\/eliothedeman\/bangarang\/provider\/tcp\"\n)\n\nvar (\n\tconfFile = flag.String(\"conf\", \"\/etc\/bangarang\/conf.json\", \"path main config file\")\n)\n\nfunc init() {\n\tlogrus.SetLevel(logrus.WarnLevel)\n\ttf := &logrus.TextFormatter{}\n\ttf.FullTimestamp = true\n\tlogrus.SetFormatter(tf)\n}\n\nfunc handleSigs() {\n\tstop := make(chan os.Signal)\n\tsignal.Notify(stop, os.Kill, os.Interrupt)\n\n\tdone := <-stop\n\tlogrus.Fatal(done.String())\n}\n\nfunc main() {\n\tflag.Parse()\n\tlogrus.Infof(\"Loading config file %s\", *confFile)\n\tac, err := config.LoadConfigFile(*confFile)\n\tif err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n\n\tif ac.LogLevel == \"\" {\n\t\tac.LogLevel = 
config.DEFAULT_LOG_LEVEL\n\t}\n\n\tll, err := logrus.ParseLevel(ac.LogLevel)\n\tif err != nil {\n\t\tlogrus.Error(err)\n\t} else {\n\t\tlogrus.SetLevel(ll)\n\t}\n\n\tlogrus.Infof(\"Starting processing pipeline with %d policie(s)\", len(ac.Policies))\n\t\/\/ create and start up a new pipeline\n\tp := pipeline.NewPipeline(ac)\n\tp.Start()\n\n\tlogrus.Infof(\"Serving the http api on port %d\", 8081)\n\t\/\/ create and start a new api server\n\tapiServer := api.NewServer(ac.ApiPort, p, ac.Auths)\n\tapiServer.Serve()\n\thandleSigs()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n\n\t\"encoding\/json\"\n\n\tis \"github.com\/containers\/image\/storage\"\n\t\"github.com\/containers\/storage\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/urfave\/cli\"\n)\n\ntype jsonImage struct {\n\tID string `json:\"id\"`\n\tNames []string `json:\"names\"`\n}\n\ntype imageOutputParams struct {\n\tID string\n\tName string\n\tDigest string\n\tCreatedAt string\n\tSize string\n}\n\ntype filterParams struct {\n\tdangling string\n\tlabel string\n\tbeforeImage string \/\/ Images are sorted by date, so we can just output until we see the image\n\tsinceImage string \/\/ Images are sorted by date, so we can just output until we don't see the image\n\tbeforeDate time.Time\n\tsinceDate time.Time\n\treferencePattern string\n}\n\nvar (\n\timagesFlags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"quiet, q\",\n\t\t\tUsage: \"display only image IDs\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"noheading, n\",\n\t\t\tUsage: \"do not print column headings\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"no-trunc, notruncate\",\n\t\t\tUsage: \"do not truncate output\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"json\",\n\t\t\tUsage: \"output in JSON format\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"digests\",\n\t\t\tUsage: \"show digests\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"format\",\n\t\t\tUsage: \"pretty-print images using a Go template. 
will override --quiet\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"filter, f\",\n\t\t\tUsage: \"filter output based on conditions provided (default [])\",\n\t\t},\n\t}\n\n\timagesDescription = \"Lists locally stored images.\"\n\timagesCommand = cli.Command{\n\t\tName: \"images\",\n\t\tUsage: \"List images in local storage\",\n\t\tDescription: imagesDescription,\n\t\tFlags: imagesFlags,\n\t\tAction: imagesCmd,\n\t\tArgsUsage: \" \",\n\t}\n)\n\nfunc imagesCmd(c *cli.Context) error {\n\tstore, err := getStore(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\timages, err := store.Images()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"error reading images\")\n\t}\n\n\tquiet := false\n\tif c.IsSet(\"quiet\") {\n\t\tquiet = c.Bool(\"quiet\")\n\t}\n\tnoheading := false\n\tif c.IsSet(\"noheading\") {\n\t\tnoheading = c.Bool(\"noheading\")\n\t}\n\ttruncate := true\n\tif c.IsSet(\"no-trunc\") {\n\t\ttruncate = !c.Bool(\"no-trunc\")\n\t}\n\tdigests := false\n\tif c.IsSet(\"digests\") {\n\t\tdigests = c.Bool(\"digests\")\n\t}\n\tformatString := \"\"\n\thasTemplate := false\n\tif c.IsSet(\"format\") {\n\t\tformatString = c.String(\"format\")\n\t\thasTemplate = true\n\t}\n\n\tname := \"\"\n\tif len(c.Args()) == 1 {\n\t\tname = c.Args().Get(0)\n\t} else if len(c.Args()) > 1 {\n\t\treturn errors.New(\"'buildah images' requires at most 1 argument\")\n\t}\n\tif c.IsSet(\"json\") {\n\t\tJSONImages := []jsonImage{}\n\t\tfor _, image := range images {\n\t\t\tJSONImages = append(JSONImages, jsonImage{ID: image.ID, Names: image.Names})\n\t\t}\n\t\tdata, err2 := json.MarshalIndent(JSONImages, \"\", \" \")\n\t\tif err2 != nil {\n\t\t\treturn err2\n\t\t}\n\t\tfmt.Printf(\"%s\\n\", data)\n\t\treturn nil\n\t}\n\tif len(images) > 0 && !noheading && !quiet {\n\t\tif truncate {\n\t\t\tfmt.Printf(\"%-12s %s\\n\", \"IMAGE ID\", \"IMAGE NAME\")\n\t\t} else {\n\t\t\tfmt.Printf(\"%-64s %s\\n\", \"IMAGE ID\", \"IMAGE NAME\")\n\t\t}\n\t}\n\n\tvar params *filterParams\n\tif c.IsSet(\"filter\") {\n\t\tparams, err = parseFilter(images, c.String(\"filter\"))\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"error parsing filter\")\n\t\t}\n\t} else {\n\t\tparams = nil\n\t}\n\n\tif len(images) > 0 && !noheading && !quiet && !hasTemplate {\n\t\toutputHeader(truncate, digests)\n\t}\n\n\treturn outputImages(images, formatString, store, params, name, hasTemplate, truncate, digests, quiet)\n}\n\nfunc parseFilter(images []storage.Image, filter string) (*filterParams, error) {\n\tparams := new(filterParams)\n\tfilterStrings := strings.Split(filter, \",\")\n\tfor _, param := range filterStrings {\n\t\tpair := strings.SplitN(param, \"=\", 2)\n\t\tswitch strings.TrimSpace(pair[0]) {\n\t\tcase \"dangling\":\n\t\t\tif pair[1] == \"true\" || pair[1] == \"false\" {\n\t\t\t\tparams.dangling = pair[1]\n\t\t\t} else {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid filter: '%s=[%s]'\", pair[0], pair[1])\n\t\t\t}\n\t\tcase \"label\":\n\t\t\tparams.label = pair[1]\n\t\tcase \"before\":\n\t\t\tbeforeDate, err := setFilterDate(images, pair[1])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"no such id: %s\", pair[0])\n\t\t\t}\n\t\t\tparams.beforeDate = beforeDate\n\t\tcase \"since\":\n\t\t\tsinceDate, err := setFilterDate(images, pair[1])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"no such id: %s\", pair[0])\n\t\t\t}\n\t\t\tparams.sinceDate = sinceDate\n\t\tcase \"reference\":\n\t\t\tparams.referencePattern = pair[1]\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"invalid filter: '%s'\", pair[0])\n\t\t}\n\t}\n\treturn params, 
nil\n}\n\nfunc setFilterDate(images []storage.Image, imgName string) (time.Time, error) {\n\tfor _, image := range images {\n\t\tfor _, name := range image.Names {\n\t\t\tif matchesReference(name, imgName) {\n\t\t\t\t\/\/ Set the date to this image\n\t\t\t\tim, err := parseMetadata(image)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn time.Time{}, errors.Wrapf(err, \"could not get creation date for image %q\", imgName)\n\t\t\t\t}\n\t\t\t\tdate := im.CreatedTime\n\t\t\t\treturn date, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn time.Time{}, fmt.Errorf(\"Could not locate image %q\", imgName)\n}\n\nfunc outputHeader(truncate, digests bool) {\n\tif truncate {\n\t\tfmt.Printf(\"%-20s %-56s \", \"IMAGE ID\", \"IMAGE NAME\")\n\t} else {\n\t\tfmt.Printf(\"%-64s %-56s \", \"IMAGE ID\", \"IMAGE NAME\")\n\t}\n\n\tif digests {\n\t\tfmt.Printf(\"%-64s \", \"DIGEST\")\n\t}\n\n\tfmt.Printf(\"%-22s %s\\n\", \"CREATED AT\", \"SIZE\")\n}\n\nfunc outputImages(images []storage.Image, format string, store storage.Store, filters *filterParams, argName string, hasTemplate, truncate, digests, quiet bool) error {\n\tfor _, image := range images {\n\t\timageMetadata, err := parseMetadata(image)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\tcreatedTime := imageMetadata.CreatedTime.Format(\"Jan 2, 2006 15:04\")\n\t\tdigest := \"\"\n\t\tif len(imageMetadata.Blobs) > 0 {\n\t\t\tdigest = string(imageMetadata.Blobs[0].Digest)\n\t\t}\n\t\tsize, _ := getSize(image, store)\n\n\t\t\/\/ start with an empty list so unnamed images get exactly one \"<none>\" entry\n\t\tnames := []string{}\n\t\tif len(image.Names) > 0 {\n\t\t\tnames = image.Names\n\t\t} else {\n\t\t\t\/\/ images without names should be printed with \"<none>\" as the image name\n\t\t\tnames = append(names, \"<none>\")\n\t\t}\n\t\tfor _, name := range names {\n\t\t\tif !matchesFilter(image, store, name, filters) || !matchesReference(name, argName) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif quiet {\n\t\t\t\tfmt.Printf(\"%-64s\\n\", image.ID)\n\t\t\t\t\/\/ We only want to print each id once\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tparams := imageOutputParams{\n\t\t\t\tID:        image.ID,\n\t\t\t\tName:      name,\n\t\t\t\tDigest:    digest,\n\t\t\t\tCreatedAt: createdTime,\n\t\t\t\tSize:      formattedSize(size),\n\t\t\t}\n\t\t\tif hasTemplate {\n\t\t\t\terr = outputUsingTemplate(format, params)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\toutputUsingFormatString(truncate, digests, params)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc matchesFilter(image storage.Image, store storage.Store, name string, params *filterParams) bool {\n\tif params == nil {\n\t\treturn true\n\t}\n\tif params.dangling != \"\" && !matchesDangling(name, params.dangling) {\n\t\treturn false\n\t} else if params.label != \"\" && !matchesLabel(image, store, params.label) {\n\t\treturn false\n\t} else if params.beforeImage != \"\" && !matchesBeforeImage(image, name, params) {\n\t\treturn false\n\t} else if params.sinceImage != \"\" && !matchesSinceImage(image, name, params) {\n\t\treturn false\n\t} else if params.referencePattern != \"\" && !matchesReference(name, params.referencePattern) {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc matchesDangling(name string, dangling string) bool {\n\tif dangling == \"false\" && name != \"<none>\" {\n\t\treturn true\n\t} else if dangling == \"true\" && name == \"<none>\" {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc matchesLabel(image storage.Image, store storage.Store, label string) bool {\n\tstoreRef, err := is.Transport.ParseStoreReference(store, \"@\"+image.ID)\n\tif err != nil {\n\t\t\/\/ a store reference that cannot be parsed cannot match any label\n\t\treturn false\n\t}\n\timg, err := storeRef.NewImage(nil)\n\tif err != nil {\n\t\treturn false\n\t}\n\tinfo, err := img.Inspect()\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tpair := strings.SplitN(label, \"=\", 2)\n\tfor key, value := range info.Labels {\n\t\tif key == pair[0] {\n\t\t\tif len(pair) == 2 {\n\t\t\t\tif value == pair[1] {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Returns true if the image was created before the filter image. Returns\n\/\/ false otherwise\nfunc matchesBeforeImage(image storage.Image, name string, params *filterParams) bool {\n\tim, err := parseMetadata(image)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif im.CreatedTime.Before(params.beforeDate) {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ Returns true if the image was created since the filter image. Returns\n\/\/ false otherwise\nfunc matchesSinceImage(image storage.Image, name string, params *filterParams) bool {\n\tim, err := parseMetadata(image)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif im.CreatedTime.After(params.sinceDate) {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc matchesID(id, argID string) bool {\n\treturn strings.HasPrefix(argID, id)\n}\n\nfunc matchesReference(name, argName string) bool {\n\tif argName == \"\" {\n\t\treturn true\n\t}\n\tsplitName := strings.Split(name, \":\")\n\t\/\/ If the arg contains a tag, we handle it differently than if it does not\n\tif strings.Contains(argName, \":\") {\n\t\tsplitArg := strings.Split(argName, \":\")\n\t\t\/\/ a tagged argument can only match a name that also carries a tag\n\t\tif len(splitName) < 2 {\n\t\t\treturn false\n\t\t}\n\t\treturn strings.HasSuffix(splitName[0], splitArg[0]) && (splitName[1] == splitArg[1])\n\t}\n\treturn strings.HasSuffix(splitName[0], argName)\n}\n\nfunc formattedSize(size int64) string {\n\tsuffixes := [5]string{\"B\", \"KB\", \"MB\", \"GB\", \"TB\"}\n\n\tcount := 0\n\tformattedSize := float64(size)\n\tfor formattedSize >= 1024 && count < 4 {\n\t\tformattedSize \/= 1024\n\t\tcount++\n\t}\n\treturn fmt.Sprintf(\"%.4g %s\", formattedSize, suffixes[count])\n}\n\nfunc outputUsingTemplate(format string, params imageOutputParams) error {\n\ttmpl, err := template.New(\"image\").Parse(format)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Template parsing error\")\n\t}\n\n\terr = tmpl.Execute(os.Stdout, params)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println()\n\treturn nil\n}\n\nfunc outputUsingFormatString(truncate, digests bool, params imageOutputParams) {\n\tif truncate {\n\t\tfmt.Printf(\"%-20.12s %-56s\", params.ID, params.Name)\n\t} else {\n\t\tfmt.Printf(\"%-64s %-56s\", params.ID, params.Name)\n\t}\n\n\tif digests {\n\t\tfmt.Printf(\" %-64s\", params.Digest)\n\t}\n\tfmt.Printf(\" %-22s %s\\n\", params.CreatedAt, params.Size)\n}\n<commit_msg>Only print heading once when executing buildah images<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n\n\t\"encoding\/json\"\n\n\tis \"github.com\/containers\/image\/storage\"\n\t\"github.com\/containers\/storage\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/urfave\/cli\"\n)\n\ntype jsonImage struct {\n\tID    string   `json:\"id\"`\n\tNames []string `json:\"names\"`\n}\n\ntype imageOutputParams struct {\n\tID        string\n\tName      string\n\tDigest    string\n\tCreatedAt string\n\tSize      string\n}\n\ntype filterParams struct {\n\tdangling         string\n\tlabel            string\n\tbeforeImage      string \/\/ Images are sorted by date, so we can just output until we see the image\n\tsinceImage       string \/\/ Images are sorted by date, so we can just output until we don't see the image\n\tbeforeDate       time.Time\n\tsinceDate        
time.Time\n\treferencePattern string\n}\n\nvar (\n\timagesFlags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"quiet, q\",\n\t\t\tUsage: \"display only image IDs\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"noheading, n\",\n\t\t\tUsage: \"do not print column headings\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"no-trunc, notruncate\",\n\t\t\tUsage: \"do not truncate output\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"json\",\n\t\t\tUsage: \"output in JSON format\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"digests\",\n\t\t\tUsage: \"show digests\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"format\",\n\t\t\tUsage: \"pretty-print images using a Go template. will override --quiet\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"filter, f\",\n\t\t\tUsage: \"filter output based on conditions provided (default [])\",\n\t\t},\n\t}\n\n\timagesDescription = \"Lists locally stored images.\"\n\timagesCommand = cli.Command{\n\t\tName: \"images\",\n\t\tUsage: \"List images in local storage\",\n\t\tDescription: imagesDescription,\n\t\tFlags: imagesFlags,\n\t\tAction: imagesCmd,\n\t\tArgsUsage: \" \",\n\t}\n)\n\nfunc imagesCmd(c *cli.Context) error {\n\tstore, err := getStore(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\timages, err := store.Images()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"error reading images\")\n\t}\n\n\tquiet := false\n\tif c.IsSet(\"quiet\") {\n\t\tquiet = c.Bool(\"quiet\")\n\t}\n\tnoheading := false\n\tif c.IsSet(\"noheading\") {\n\t\tnoheading = c.Bool(\"noheading\")\n\t}\n\ttruncate := true\n\tif c.IsSet(\"no-trunc\") {\n\t\ttruncate = !c.Bool(\"no-trunc\")\n\t}\n\tdigests := false\n\tif c.IsSet(\"digests\") {\n\t\tdigests = c.Bool(\"digests\")\n\t}\n\tformatString := \"\"\n\thasTemplate := false\n\tif c.IsSet(\"format\") {\n\t\tformatString = c.String(\"format\")\n\t\thasTemplate = true\n\t}\n\n\tname := \"\"\n\tif len(c.Args()) == 1 {\n\t\tname = c.Args().Get(0)\n\t} else if len(c.Args()) > 1 {\n\t\treturn errors.New(\"'buildah images' requires at most 1 argument\")\n\t}\n\tif c.IsSet(\"json\") {\n\t\tJSONImages := []jsonImage{}\n\t\tfor _, image := range images {\n\t\t\tJSONImages = append(JSONImages, jsonImage{ID: image.ID, Names: image.Names})\n\t\t}\n\t\tdata, err2 := json.MarshalIndent(JSONImages, \"\", \" \")\n\t\tif err2 != nil {\n\t\t\treturn err2\n\t\t}\n\t\tfmt.Printf(\"%s\\n\", data)\n\t\treturn nil\n\t}\n\tvar params *filterParams\n\tif c.IsSet(\"filter\") {\n\t\tparams, err = parseFilter(images, c.String(\"filter\"))\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"error parsing filter\")\n\t\t}\n\t} else {\n\t\tparams = nil\n\t}\n\n\tif len(images) > 0 && !noheading && !quiet && !hasTemplate {\n\t\toutputHeader(truncate, digests)\n\t}\n\n\treturn outputImages(images, formatString, store, params, name, hasTemplate, truncate, digests, quiet)\n}\n\nfunc parseFilter(images []storage.Image, filter string) (*filterParams, error) {\n\tparams := new(filterParams)\n\tfilterStrings := strings.Split(filter, \",\")\n\tfor _, param := range filterStrings {\n\t\tpair := strings.SplitN(param, \"=\", 2)\n\t\tswitch strings.TrimSpace(pair[0]) {\n\t\tcase \"dangling\":\n\t\t\tif pair[1] == \"true\" || pair[1] == \"false\" {\n\t\t\t\tparams.dangling = pair[1]\n\t\t\t} else {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid filter: '%s=[%s]'\", pair[0], pair[1])\n\t\t\t}\n\t\tcase \"label\":\n\t\t\tparams.label = pair[1]\n\t\tcase \"before\":\n\t\t\tbeforeDate, err := setFilterDate(images, pair[1])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"no such id: %s\", 
pair[0])\n\t\t\t}\n\t\t\tparams.beforeDate = beforeDate\n\t\tcase \"since\":\n\t\t\tsinceDate, err := setFilterDate(images, pair[1])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"no such id: %s\", pair[0])\n\t\t\t}\n\t\t\tparams.sinceDate = sinceDate\n\t\tcase \"reference\":\n\t\t\tparams.referencePattern = pair[1]\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"invalid filter: '%s'\", pair[0])\n\t\t}\n\t}\n\treturn params, nil\n}\n\nfunc setFilterDate(images []storage.Image, imgName string) (time.Time, error) {\n\tfor _, image := range images {\n\t\tfor _, name := range image.Names {\n\t\t\tif matchesReference(name, imgName) {\n\t\t\t\t\/\/ Set the date to this image\n\t\t\t\tim, err := parseMetadata(image)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn time.Time{}, errors.Wrapf(err, \"could not get creation date for image %q\", imgName)\n\t\t\t\t}\n\t\t\t\tdate := im.CreatedTime\n\t\t\t\treturn date, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn time.Time{}, fmt.Errorf(\"Could not locate image %q\", imgName)\n}\n\nfunc outputHeader(truncate, digests bool) {\n\tif truncate {\n\t\tfmt.Printf(\"%-20s %-56s \", \"IMAGE ID\", \"IMAGE NAME\")\n\t} else {\n\t\tfmt.Printf(\"%-64s %-56s \", \"IMAGE ID\", \"IMAGE NAME\")\n\t}\n\n\tif digests {\n\t\tfmt.Printf(\"%-64s \", \"DIGEST\")\n\t}\n\n\tfmt.Printf(\"%-22s %s\\n\", \"CREATED AT\", \"SIZE\")\n}\n\nfunc outputImages(images []storage.Image, format string, store storage.Store, filters *filterParams, argName string, hasTemplate, truncate, digests, quiet bool) error {\n\tfor _, image := range images {\n\t\timageMetadata, err := parseMetadata(image)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\tcreatedTime := imageMetadata.CreatedTime.Format(\"Jan 2, 2006 15:04\")\n\t\tdigest := \"\"\n\t\tif len(imageMetadata.Blobs) > 0 {\n\t\t\tdigest = string(imageMetadata.Blobs[0].Digest)\n\t\t}\n\t\tsize, _ := getSize(image, store)\n\n\t\t\/\/ start with an empty list so unnamed images get exactly one \"<none>\" entry\n\t\tnames := []string{}\n\t\tif len(image.Names) > 0 {\n\t\t\tnames = image.Names\n\t\t} else {\n\t\t\t\/\/ images without names should be printed with \"<none>\" as the image name\n\t\t\tnames = append(names, \"<none>\")\n\t\t}\n\t\tfor _, name := range names {\n\t\t\tif !matchesFilter(image, store, name, filters) || !matchesReference(name, argName) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif quiet {\n\t\t\t\tfmt.Printf(\"%-64s\\n\", image.ID)\n\t\t\t\t\/\/ We only want to print each id once\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tparams := imageOutputParams{\n\t\t\t\tID:        image.ID,\n\t\t\t\tName:      name,\n\t\t\t\tDigest:    digest,\n\t\t\t\tCreatedAt: createdTime,\n\t\t\t\tSize:      formattedSize(size),\n\t\t\t}\n\t\t\tif hasTemplate {\n\t\t\t\terr = outputUsingTemplate(format, params)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\toutputUsingFormatString(truncate, digests, params)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc matchesFilter(image storage.Image, store storage.Store, name string, params *filterParams) bool {\n\tif params == nil {\n\t\treturn true\n\t}\n\tif params.dangling != \"\" && !matchesDangling(name, params.dangling) {\n\t\treturn false\n\t} else if params.label != \"\" && !matchesLabel(image, store, params.label) {\n\t\treturn false\n\t} else if params.beforeImage != \"\" && !matchesBeforeImage(image, name, params) {\n\t\treturn false\n\t} else if params.sinceImage != \"\" && !matchesSinceImage(image, name, params) {\n\t\treturn false\n\t} else if params.referencePattern != \"\" && !matchesReference(name, params.referencePattern) {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc matchesDangling(name string, dangling string) bool {\n\tif dangling == \"false\" && name != \"<none>\" {\n\t\treturn true\n\t} else if dangling == \"true\" && name == \"<none>\" {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc matchesLabel(image storage.Image, store storage.Store, label string) bool {\n\tstoreRef, err := is.Transport.ParseStoreReference(store, \"@\"+image.ID)\n\tif err != nil {\n\t\t\/\/ a store reference that cannot be parsed cannot match any label\n\t\treturn false\n\t}\n\timg, err := storeRef.NewImage(nil)\n\tif err != nil {\n\t\treturn false\n\t}\n\tinfo, err := img.Inspect()\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tpair := strings.SplitN(label, \"=\", 2)\n\tfor key, value := range info.Labels {\n\t\tif key == pair[0] {\n\t\t\tif len(pair) == 2 {\n\t\t\t\tif value == pair[1] {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Returns true if the image was created before the filter image. Returns\n\/\/ false otherwise\nfunc matchesBeforeImage(image storage.Image, name string, params *filterParams) bool {\n\tim, err := parseMetadata(image)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif im.CreatedTime.Before(params.beforeDate) {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ Returns true if the image was created since the filter image. Returns\n\/\/ false otherwise\nfunc matchesSinceImage(image storage.Image, name string, params *filterParams) bool {\n\tim, err := parseMetadata(image)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif im.CreatedTime.After(params.sinceDate) {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc matchesID(id, argID string) bool {\n\treturn strings.HasPrefix(argID, id)\n}\n\nfunc matchesReference(name, argName string) bool {\n\tif argName == \"\" {\n\t\treturn true\n\t}\n\tsplitName := strings.Split(name, \":\")\n\t\/\/ If the arg contains a tag, we handle it differently than if it does not\n\tif strings.Contains(argName, \":\") {\n\t\tsplitArg := strings.Split(argName, \":\")\n\t\t\/\/ a tagged argument can only match a name that also carries a tag\n\t\tif len(splitName) < 2 {\n\t\t\treturn false\n\t\t}\n\t\treturn strings.HasSuffix(splitName[0], splitArg[0]) && (splitName[1] == splitArg[1])\n\t}\n\treturn strings.HasSuffix(splitName[0], argName)\n}\n\nfunc formattedSize(size int64) string {\n\tsuffixes := [5]string{\"B\", \"KB\", \"MB\", \"GB\", \"TB\"}\n\n\tcount := 0\n\tformattedSize := float64(size)\n\tfor formattedSize >= 1024 && count < 4 {\n\t\tformattedSize \/= 1024\n\t\tcount++\n\t}\n\treturn fmt.Sprintf(\"%.4g %s\", formattedSize, suffixes[count])\n}\n\nfunc outputUsingTemplate(format string, params imageOutputParams) error {\n\ttmpl, err := template.New(\"image\").Parse(format)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Template parsing error\")\n\t}\n\n\terr = tmpl.Execute(os.Stdout, params)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println()\n\treturn nil\n}\n\nfunc outputUsingFormatString(truncate, digests bool, params imageOutputParams) {\n\tif truncate {\n\t\tfmt.Printf(\"%-20.12s %-56s\", params.ID, params.Name)\n\t} else {\n\t\tfmt.Printf(\"%-64s %-56s\", params.ID, params.Name)\n\t}\n\n\tif digests {\n\t\tfmt.Printf(\" %-64s\", params.Digest)\n\t}\n\tfmt.Printf(\" %-22s %s\\n\", params.CreatedAt, params.Size)\n}\n<|endoftext|>"}
{"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/clawio\/clawiod\/root\"\n\t\"github.com\/go-kit\/kit\/log\/levels\"\n\t\"github.com\/gorilla\/handlers\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"io\"\n\t\"net\/http\"\n)\n\ntype server struct {\n\tlogger     levels.Levels\n\trouter     http.Handler\n\tconfig     root.Configuration\n\thttpLogger io.Writer\n}\n\nfunc 
newServer(config root.Configuration) (*server, error) {\n\tlogger, err := getLogger(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts := &server{logger: logger, config: config}\n\terr = s.configureRouter()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn s, nil\n}\n\nfunc (s *server) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\thandlers.CombinedLoggingHandler(s.httpLogger, s.router).ServeHTTP(w, r)\n}\n\nfunc (s *server) configureRouter() error {\n\tconfig := s.config\n\n\thttpLogger, err := getHTTPLogger(config)\n\tif err != nil {\n\t\ts.logger.Error().Log(\"error\", err)\n\t\treturn err\n\t}\n\ts.httpLogger = httpLogger\n\n\tloggerMiddleware, err := getLoggerMiddleware(config)\n\tif err != nil {\n\t\ts.logger.Error().Log(\"error\", err)\n\t\treturn err\n\t}\n\n\tcorsMiddleware, err := getCORSMiddleware(config)\n\tif err != nil {\n\t\ts.logger.Error().Log(\"error\", err)\n\t\treturn err\n\t}\n\n\twebServices, err := getWebServices(config)\n\tif err != nil {\n\t\ts.logger.Error().Log(\"error\", err)\n\t\treturn err\n\t}\n\ts.logger.Info().Log(\"msg\", \"web services enabled\", \"webservices\", config.GetEnabledWebServices())\n\n\trouter := mux.NewRouter()\n\tfor key, service := range webServices {\n\t\ts.logger.Info().Log(\"msg\", key+\" web service enabled\")\n\t\tfor path, methods := range service.Endpoints() {\n\t\t\tfor method, handlerFunc := range methods {\n\t\t\t\thandlerFunc = loggerMiddleware.HandlerFunc(handlerFunc)\n\t\t\t\thandlerFunc := http.HandlerFunc(handlerFunc)\n\t\t\t\tvar handler http.Handler\n\t\t\t\tif config.IsCORSMiddlewareEnabled() {\n\t\t\t\t\thandler = handlerFunc\n\t\t\t\t\thandler = corsMiddleware.Handler(handler)\n\t\t\t\t\trouter.Handle(path, handler).Methods(method)\n\t\t\t\t\tprometheus.InstrumentHandler(path, handler)\n\t\t\t\t\ts.logger.Info().Log(\"method\", method, \"endpoint\", path, \"msg\", \"endpoint available\")\n\t\t\t\t\trouter.Handle(path, handler).Methods(\"OPTIONS\")\n\t\t\t\t\ts.logger.Info().Log(\"method\", \"OPTIONS\", \"endpoint\", path, \"msg\", \"endpoint available - created by corsmiddleware\")\n\t\t\t\t} else {\n\t\t\t\t\thandler = handlerFunc\n\t\t\t\t\trouter.Handle(path, handler).Methods(method)\n\t\t\t\t\tprometheus.InstrumentHandler(path, handler)\n\t\t\t\t\ts.logger.Info().Log(\"method\", method, \"endpoint\", path, \"msg\", \"endpoint available\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\ts.router = router\n\treturn nil\n}\n<commit_msg>Add prometheus handler<commit_after>package main\n\nimport (\n\t\"github.com\/clawio\/clawiod\/root\"\n\t\"github.com\/go-kit\/kit\/log\/levels\"\n\t\"github.com\/gorilla\/handlers\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"io\"\n\t\"net\/http\"\n)\n\ntype server struct {\n\tlogger levels.Levels\n\trouter http.Handler\n\tconfig root.Configuration\n\thttpLogger io.Writer\n}\n\nfunc newServer(config root.Configuration) (*server, error) {\n\tlogger, err := getLogger(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts := &server{logger: logger, config: config}\n\terr = s.configureRouter()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn s, nil\n}\n\nfunc (s *server) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\thandlers.CombinedLoggingHandler(s.httpLogger, s.router).ServeHTTP(w, r)\n}\n\nfunc (s *server) configureRouter() error {\n\tconfig := s.config\n\n\thttpLogger, err := getHTTPLogger(config)\n\tif err != nil {\n\t\ts.logger.Error().Log(\"error\", err)\n\t\treturn err\n\t}\n\ts.httpLogger = 
httpLogger\n\n\tloggerMiddleware, err := getLoggerMiddleware(config)\n\tif err != nil {\n\t\ts.logger.Error().Log(\"error\", err)\n\t\treturn err\n\t}\n\n\tcorsMiddleware, err := getCORSMiddleware(config)\n\tif err != nil {\n\t\ts.logger.Error().Log(\"error\", err)\n\t\treturn err\n\t}\n\n\twebServices, err := getWebServices(config)\n\tif err != nil {\n\t\ts.logger.Error().Log(\"error\", err)\n\t\treturn err\n\t}\n\ts.logger.Info().Log(\"msg\", \"web services enabled\", \"webservices\", config.GetEnabledWebServices())\n\n\trouter := mux.NewRouter()\n\trouter.Handle(\"\/metrics\", prometheus.Handler()).Methods(\"GET\")\n\ts.logger.Info().Log(\"method\", \"GET\", \"endpoint\", \"\/metrics\", \"msg\", \"endpoint available - created by prometheus\")\n\tfor key, service := range webServices {\n\t\ts.logger.Info().Log(\"msg\", key+\" web service enabled\")\n\t\tfor path, methods := range service.Endpoints() {\n\t\t\tfor method, handlerFunc := range methods {\n\t\t\t\thandlerFunc = loggerMiddleware.HandlerFunc(handlerFunc)\n\t\t\t\thandlerFunc := http.HandlerFunc(handlerFunc)\n\t\t\t\tvar handler http.Handler\n\t\t\t\tif config.IsCORSMiddlewareEnabled() {\n\t\t\t\t\thandler = handlerFunc\n\t\t\t\t\thandler = corsMiddleware.Handler(handler)\n\t\t\t\t\trouter.Handle(path, handler).Methods(method)\n\t\t\t\t\tprometheus.InstrumentHandler(path, handler)\n\t\t\t\t\ts.logger.Info().Log(\"method\", method, \"endpoint\", path, \"msg\", \"endpoint available\")\n\t\t\t\t\trouter.Handle(path, handler).Methods(\"OPTIONS\")\n\t\t\t\t\ts.logger.Info().Log(\"method\", \"OPTIONS\", \"endpoint\", path, \"msg\", \"endpoint available - created by corsmiddleware\")\n\t\t\t\t} else {\n\t\t\t\t\thandler = handlerFunc\n\t\t\t\t\trouter.Handle(path, handler).Methods(method)\n\t\t\t\t\tprometheus.InstrumentHandler(path, handler)\n\t\t\t\t\ts.logger.Info().Log(\"method\", method, \"endpoint\", path, \"msg\", \"endpoint available\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\ts.router = router\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package rpc\n\nimport (\n\t\"encoding\/gob\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"io\"\n\t\"log\"\n\t\"net\/rpc\"\n)\n\n\/\/ An implementation of packer.Communicator where the communicator is actually\n\/\/ executed over an RPC connection.\ntype communicator struct {\n\tclient *rpc.Client\n\tmux *MuxConn\n}\n\n\/\/ CommunicatorServer wraps a packer.Communicator implementation and makes\n\/\/ it exportable as part of a Golang RPC server.\ntype CommunicatorServer struct {\n\tc packer.Communicator\n\tmux *MuxConn\n}\n\ntype CommandFinished struct {\n\tExitStatus int\n}\n\ntype CommunicatorStartArgs struct {\n\tCommand string\n\tStdinStreamId uint32\n\tStdoutStreamId uint32\n\tStderrStreamId uint32\n\tResponseStreamId uint32\n}\n\ntype CommunicatorDownloadArgs struct {\n\tPath string\n\tWriterStreamId uint32\n}\n\ntype CommunicatorUploadArgs struct {\n\tPath string\n\tReaderStreamId uint32\n}\n\ntype CommunicatorUploadDirArgs struct {\n\tDst string\n\tSrc string\n\tExclude []string\n}\n\nfunc Communicator(client *rpc.Client) *communicator {\n\treturn &communicator{client: client}\n}\n\nfunc (c *communicator) Start(cmd *packer.RemoteCmd) (err error) {\n\tvar args CommunicatorStartArgs\n\targs.Command = cmd.Command\n\n\tif cmd.Stdin != nil {\n\t\targs.StdinStreamId = c.mux.NextId()\n\t\tgo serveSingleCopy(\"stdin\", c.mux, args.StdinStreamId, nil, cmd.Stdin)\n\t}\n\n\tif cmd.Stdout != nil {\n\t\targs.StdoutStreamId = c.mux.NextId()\n\t\tgo serveSingleCopy(\"stdout\", c.mux, 
args.StdoutStreamId, cmd.Stdout, nil)\n\t}\n\n\tif cmd.Stderr != nil {\n\t\targs.StderrStreamId = c.mux.NextId()\n\t\tgo serveSingleCopy(\"stderr\", c.mux, args.StderrStreamId, cmd.Stderr, nil)\n\t}\n\n\tresponseStreamId := c.mux.NextId()\n\targs.ResponseStreamId = responseStreamId\n\n\tgo func() {\n\t\tconn, err := c.mux.Accept(responseStreamId)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[ERR] Error accepting response stream %d: %s\",\n\t\t\t\tresponseStreamId, err)\n\t\t\tcmd.SetExited(123)\n\t\t\treturn\n\t\t}\n\t\tdefer conn.Close()\n\n\t\tvar finished CommandFinished\n\t\tdecoder := gob.NewDecoder(conn)\n\t\tif err := decoder.Decode(&finished); err != nil {\n\t\t\tlog.Printf(\"[ERR] Error decoding response stream %d: %s\",\n\t\t\t\tresponseStreamId, err)\n\t\t\tcmd.SetExited(123)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Printf(\"[INFO] RPC client: Communicator ended with: %d\", finished.ExitStatus)\n\t\tcmd.SetExited(finished.ExitStatus)\n\t}()\n\n\terr = c.client.Call(\"Communicator.Start\", &args, new(interface{}))\n\treturn\n}\n\nfunc (c *communicator) Upload(path string, r io.Reader) (err error) {\n\t\/\/ Pipe the reader through to the connection\n\tstreamId := c.mux.NextId()\n\tgo serveSingleCopy(\"uploadReader\", c.mux, streamId, nil, r)\n\n\targs := CommunicatorUploadArgs{\n\t\tPath: path,\n\t\tReaderStreamId: streamId,\n\t}\n\n\terr = c.client.Call(\"Communicator.Upload\", &args, new(interface{}))\n\treturn\n}\n\nfunc (c *communicator) UploadDir(dst string, src string, exclude []string) error {\n\targs := &CommunicatorUploadDirArgs{\n\t\tDst: dst,\n\t\tSrc: src,\n\t\tExclude: exclude,\n\t}\n\n\tvar reply error\n\terr := c.client.Call(\"Communicator.UploadDir\", args, &reply)\n\tif err == nil {\n\t\terr = reply\n\t}\n\n\treturn err\n}\n\nfunc (c *communicator) Download(path string, w io.Writer) (err error) {\n\t\/\/ Serve a single connection and a single copy\n\tstreamId := c.mux.NextId()\n\tgo serveSingleCopy(\"downloadWriter\", c.mux, streamId, w, nil)\n\n\targs := CommunicatorDownloadArgs{\n\t\tPath: path,\n\t\tWriterStreamId: streamId,\n\t}\n\n\terr = c.client.Call(\"Communicator.Download\", &args, new(interface{}))\n\treturn\n}\n\nfunc (c *CommunicatorServer) Start(args *CommunicatorStartArgs, reply *interface{}) (error) {\n\t\/\/ Build the RemoteCmd on this side so that it all pipes over\n\t\/\/ to the remote side.\n\tvar cmd packer.RemoteCmd\n\tcmd.Command = args.Command\n\n\t\/\/ Create a channel to signal we're done so that we can close\n\t\/\/ our stdin\/stdout\/stderr streams\n\ttoClose := make([]io.Closer, 0)\n\tdoneCh := make(chan struct{})\n\tgo func() {\n\t\t<-doneCh\n\t\tfor _, conn := range toClose {\n\t\t\tdefer conn.Close()\n\t\t}\n\t}()\n\n\tif args.StdinStreamId > 0 {\n\t\tconn, err := c.mux.Dial(args.StdinStreamId)\n\t\tif err != nil {\n\t\t\tclose(doneCh)\n\t\t\treturn NewBasicError(err)\n\t\t}\n\n\t\ttoClose = append(toClose, conn)\n\t\tcmd.Stdin = conn\n\t}\n\n\tif args.StdoutStreamId > 0 {\n\t\tconn, err := c.mux.Dial(args.StdoutStreamId)\n\t\tif err != nil {\n\t\t\tclose(doneCh)\n\t\t\treturn NewBasicError(err)\n\t\t}\n\n\t\ttoClose = append(toClose, conn)\n\t\tcmd.Stdout = conn\n\t}\n\n\tif args.StderrStreamId > 0 {\n\t\tconn, err := c.mux.Dial(args.StderrStreamId)\n\t\tif err != nil {\n\t\t\tclose(doneCh)\n\t\t\treturn NewBasicError(err)\n\t\t}\n\n\t\ttoClose = append(toClose, conn)\n\t\tcmd.Stderr = conn\n\t}\n\n\n\t\/\/ Connect to the response address so we can write our result to it\n\t\/\/ when ready.\n\tresponseC, err := 
c.mux.Dial(args.ResponseStreamId)\n\tif err != nil {\n\t\tclose(doneCh)\n\t\treturn NewBasicError(err)\n\t}\n\tresponseWriter := gob.NewEncoder(responseC)\n\n\t\/\/ Start the actual command\n\terr = c.c.Start(&cmd)\n\tif err != nil {\n\t\tclose(doneCh)\n\t\treturn NewBasicError(err)\n\t}\n\n\t\/\/ Start a goroutine to spin and wait for the process to actual\n\t\/\/ exit. When it does, report it back to caller...\n\tgo func() {\n\t\tdefer close(doneCh)\n\t\tdefer responseC.Close()\n\t\tcmd.Wait()\n\t\tlog.Printf(\"[INFO] RPC endpoint: Communicator ended with: %d\", cmd.ExitStatus)\n\t\tresponseWriter.Encode(&CommandFinished{cmd.ExitStatus})\n\t}()\n\n\treturn nil\n}\n\nfunc (c *CommunicatorServer) Upload(args *CommunicatorUploadArgs, reply *interface{}) (err error) {\n\treaderC, err := c.mux.Dial(args.ReaderStreamId)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer readerC.Close()\n\n\terr = c.c.Upload(args.Path, readerC)\n\treturn\n}\n\nfunc (c *CommunicatorServer) UploadDir(args *CommunicatorUploadDirArgs, reply *error) error {\n\treturn c.c.UploadDir(args.Dst, args.Src, args.Exclude)\n}\n\nfunc (c *CommunicatorServer) Download(args *CommunicatorDownloadArgs, reply *interface{}) (err error) {\n\twriterC, err := c.mux.Dial(args.WriterStreamId)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer writerC.Close()\n\n\terr = c.c.Download(args.Path, writerC)\n\treturn\n}\n\nfunc serveSingleCopy(name string, mux *MuxConn, id uint32, dst io.Writer, src io.Reader) {\n\tconn, err := mux.Accept(id)\n\tif err != nil {\n\t\tlog.Printf(\"'%s' accept error: %s\", name, err)\n\t\treturn\n\t}\n\n\t\/\/ Be sure to close the connection after we're done copying so\n\t\/\/ that an EOF will successfully be sent to the remote side\n\tdefer conn.Close()\n\n\t\/\/ The connection is the destination\/source that is nil\n\tif dst == nil {\n\t\tdst = conn\n\t} else {\n\t\tsrc = conn\n\t}\n\n\twritten, err := io.Copy(dst, src)\n\tlog.Printf(\"%d bytes written for '%s'\", written, name)\n\tif err != nil {\n\t\tlog.Printf(\"'%s' copy error: %s\", name, err)\n\t}\n}\n<commit_msg>packer\/rpc: rename uploadReader to uploadData because that makes sense<commit_after>package rpc\n\nimport (\n\t\"encoding\/gob\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"io\"\n\t\"log\"\n\t\"net\/rpc\"\n)\n\n\/\/ An implementation of packer.Communicator where the communicator is actually\n\/\/ executed over an RPC connection.\ntype communicator struct {\n\tclient *rpc.Client\n\tmux *MuxConn\n}\n\n\/\/ CommunicatorServer wraps a packer.Communicator implementation and makes\n\/\/ it exportable as part of a Golang RPC server.\ntype CommunicatorServer struct {\n\tc packer.Communicator\n\tmux *MuxConn\n}\n\ntype CommandFinished struct {\n\tExitStatus int\n}\n\ntype CommunicatorStartArgs struct {\n\tCommand string\n\tStdinStreamId uint32\n\tStdoutStreamId uint32\n\tStderrStreamId uint32\n\tResponseStreamId uint32\n}\n\ntype CommunicatorDownloadArgs struct {\n\tPath string\n\tWriterStreamId uint32\n}\n\ntype CommunicatorUploadArgs struct {\n\tPath string\n\tReaderStreamId uint32\n}\n\ntype CommunicatorUploadDirArgs struct {\n\tDst string\n\tSrc string\n\tExclude []string\n}\n\nfunc Communicator(client *rpc.Client) *communicator {\n\treturn &communicator{client: client}\n}\n\nfunc (c *communicator) Start(cmd *packer.RemoteCmd) (err error) {\n\tvar args CommunicatorStartArgs\n\targs.Command = cmd.Command\n\n\tif cmd.Stdin != nil {\n\t\targs.StdinStreamId = c.mux.NextId()\n\t\tgo serveSingleCopy(\"stdin\", c.mux, args.StdinStreamId, nil, 
cmd.Stdin)\n\t}\n\n\tif cmd.Stdout != nil {\n\t\targs.StdoutStreamId = c.mux.NextId()\n\t\tgo serveSingleCopy(\"stdout\", c.mux, args.StdoutStreamId, cmd.Stdout, nil)\n\t}\n\n\tif cmd.Stderr != nil {\n\t\targs.StderrStreamId = c.mux.NextId()\n\t\tgo serveSingleCopy(\"stderr\", c.mux, args.StderrStreamId, cmd.Stderr, nil)\n\t}\n\n\tresponseStreamId := c.mux.NextId()\n\targs.ResponseStreamId = responseStreamId\n\n\tgo func() {\n\t\tconn, err := c.mux.Accept(responseStreamId)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[ERR] Error accepting response stream %d: %s\",\n\t\t\t\tresponseStreamId, err)\n\t\t\tcmd.SetExited(123)\n\t\t\treturn\n\t\t}\n\t\tdefer conn.Close()\n\n\t\tvar finished CommandFinished\n\t\tdecoder := gob.NewDecoder(conn)\n\t\tif err := decoder.Decode(&finished); err != nil {\n\t\t\tlog.Printf(\"[ERR] Error decoding response stream %d: %s\",\n\t\t\t\tresponseStreamId, err)\n\t\t\tcmd.SetExited(123)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Printf(\"[INFO] RPC client: Communicator ended with: %d\", finished.ExitStatus)\n\t\tcmd.SetExited(finished.ExitStatus)\n\t}()\n\n\terr = c.client.Call(\"Communicator.Start\", &args, new(interface{}))\n\treturn\n}\n\nfunc (c *communicator) Upload(path string, r io.Reader) (err error) {\n\t\/\/ Pipe the reader through to the connection\n\tstreamId := c.mux.NextId()\n\tgo serveSingleCopy(\"uploadData\", c.mux, streamId, nil, r)\n\n\targs := CommunicatorUploadArgs{\n\t\tPath: path,\n\t\tReaderStreamId: streamId,\n\t}\n\n\terr = c.client.Call(\"Communicator.Upload\", &args, new(interface{}))\n\treturn\n}\n\nfunc (c *communicator) UploadDir(dst string, src string, exclude []string) error {\n\targs := &CommunicatorUploadDirArgs{\n\t\tDst: dst,\n\t\tSrc: src,\n\t\tExclude: exclude,\n\t}\n\n\tvar reply error\n\terr := c.client.Call(\"Communicator.UploadDir\", args, &reply)\n\tif err == nil {\n\t\terr = reply\n\t}\n\n\treturn err\n}\n\nfunc (c *communicator) Download(path string, w io.Writer) (err error) {\n\t\/\/ Serve a single connection and a single copy\n\tstreamId := c.mux.NextId()\n\tgo serveSingleCopy(\"downloadWriter\", c.mux, streamId, w, nil)\n\n\targs := CommunicatorDownloadArgs{\n\t\tPath: path,\n\t\tWriterStreamId: streamId,\n\t}\n\n\terr = c.client.Call(\"Communicator.Download\", &args, new(interface{}))\n\treturn\n}\n\nfunc (c *CommunicatorServer) Start(args *CommunicatorStartArgs, reply *interface{}) (error) {\n\t\/\/ Build the RemoteCmd on this side so that it all pipes over\n\t\/\/ to the remote side.\n\tvar cmd packer.RemoteCmd\n\tcmd.Command = args.Command\n\n\t\/\/ Create a channel to signal we're done so that we can close\n\t\/\/ our stdin\/stdout\/stderr streams\n\ttoClose := make([]io.Closer, 0)\n\tdoneCh := make(chan struct{})\n\tgo func() {\n\t\t<-doneCh\n\t\tfor _, conn := range toClose {\n\t\t\tdefer conn.Close()\n\t\t}\n\t}()\n\n\tif args.StdinStreamId > 0 {\n\t\tconn, err := c.mux.Dial(args.StdinStreamId)\n\t\tif err != nil {\n\t\t\tclose(doneCh)\n\t\t\treturn NewBasicError(err)\n\t\t}\n\n\t\ttoClose = append(toClose, conn)\n\t\tcmd.Stdin = conn\n\t}\n\n\tif args.StdoutStreamId > 0 {\n\t\tconn, err := c.mux.Dial(args.StdoutStreamId)\n\t\tif err != nil {\n\t\t\tclose(doneCh)\n\t\t\treturn NewBasicError(err)\n\t\t}\n\n\t\ttoClose = append(toClose, conn)\n\t\tcmd.Stdout = conn\n\t}\n\n\tif args.StderrStreamId > 0 {\n\t\tconn, err := c.mux.Dial(args.StderrStreamId)\n\t\tif err != nil {\n\t\t\tclose(doneCh)\n\t\t\treturn NewBasicError(err)\n\t\t}\n\n\t\ttoClose = append(toClose, conn)\n\t\tcmd.Stderr = conn\n\t}\n\n\n\t\/\/ Connect to 
the response address so we can write our result to it\n\t\/\/ when ready.\n\tresponseC, err := c.mux.Dial(args.ResponseStreamId)\n\tif err != nil {\n\t\tclose(doneCh)\n\t\treturn NewBasicError(err)\n\t}\n\tresponseWriter := gob.NewEncoder(responseC)\n\n\t\/\/ Start the actual command\n\terr = c.c.Start(&cmd)\n\tif err != nil {\n\t\tclose(doneCh)\n\t\treturn NewBasicError(err)\n\t}\n\n\t\/\/ Start a goroutine to spin and wait for the process to actually\n\t\/\/ exit. When it does, report it back to caller...\n\tgo func() {\n\t\tdefer close(doneCh)\n\t\tdefer responseC.Close()\n\t\tcmd.Wait()\n\t\tlog.Printf(\"[INFO] RPC endpoint: Communicator ended with: %d\", cmd.ExitStatus)\n\t\tresponseWriter.Encode(&CommandFinished{cmd.ExitStatus})\n\t}()\n\n\treturn nil\n}\n\nfunc (c *CommunicatorServer) Upload(args *CommunicatorUploadArgs, reply *interface{}) (err error) {\n\treaderC, err := c.mux.Dial(args.ReaderStreamId)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer readerC.Close()\n\n\terr = c.c.Upload(args.Path, readerC)\n\treturn\n}\n\nfunc (c *CommunicatorServer) UploadDir(args *CommunicatorUploadDirArgs, reply *error) error {\n\treturn c.c.UploadDir(args.Dst, args.Src, args.Exclude)\n}\n\nfunc (c *CommunicatorServer) Download(args *CommunicatorDownloadArgs, reply *interface{}) (err error) {\n\twriterC, err := c.mux.Dial(args.WriterStreamId)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer writerC.Close()\n\n\terr = c.c.Download(args.Path, writerC)\n\treturn\n}\n\nfunc serveSingleCopy(name string, mux *MuxConn, id uint32, dst io.Writer, src io.Reader) {\n\tconn, err := mux.Accept(id)\n\tif err != nil {\n\t\tlog.Printf(\"'%s' accept error: %s\", name, err)\n\t\treturn\n\t}\n\n\t\/\/ Be sure to close the connection after we're done copying so\n\t\/\/ that an EOF will successfully be sent to the remote side\n\tdefer conn.Close()\n\n\t\/\/ The connection is the destination\/source that is nil\n\tif dst == nil {\n\t\tdst = conn\n\t} else {\n\t\tsrc = conn\n\t}\n\n\twritten, err := io.Copy(dst, src)\n\tlog.Printf(\"%d bytes written for '%s'\", written, name)\n\tif err != nil {\n\t\tlog.Printf(\"'%s' copy error: %s\", name, err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/BurntSushi\/toml\"\n)\n\nconst (\n\t\/\/ DefaultPointBatchSize represents the number of points to batch together.\n\tDefaultPointBatchSize = 100\n\n\t\/\/ DefaultWriteBatchSize represents the number of writes to batch together.\n\tDefaultWriteBatchSize = 10 * 1024 * 1024 \/\/ 10MB\n\n\t\/\/ DefaultConcurrentShardQueryLimit represents the number of shards that\n\t\/\/ can be queried concurrently at one time.\n\tDefaultConcurrentShardQueryLimit = 10\n\n\t\/\/ DefaultAPIReadTimeout represents the amount of time before an API request\n\t\/\/ times out.\n\tDefaultAPIReadTimeout = 5 * time.Second\n)\n\n\/\/ Config represents the configuration format for the influxd binary.\ntype Config struct {\n\tHostname string `toml:\"hostname\"`\n\tBindAddress string `toml:\"bind-address\"`\n\tReportingDisabled bool `toml:\"reporting-disabled\"`\n\tVersion string `toml:\"-\"`\n\tInfluxDBVersion string `toml:\"-\"`\n\n\tAdmin struct {\n\t\tPort int `toml:\"port\"`\n\t\tAssets string `toml:\"assets\"`\n\t} `toml:\"admin\"`\n\n\tHTTPAPI struct {\n\t\tPort int `toml:\"port\"`\n\t\tSSLPort int `toml:\"ssl-port\"`\n\t\tSSLCertPath string `toml:\"ssl-cert\"`\n\t\tReadTimeout Duration `toml:\"read-timeout\"`\n\t} `toml:\"api\"`\n\n\tInputPlugins struct {\n\t\tGraphite struct 
{\n\t\t\tEnabled bool `toml:\"enabled\"`\n\t\t\tPort int `toml:\"port\"`\n\t\t\tDatabase string `toml:\"database\"`\n\t\t\tUDPEnabled bool `toml:\"udp_enabled\"`\n\t\t} `toml:\"graphite\"`\n\t\tUDPInput struct {\n\t\t\tEnabled bool `toml:\"enabled\"`\n\t\t\tPort int `toml:\"port\"`\n\t\t\tDatabase string `toml:\"database\"`\n\t\t} `toml:\"udp\"`\n\t\tUDPServersInput []struct {\n\t\t\tEnabled bool `toml:\"enabled\"`\n\t\t\tPort int `toml:\"port\"`\n\t\t\tDatabase string `toml:\"database\"`\n\t\t} `toml:\"udp_servers\"`\n\t} `toml:\"input_plugins\"`\n\n\tRaft struct {\n\t\tPort int `toml:\"port\"`\n\t\tDir string `toml:\"dir\"`\n\t\tTimeout Duration `toml:\"election-timeout\"`\n\t} `toml:\"raft\"`\n\n\tStorage struct {\n\t\tDir string `toml:\"dir\"`\n\t\tWriteBufferSize int `toml:\"write-buffer-size\"`\n\t\tMaxOpenShards int `toml:\"max-open-shards\"`\n\t\tPointBatchSize int `toml:\"point-batch-size\"`\n\t\tWriteBatchSize int `toml:\"write-batch-size\"`\n\t\tEngines map[string]toml.Primitive `toml:\"engines\"`\n\t\tRetentionSweepPeriod Duration `toml:\"retention-sweep-period\"`\n\t} `toml:\"storage\"`\n\n\tCluster struct {\n\t\tDir string `toml:\"dir\"`\n\t\tProtobufPort int `toml:\"protobuf_port\"`\n\t\tProtobufTimeout Duration `toml:\"protobuf_timeout\"`\n\t\tProtobufHeartbeatInterval Duration `toml:\"protobuf_heartbeat\"`\n\t\tMinBackoff Duration `toml:\"protobuf_min_backoff\"`\n\t\tMaxBackoff Duration `toml:\"protobuf_max_backoff\"`\n\t\tWriteBufferSize int `toml:\"write-buffer-size\"`\n\t\tConcurrentShardQueryLimit int `toml:\"concurrent-shard-query-limit\"`\n\t\tMaxResponseBufferSize int `toml:\"max-response-buffer-size\"`\n\t} `toml:\"cluster\"`\n\n\tLogging struct {\n\t\tFile string `toml:\"file\"`\n\t\tLevel string `toml:\"level\"`\n\t} `toml:\"logging\"`\n}\n\n\/\/ NewConfig returns an instance of Config with reasonable defaults.\nfunc NewConfig() *Config {\n\tc := &Config{}\n\tc.Storage.RetentionSweepPeriod = Duration(10 * time.Minute)\n\tc.Cluster.ConcurrentShardQueryLimit = DefaultConcurrentShardQueryLimit\n\tc.Raft.Timeout = Duration(1 * time.Second)\n\tc.HTTPAPI.ReadTimeout = Duration(DefaultAPIReadTimeout)\n\tc.Cluster.MinBackoff = Duration(1 * time.Second)\n\tc.Cluster.MaxBackoff = Duration(10 * time.Second)\n\tc.Cluster.ProtobufHeartbeatInterval = Duration(10 * time.Millisecond)\n\tc.Storage.WriteBufferSize = 1000\n\tc.Cluster.WriteBufferSize = 1000\n\tc.Cluster.MaxResponseBufferSize = 100\n\n\t\/\/ Detect hostname (or set to localhost).\n\tif c.Hostname, _ = os.Hostname(); c.Hostname == \"\" {\n\t\tc.Hostname = \"localhost\"\n\t}\n\n\t\/\/ FIX(benbjohnson): Append where the udp servers are actually used.\n\t\/\/ config.UDPServers = append(config.UDPServers, UDPInputConfig{\n\t\/\/ \tEnabled: tomlConfiguration.InputPlugins.UDPInput.Enabled,\n\t\/\/ \tDatabase: tomlConfiguration.InputPlugins.UDPInput.Database,\n\t\/\/ \tPort: tomlConfiguration.InputPlugins.UDPInput.Port,\n\t\/\/ })\n\n\treturn c\n}\n\n\/\/ PointBatchSize returns the storage point batch size, if set.\n\/\/ If not set, the LevelDB point batch size is returned.\n\/\/ If that is not set then the default point batch size is returned.\nfunc (c *Config) PointBatchSize() int {\n\tif c.Storage.PointBatchSize != 0 {\n\t\treturn c.Storage.PointBatchSize\n\t}\n\treturn DefaultPointBatchSize\n}\n\n\/\/ WriteBatchSize returns the storage write batch size, if set.\n\/\/ If not set, the LevelDB write batch size is returned.\n\/\/ If that is not set then the default write batch size is returned.\nfunc (c *Config) 
WriteBatchSize() int {\n\tif c.Storage.WriteBatchSize != 0 {\n\t\treturn c.Storage.WriteBatchSize\n\t}\n\treturn DefaultWriteBatchSize\n}\n\n\/\/ MaxOpenShards returns the maximum number of shards to keep open at once.\nfunc (c *Config) MaxOpenShards() int {\n\treturn c.Storage.MaxOpenShards\n}\n\n\/\/ ApiHTTPListenAddr returns the binding address for the API HTTP server\nfunc (c *Config) ApiHTTPListenAddr() string {\n\treturn fmt.Sprintf(\"%s:%d\", c.BindAddress, c.HTTPAPI.Port)\n}\n\n\/\/ RaftListenAddr returns the binding address for the Raft server\nfunc (c *Config) RaftListenAddr() string {\n\treturn fmt.Sprintf(\"%s:%d\", c.BindAddress, c.Raft.Port)\n}\n\n\/\/ RaftConnectionString returns the address required to contact the Raft server\nfunc (c *Config) RaftConnectionString() string {\n\treturn fmt.Sprintf(\"%s:%d\", c.Hostname, c.Raft.Port)\n}\n\n\/\/ Size represents a TOML parseable file size.\n\/\/ Users can specify size using \"m\" for megabytes and \"g\" for gigabytes.\ntype Size int\n\n\/\/ UnmarshalText parses a byte size from text.\nfunc (s *Size) UnmarshalText(text []byte) error {\n\t\/\/ Parse numeric portion of value.\n\tlength := len(string(text))\n\tsize, err := strconv.ParseInt(string(text[:length-1]), 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Parse unit of measure (\"m\", \"g\", etc).\n\tswitch suffix := text[len(text)-1]; suffix {\n\tcase 'm':\n\t\tsize *= 1 << 20 \/\/ MB\n\tcase 'g':\n\t\tsize *= 1 << 30 \/\/ GB\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown size suffix: %c\", suffix)\n\t}\n\n\t\/\/ Check for overflow.\n\tif size > maxInt {\n\t\treturn fmt.Errorf(\"size %d cannot be represented by an int\", size)\n\t}\n\n\t*s = Size(size)\n\treturn nil\n}\n\n\/\/ Duration is a TOML wrapper type for time.Duration.\ntype Duration time.Duration\n\n\/\/ UnmarshalText parses a TOML value into a duration value.\nfunc (d *Duration) UnmarshalText(text []byte) error {\n\t\/\/ Ignore if there is no value set.\n\tif len(text) == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ Otherwise parse as a duration formatted string.\n\tduration, err := time.ParseDuration(string(text))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Set duration and return.\n\t*d = Duration(duration)\n\treturn nil\n}\n\n\/\/ ParseConfigFile parses a configuration file at a given path.\nfunc ParseConfigFile(path string) (*Config, error) {\n\tc := NewConfig()\n\tif _, err := toml.DecodeFile(path, &c); err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}\n\n\/\/ ParseConfig parses a configuration string into a config object.\nfunc ParseConfig(s string) (*Config, error) {\n\tc := NewConfig()\n\tif _, err := toml.Decode(s, &c); err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}\n\n\/*\nfunc (c *Config) AdminHTTPPortString() string {\n\tif c.AdminHTTPPort <= 0 {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"%s:%d\", c.BindAddress, c.AdminHTTPPort)\n}\n\nfunc (c *Config) APIHTTPSPortString() string {\n\treturn fmt.Sprintf(\"%s:%d\", c.BindAddress, c.APIHTTPSPort)\n}\n\nfunc (c *Config) GraphitePortString() string {\n\tif c.GraphitePort <= 0 {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"%s:%d\", c.BindAddress, c.GraphitePort)\n}\n\nfunc (c *Config) UDPInputPortString(port int) string {\n\tif port <= 0 {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"%s:%d\", c.BindAddress, port)\n}\n\nfunc (c *Config) ProtobufConnectionString() string {\n\treturn fmt.Sprintf(\"%s:%d\", c.Hostname, c.ProtobufPort)\n}\n\nfunc (c *Config) ProtobufListenString() string {\n\treturn fmt.Sprintf(\"%s:%d\", c.BindAddress, 
c.ProtobufPort)\n}\n\n*\/\n\n\/\/ maxInt is the largest integer representable by a word (architecture dependent).\nconst maxInt = int64(^uint(0) >> 1)\n<commit_msg>Add protocol to Raft connection string<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/BurntSushi\/toml\"\n)\n\nconst (\n\t\/\/ DefaultPointBatchSize represents the number of points to batch together.\n\tDefaultPointBatchSize = 100\n\n\t\/\/ DefaultWriteBatchSize represents the number of writes to batch together.\n\tDefaultWriteBatchSize = 10 * 1024 * 1024 \/\/ 10MB\n\n\t\/\/ DefaultConcurrentShardQueryLimit represents the number of shards that\n\t\/\/ can be queried concurrently at one time.\n\tDefaultConcurrentShardQueryLimit = 10\n\n\t\/\/ DefaultAPIReadTimeout represents the amount of time before an API request\n\t\/\/ times out.\n\tDefaultAPIReadTimeout = 5 * time.Second\n)\n\n\/\/ Config represents the configuration format for the influxd binary.\ntype Config struct {\n\tHostname string `toml:\"hostname\"`\n\tBindAddress string `toml:\"bind-address\"`\n\tReportingDisabled bool `toml:\"reporting-disabled\"`\n\tVersion string `toml:\"-\"`\n\tInfluxDBVersion string `toml:\"-\"`\n\n\tAdmin struct {\n\t\tPort int `toml:\"port\"`\n\t\tAssets string `toml:\"assets\"`\n\t} `toml:\"admin\"`\n\n\tHTTPAPI struct {\n\t\tPort int `toml:\"port\"`\n\t\tSSLPort int `toml:\"ssl-port\"`\n\t\tSSLCertPath string `toml:\"ssl-cert\"`\n\t\tReadTimeout Duration `toml:\"read-timeout\"`\n\t} `toml:\"api\"`\n\n\tInputPlugins struct {\n\t\tGraphite struct {\n\t\t\tEnabled bool `toml:\"enabled\"`\n\t\t\tPort int `toml:\"port\"`\n\t\t\tDatabase string `toml:\"database\"`\n\t\t\tUDPEnabled bool `toml:\"udp_enabled\"`\n\t\t} `toml:\"graphite\"`\n\t\tUDPInput struct {\n\t\t\tEnabled bool `toml:\"enabled\"`\n\t\t\tPort int `toml:\"port\"`\n\t\t\tDatabase string `toml:\"database\"`\n\t\t} `toml:\"udp\"`\n\t\tUDPServersInput []struct {\n\t\t\tEnabled bool `toml:\"enabled\"`\n\t\t\tPort int `toml:\"port\"`\n\t\t\tDatabase string `toml:\"database\"`\n\t\t} `toml:\"udp_servers\"`\n\t} `toml:\"input_plugins\"`\n\n\tRaft struct {\n\t\tPort int `toml:\"port\"`\n\t\tDir string `toml:\"dir\"`\n\t\tTimeout Duration `toml:\"election-timeout\"`\n\t} `toml:\"raft\"`\n\n\tStorage struct {\n\t\tDir string `toml:\"dir\"`\n\t\tWriteBufferSize int `toml:\"write-buffer-size\"`\n\t\tMaxOpenShards int `toml:\"max-open-shards\"`\n\t\tPointBatchSize int `toml:\"point-batch-size\"`\n\t\tWriteBatchSize int `toml:\"write-batch-size\"`\n\t\tEngines map[string]toml.Primitive `toml:\"engines\"`\n\t\tRetentionSweepPeriod Duration `toml:\"retention-sweep-period\"`\n\t} `toml:\"storage\"`\n\n\tCluster struct {\n\t\tDir string `toml:\"dir\"`\n\t\tProtobufPort int `toml:\"protobuf_port\"`\n\t\tProtobufTimeout Duration `toml:\"protobuf_timeout\"`\n\t\tProtobufHeartbeatInterval Duration `toml:\"protobuf_heartbeat\"`\n\t\tMinBackoff Duration `toml:\"protobuf_min_backoff\"`\n\t\tMaxBackoff Duration `toml:\"protobuf_max_backoff\"`\n\t\tWriteBufferSize int `toml:\"write-buffer-size\"`\n\t\tConcurrentShardQueryLimit int `toml:\"concurrent-shard-query-limit\"`\n\t\tMaxResponseBufferSize int `toml:\"max-response-buffer-size\"`\n\t} `toml:\"cluster\"`\n\n\tLogging struct {\n\t\tFile string `toml:\"file\"`\n\t\tLevel string `toml:\"level\"`\n\t} `toml:\"logging\"`\n}\n\n\/\/ NewConfig returns an instance of Config with reasonable defaults.\nfunc NewConfig() *Config {\n\tc := &Config{}\n\tc.Storage.RetentionSweepPeriod = Duration(10 * 
time.Minute)\n\tc.Cluster.ConcurrentShardQueryLimit = DefaultConcurrentShardQueryLimit\n\tc.Raft.Timeout = Duration(1 * time.Second)\n\tc.HTTPAPI.ReadTimeout = Duration(DefaultAPIReadTimeout)\n\tc.Cluster.MinBackoff = Duration(1 * time.Second)\n\tc.Cluster.MaxBackoff = Duration(10 * time.Second)\n\tc.Cluster.ProtobufHeartbeatInterval = Duration(10 * time.Millisecond)\n\tc.Storage.WriteBufferSize = 1000\n\tc.Cluster.WriteBufferSize = 1000\n\tc.Cluster.MaxResponseBufferSize = 100\n\n\t\/\/ Detect hostname (or set to localhost).\n\tif c.Hostname, _ = os.Hostname(); c.Hostname == \"\" {\n\t\tc.Hostname = \"localhost\"\n\t}\n\n\t\/\/ FIX(benbjohnson): Append where the udp servers are actually used.\n\t\/\/ config.UDPServers = append(config.UDPServers, UDPInputConfig{\n\t\/\/ \tEnabled: tomlConfiguration.InputPlugins.UDPInput.Enabled,\n\t\/\/ \tDatabase: tomlConfiguration.InputPlugins.UDPInput.Database,\n\t\/\/ \tPort: tomlConfiguration.InputPlugins.UDPInput.Port,\n\t\/\/ })\n\n\treturn c\n}\n\n\/\/ PointBatchSize returns the storage point batch size, if set.\n\/\/ If not set, the LevelDB point batch size is returned.\n\/\/ If that is not set then the default point batch size is returned.\nfunc (c *Config) PointBatchSize() int {\n\tif c.Storage.PointBatchSize != 0 {\n\t\treturn c.Storage.PointBatchSize\n\t}\n\treturn DefaultPointBatchSize\n}\n\n\/\/ WriteBatchSize returns the storage write batch size, if set.\n\/\/ If not set, the LevelDB write batch size is returned.\n\/\/ If that is not set then the default write batch size is returned.\nfunc (c *Config) WriteBatchSize() int {\n\tif c.Storage.WriteBatchSize != 0 {\n\t\treturn c.Storage.WriteBatchSize\n\t}\n\treturn DefaultWriteBatchSize\n}\n\n\/\/ MaxOpenShards returns the maximum number of shards to keep open at once.\nfunc (c *Config) MaxOpenShards() int {\n\treturn c.Storage.MaxOpenShards\n}\n\n\/\/ ApiHTTPListenAddr returns the binding address for the API HTTP server\nfunc (c *Config) ApiHTTPListenAddr() string {\n\treturn fmt.Sprintf(\"%s:%d\", c.BindAddress, c.HTTPAPI.Port)\n}\n\n\/\/ RaftListenAddr returns the binding address for the Raft server\nfunc (c *Config) RaftListenAddr() string {\n\treturn fmt.Sprintf(\"%s:%d\", c.BindAddress, c.Raft.Port)\n}\n\n\/\/ RaftConnectionString returns the address required to contact the Raft server\nfunc (c *Config) RaftConnectionString() string {\n\treturn fmt.Sprintf(\"http:\/\/%s:%d\", c.Hostname, c.Raft.Port)\n}\n\n\/\/ Size represents a TOML parseable file size.\n\/\/ Users can specify size using \"m\" for megabytes and \"g\" for gigabytes.\ntype Size int\n\n\/\/ UnmarshalText parses a byte size from text.\nfunc (s *Size) UnmarshalText(text []byte) error {\n\t\/\/ Parse numeric portion of value.\n\tlength := len(string(text))\n\tsize, err := strconv.ParseInt(string(text[:length-1]), 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Parse unit of measure (\"m\", \"g\", etc).\n\tswitch suffix := text[len(text)-1]; suffix {\n\tcase 'm':\n\t\tsize *= 1 << 20 \/\/ MB\n\tcase 'g':\n\t\tsize *= 1 << 30 \/\/ GB\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown size suffix: %c\", suffix)\n\t}\n\n\t\/\/ Check for overflow.\n\tif size > maxInt {\n\t\treturn fmt.Errorf(\"size %d cannot be represented by an int\", size)\n\t}\n\n\t*s = Size(size)\n\treturn nil\n}\n\n\/\/ Duration is a TOML wrapper type for time.Duration.\ntype Duration time.Duration\n\n\/\/ UnmarshalText parses a TOML value into a duration value.\nfunc (d *Duration) UnmarshalText(text []byte) error {\n\t\/\/ Ignore if there is no value set.\n\tif 
len(text) == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ Otherwise parse as a duration formatted string.\n\tduration, err := time.ParseDuration(string(text))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Set duration and return.\n\t*d = Duration(duration)\n\treturn nil\n}\n\n\/\/ ParseConfigFile parses a configuration file at a given path.\nfunc ParseConfigFile(path string) (*Config, error) {\n\tc := NewConfig()\n\tif _, err := toml.DecodeFile(path, &c); err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}\n\n\/\/ ParseConfig parses a configuration string into a config object.\nfunc ParseConfig(s string) (*Config, error) {\n\tc := NewConfig()\n\tif _, err := toml.Decode(s, &c); err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}\n\n\/*\nfunc (c *Config) AdminHTTPPortString() string {\n\tif c.AdminHTTPPort <= 0 {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"%s:%d\", c.BindAddress, c.AdminHTTPPort)\n}\n\nfunc (c *Config) APIHTTPSPortString() string {\n\treturn fmt.Sprintf(\"%s:%d\", c.BindAddress, c.APIHTTPSPort)\n}\n\nfunc (c *Config) GraphitePortString() string {\n\tif c.GraphitePort <= 0 {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"%s:%d\", c.BindAddress, c.GraphitePort)\n}\n\nfunc (c *Config) UDPInputPortString(port int) string {\n\tif port <= 0 {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"%s:%d\", c.BindAddress, port)\n}\n\nfunc (c *Config) ProtobufConnectionString() string {\n\treturn fmt.Sprintf(\"%s:%d\", c.Hostname, c.ProtobufPort)\n}\n\nfunc (c *Config) ProtobufListenString() string {\n\treturn fmt.Sprintf(\"%s:%d\", c.BindAddress, c.ProtobufPort)\n}\n\n*\/\n\n\/\/ maxInt is the largest integer representable by a word (architecture dependent).\nconst maxInt = int64(^uint(0) >> 1)\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 Gravitational, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/gravitational\/version\"\n\t\"github.com\/gravitational\/version\/pkg\/tool\"\n)\n\n\/\/ pkg is the path to the package the tool will create linker flags for.\nvar pkg = flag.String(\n\t\"pkg\",\n\t\"\",\n\t\"root package path\",\n)\n\n\/\/ versionPackage is the path to this version package.\n\/\/ It is used to access version information attributes during link time.\n\/\/ This flag is useful when the version package is custom-vendored and has a different package path.\nvar versionPackage = flag.String(\n\t\"verpkg\",\n\t\"github.com\/gravitational\/version\",\n\t\"path to the version package\",\n)\n\n\/\/ semverPattern defines a regexp pattern to modify the results of `git describe` to be semver-compliant.\nvar semverPattern = regexp.MustCompile(`(.+)-([0-9]{1,})-g([0-9a-f]{14})$`)\n\n\/\/ goVersionPattern defines a regexp pattern to parse versions of the `go tool`.\nvar goVersionPattern = regexp.MustCompile(`go([1-9])\\.(\\d+)(?:.\\d+)*`)\n\nfunc main() {\n\tif err := run(); err != nil {\n\t\tlog.Fatalln(err)\n\t}\n}\n\nfunc run() error 
{\n\tlog.SetFlags(0)\n\tflag.Parse()\n\tif *pkg == \"\" {\n\t\treturn fmt.Errorf(\"-pkg required\")\n\t}\n\n\tgoVersion, err := goToolVersion()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to determine go tool version: %v\\n\", err)\n\t}\n\n\tinfo, err := getVersionInfo(*pkg)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to determine version information: %v\\n\", err)\n\t}\n\n\tvar linkFlags []string\n\tlinkFlag := func(key, value string) string {\n\t\tif goVersion <= 14 {\n\t\t\treturn fmt.Sprintf(\"-X %s.%s %s\", *versionPackage, key, value)\n\t\t} else {\n\t\t\treturn fmt.Sprintf(\"-X %s.%s=%s\", *versionPackage, key, value)\n\t\t}\n\t}\n\n\t\/\/ Determine the values of version-related variables as commands to the go linker.\n\tif info.GitCommit != \"\" {\n\t\tlinkFlags = append(linkFlags, linkFlag(\"gitCommit\", info.GitCommit))\n\t\tlinkFlags = append(linkFlags, linkFlag(\"gitTreeState\", info.GitTreeState))\n\t}\n\tif info.Version != \"\" {\n\t\tlinkFlags = append(linkFlags, linkFlag(\"version\", info.Version))\n\t}\n\n\tfmt.Printf(\"%s\", strings.Join(linkFlags, \" \"))\n\treturn nil\n}\n\n\/\/ getVersionInfo collects the build version information for package pkg.\nfunc getVersionInfo(pkg string) (*version.Info, error) {\n\tgit := newGit(pkg)\n\tcommitID, err := git.commitID()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to obtain git commit ID: %v\\n\", err)\n\t}\n\ttreeState, err := git.treeState()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to determine git tree state: %v\\n\", err)\n\t}\n\ttag, err := git.tag(commitID)\n\tif err != nil {\n\t\ttag = \"\"\n\t}\n\tif tag != \"\" {\n\t\ttag = semverify(tag)\n\t\tif treeState == dirty {\n\t\t\ttag = tag + \"-\" + string(treeState)\n\t\t}\n\t}\n\treturn &version.Info{\n\t\tVersion: tag,\n\t\tGitCommit: commitID,\n\t\tGitTreeState: string(treeState),\n\t}, nil\n}\n\n\/\/ goToolVersion determines the version of the `go tool`.\nfunc goToolVersion() (toolVersion, error) {\n\tgoTool := &tool.T{Cmd: \"go\"}\n\tout, err := goTool.Exec(\"version\")\n\tif err != nil {\n\t\treturn toolVersionUnknown, err\n\t}\n\tbuild := strings.Split(out, \" \")\n\tif len(build) > 2 {\n\t\treturn parseToolVersion(build[2]), nil\n\t}\n\treturn toolVersionUnknown, nil\n}\n\n\/\/ parseToolVersion translates a string version of the form 'go1.4.3' to a numeric value 14.\nfunc parseToolVersion(version string) toolVersion {\n\tmatch := goVersionPattern.FindStringSubmatch(version)\n\tif len(match) > 2 {\n\t\t\/\/ After a successful match, match[1] and match[2] are integers\n\t\tmajor := mustAtoi(match[1])\n\t\tminor := mustAtoi(match[2])\n\t\treturn toolVersion(major*10 + minor)\n\t}\n\treturn toolVersionUnknown\n}\n\nfunc newGit(pkg string) *git {\n\targs := []string{\"--work-tree\", pkg, \"--git-dir\", filepath.Join(pkg, \".git\")}\n\treturn &git{&tool.T{\n\t\tCmd: \"git\",\n\t\tArgs: args,\n\t}}\n}\n\n\/\/ git represents an instance of the git tool.\ntype git struct {\n\t*tool.T\n}\n\n\/\/ treeState describes the state of the git tree.\n\/\/ `git describe --dirty` only considers changes to existing files.\n\/\/ We track tree state and consider untracked files as they also affect the build.\ntype treeState string\n\nconst (\n\tclean treeState = \"clean\"\n\tdirty = \"dirty\"\n)\n\n\/\/ toolVersion represents a tool version as an integer.\n\/\/ toolVersion only considers the first two significant version parts and is computed as follows:\n\/\/ \tmajorVersion*10+minorVersion\ntype toolVersion int\n\nconst toolVersionUnknown 
toolVersion = 0\n\nfunc (r *git) commitID() (string, error) {\n\treturn r.Exec(\"rev-parse\", \"HEAD^{commit}\")\n}\n\nfunc (r *git) treeState() (treeState, error) {\n\tout, err := r.Exec(\"status\", \"--porcelain\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(out) == 0 {\n\t\treturn clean, nil\n\t}\n\treturn dirty, nil\n}\n\nfunc (r *git) tag(commitID string) (string, error) {\n\treturn r.Exec(\"describe\", \"--tags\", \"--abbrev=14\", commitID+\"^{commit}\")\n}\n\n\/\/ semverify transforms the output of `git describe` to be semver-compliant.\nfunc semverify(version string) string {\n\tvar result []byte\n\tmatch := semverPattern.FindStringSubmatchIndex(version)\n\tif match != nil {\n\t\treturn string(semverPattern.ExpandString(result, \"$1.$2+$3\", string(version), match))\n\t}\n\treturn version\n}\n\n\/\/ mustAtoi converts value to an integer.\n\/\/ It panics if the value does not represent a valid integer.\nfunc mustAtoi(value string) int {\n\tresult, err := strconv.Atoi(value)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn result\n}\n<commit_msg>Added a -compat flag to run linkflags in go1.4 linker flags syntax compatibility mode.<commit_after>\/*\nCopyright 2015 Gravitational, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/gravitational\/version\"\n\t\"github.com\/gravitational\/version\/pkg\/tool\"\n)\n\n\/\/ pkg is the path to the package the tool will create linker flags for.\nvar pkg = flag.String(\"pkg\", \"\", \"root package path\")\n\n\/\/ versionPackage is the path to this version package.\n\/\/ It is used to access version information attributes during link time.\n\/\/ This flag is useful when the version package is custom-vendored and has a different package path.\nvar versionPackage = flag.String(\"verpkg\", \"github.com\/gravitational\/version\", \"path to the version package\")\n\nvar compatMode = flag.Bool(\"compat\", false, \"generate linker flags using go1.4 syntax\")\n\n\/\/ semverPattern defines a regexp pattern to modify the results of `git describe` to be semver-compliant.\nvar semverPattern = regexp.MustCompile(`(.+)-([0-9]{1,})-g([0-9a-f]{14})$`)\n\n\/\/ goVersionPattern defines a regexp pattern to parse versions of the `go tool`.\nvar goVersionPattern = regexp.MustCompile(`go([1-9])\\.(\\d+)(?:.\\d+)*`)\n\nfunc main() {\n\tif err := run(); err != nil {\n\t\tlog.Fatalln(err)\n\t}\n}\n\nfunc run() error {\n\tlog.SetFlags(0)\n\tflag.Parse()\n\tif *pkg == \"\" {\n\t\treturn fmt.Errorf(\"-pkg required\")\n\t}\n\n\tgoVersion, err := goToolVersion()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to determine go tool version: %v\\n\", err)\n\t}\n\n\tinfo, err := getVersionInfo(*pkg)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to determine version information: %v\\n\", err)\n\t}\n\n\tvar linkFlags []string\n\tlinkFlag := func(key, value string) string {\n\t\tif goVersion <= 14 || *compatMode {\n\t\t\treturn 
fmt.Sprintf(\"-X %s.%s %s\", *versionPackage, key, value)\n\t\t} else {\n\t\t\treturn fmt.Sprintf(\"-X %s.%s=%s\", *versionPackage, key, value)\n\t\t}\n\t}\n\n\t\/\/ Determine the values of version-related variables as commands to the go linker.\n\tif info.GitCommit != \"\" {\n\t\tlinkFlags = append(linkFlags, linkFlag(\"gitCommit\", info.GitCommit))\n\t\tlinkFlags = append(linkFlags, linkFlag(\"gitTreeState\", info.GitTreeState))\n\t}\n\tif info.Version != \"\" {\n\t\tlinkFlags = append(linkFlags, linkFlag(\"version\", info.Version))\n\t}\n\n\tfmt.Printf(\"%s\", strings.Join(linkFlags, \" \"))\n\treturn nil\n}\n\n\/\/ getVersionInfo collects the build version information for package pkg.\nfunc getVersionInfo(pkg string) (*version.Info, error) {\n\tgit := newGit(pkg)\n\tcommitID, err := git.commitID()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to obtain git commit ID: %v\\n\", err)\n\t}\n\ttreeState, err := git.treeState()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to determine git tree state: %v\\n\", err)\n\t}\n\ttag, err := git.tag(commitID)\n\tif err != nil {\n\t\ttag = \"\"\n\t}\n\tif tag != \"\" {\n\t\ttag = semverify(tag)\n\t\tif treeState == dirty {\n\t\t\ttag = tag + \"-\" + string(treeState)\n\t\t}\n\t}\n\treturn &version.Info{\n\t\tVersion: tag,\n\t\tGitCommit: commitID,\n\t\tGitTreeState: string(treeState),\n\t}, nil\n}\n\n\/\/ goToolVersion determines the version of the `go tool`.\nfunc goToolVersion() (toolVersion, error) {\n\tgoTool := &tool.T{Cmd: \"go\"}\n\tout, err := goTool.Exec(\"version\")\n\tif err != nil {\n\t\treturn toolVersionUnknown, err\n\t}\n\tbuild := strings.Split(out, \" \")\n\tif len(build) > 2 {\n\t\treturn parseToolVersion(build[2]), nil\n\t}\n\treturn toolVersionUnknown, nil\n}\n\n\/\/ parseToolVersion translates a string version of the form 'go1.4.3' to a numeric value 14.\nfunc parseToolVersion(version string) toolVersion {\n\tmatch := goVersionPattern.FindStringSubmatch(version)\n\tif len(match) > 2 {\n\t\t\/\/ After a successful match, match[1] and match[2] are integers\n\t\tmajor := mustAtoi(match[1])\n\t\tminor := mustAtoi(match[2])\n\t\treturn toolVersion(major*10 + minor)\n\t}\n\treturn toolVersionUnknown\n}\n\nfunc newGit(pkg string) *git {\n\targs := []string{\"--work-tree\", pkg, \"--git-dir\", filepath.Join(pkg, \".git\")}\n\treturn &git{&tool.T{\n\t\tCmd: \"git\",\n\t\tArgs: args,\n\t}}\n}\n\n\/\/ git represents an instance of the git tool.\ntype git struct {\n\t*tool.T\n}\n\n\/\/ treeState describes the state of the git tree.\n\/\/ `git describe --dirty` only considers changes to existing files.\n\/\/ We track tree state and consider untracked files as they also affect the build.\ntype treeState string\n\nconst (\n\tclean treeState = \"clean\"\n\tdirty = \"dirty\"\n)\n\n\/\/ toolVersion represents a tool version as an integer.\n\/\/ toolVersion only considers the first two significant version parts and is computed as follows:\n\/\/ \tmajorVersion*10+minorVersion\ntype toolVersion int\n\nconst toolVersionUnknown toolVersion = 0\n\nfunc (r *git) commitID() (string, error) {\n\treturn r.Exec(\"rev-parse\", \"HEAD^{commit}\")\n}\n\nfunc (r *git) treeState() (treeState, error) {\n\tout, err := r.Exec(\"status\", \"--porcelain\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(out) == 0 {\n\t\treturn clean, nil\n\t}\n\treturn dirty, nil\n}\n\nfunc (r *git) tag(commitID string) (string, error) {\n\treturn r.Exec(\"describe\", \"--tags\", \"--abbrev=14\", commitID+\"^{commit}\")\n}\n\n\/\/ semverify transforms the 
output of `git describe` to be semver-compliant.\nfunc semverify(version string) string {\n\tvar result []byte\n\tmatch := semverPattern.FindStringSubmatchIndex(version)\n\tif match != nil {\n\t\treturn string(semverPattern.ExpandString(result, \"$1.$2+$3\", string(version), match))\n\t}\n\treturn version\n}\n\n\/\/ mustAtoi converts value to an integer.\n\/\/ It panics if the value does not represent a valid integer.\nfunc mustAtoi(value string) int {\n\tresult, err := strconv.Atoi(value)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/bbs\"\n\t\"code.cloudfoundry.org\/cfhttp\"\n\t\"code.cloudfoundry.org\/clock\"\n\t\"code.cloudfoundry.org\/consuladapter\"\n\t\"code.cloudfoundry.org\/debugserver\"\n\t\"code.cloudfoundry.org\/diego-ssh\/authenticators\"\n\t\"code.cloudfoundry.org\/diego-ssh\/healthcheck\"\n\t\"code.cloudfoundry.org\/diego-ssh\/proxy\"\n\t\"code.cloudfoundry.org\/diego-ssh\/server\"\n\t\"code.cloudfoundry.org\/lager\"\n\t\"code.cloudfoundry.org\/lager\/lagerflags\"\n\t\"code.cloudfoundry.org\/locket\"\n\t\"github.com\/cloudfoundry\/dropsonde\"\n\t\"github.com\/hashicorp\/consul\/api\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/grouper\"\n\t\"github.com\/tedsuo\/ifrit\/http_server\"\n\t\"github.com\/tedsuo\/ifrit\/sigmon\"\n\t\"golang.org\/x\/crypto\/ssh\"\n)\n\nvar address = flag.String(\n\t\"address\",\n\t\":2222\",\n\t\"listen address for ssh proxy\",\n)\n\nvar healthCheckAddress = flag.String(\n\t\"healthCheckAddress\",\n\t\":2223\",\n\t\"listen address for ssh proxy health check server\",\n)\n\nvar hostKey = flag.String(\n\t\"hostKey\",\n\t\"\",\n\t\"PEM encoded RSA host key\",\n)\n\nvar bbsAddress = flag.String(\n\t\"bbsAddress\",\n\t\"\",\n\t\"Address of the BBS API Server\",\n)\n\nvar ccAPIURL = flag.String(\n\t\"ccAPIURL\",\n\t\"\",\n\t\"URL of Cloud Controller API\",\n)\n\nvar uaaTokenURL = flag.String(\n\t\"uaaTokenURL\",\n\t\"\",\n\t\"URL of the UAA OAuth2 token endpoint that includes the oauth client ID and password\",\n)\n\nvar uaaPassword = flag.String(\n\t\"uaaPassword\",\n\t\"\",\n\t\"Basic auth password for UAA.\",\n)\n\nvar uaaUsername = flag.String(\n\t\"uaaUsername\",\n\t\"\",\n\t\"Username for UAA\",\n)\n\nvar skipCertVerify = flag.Bool(\n\t\"skipCertVerify\",\n\tfalse,\n\t\"skip SSL certificate verification\",\n)\n\nvar communicationTimeout = flag.Duration(\n\t\"communicationTimeout\",\n\t10*time.Second,\n\t\"Timeout applied to all HTTP requests.\",\n)\n\nvar dropsondePort = flag.Int(\n\t\"dropsondePort\",\n\t3457,\n\t\"port the local metron agent is listening on\",\n)\n\nvar enableCFAuth = flag.Bool(\n\t\"enableCFAuth\",\n\tfalse,\n\t\"Allow authentication with cf\",\n)\n\nvar enableDiegoAuth = flag.Bool(\n\t\"enableDiegoAuth\",\n\tfalse,\n\t\"Allow authentication with diego\",\n)\n\nvar diegoCredentials = flag.String(\n\t\"diegoCredentials\",\n\t\"\",\n\t\"Diego Credentials to be used with the Diego authentication method\",\n)\n\nvar bbsCACert = flag.String(\n\t\"bbsCACert\",\n\t\"\",\n\t\"path to certificate authority cert used for mutually authenticated TLS BBS communication\",\n)\n\nvar bbsClientCert = flag.String(\n\t\"bbsClientCert\",\n\t\"\",\n\t\"path to client cert used for mutually authenticated TLS BBS communication\",\n)\n\nvar bbsClientKey = 
flag.String(\n\t\"bbsClientKey\",\n\t\"\",\n\t\"path to client key used for mutually authenticated TLS BBS communication\",\n)\n\nvar bbsClientSessionCacheSize = flag.Int(\n\t\"bbsClientSessionCacheSize\",\n\t0,\n\t\"Capacity of the ClientSessionCache option on the TLS configuration. If zero, golang's default will be used\",\n)\n\nvar bbsMaxIdleConnsPerHost = flag.Int(\n\t\"bbsMaxIdleConnsPerHost\",\n\t0,\n\t\"Controls the maximum number of idle (keep-alive) connections per host. If zero, golang's default will be used\",\n)\n\nvar consulCluster = flag.String(\n\t\"consulCluster\",\n\t\"\",\n\t\"Consul Agent URL\",\n)\n\nvar allowedCiphers = flag.String(\n\t\"allowedCiphers\",\n\t\"\",\n\t\"Limit cipher algorithms to those provided (comma separated)\",\n)\n\nvar allowedMACs = flag.String(\n\t\"allowedMACs\",\n\t\"\",\n\t\"Limit MAC algorithms to those provided (comma separated)\",\n)\n\nvar allowedKeyExchanges = flag.String(\n\t\"allowedKeyExchanges\",\n\t\"\",\n\t\"Limit key exchanges algorithms to those provided (comma separated)\",\n)\n\nconst (\n\tdropsondeOrigin = \"ssh-proxy\"\n)\n\nfunc main() {\n\tdebugserver.AddFlags(flag.CommandLine)\n\tlagerflags.AddFlags(flag.CommandLine)\n\tflag.Parse()\n\n\tcfhttp.Initialize(*communicationTimeout)\n\n\tlogger, reconfigurableSink := lagerflags.New(\"ssh-proxy\")\n\n\tinitializeDropsonde(logger)\n\n\tproxyConfig, err := configureProxy(logger)\n\tif err != nil {\n\t\tlogger.Error(\"configure-failed\", err)\n\t\tos.Exit(1)\n\t}\n\n\tsshProxy := proxy.New(logger, proxyConfig)\n\tserver := server.NewServer(logger, *address, sshProxy)\n\n\thealthCheckHandler := healthcheck.NewHandler(logger)\n\thttpServer := http_server.New(*healthCheckAddress, healthCheckHandler)\n\n\tconsulClient, err := consuladapter.NewClientFromUrl(*consulCluster)\n\tif err != nil {\n\t\tlogger.Fatal(\"new-client-failed\", err)\n\t}\n\n\tregistrationRunner := initializeRegistrationRunner(logger, consulClient, *address, clock.NewClock())\n\n\tmembers := grouper.Members{\n\t\t{\"ssh-proxy\", server},\n\t\t{\"registration-runner\", registrationRunner},\n\t\t{\"healthcheck\", httpServer},\n\t}\n\n\tif dbgAddr := debugserver.DebugAddress(flag.CommandLine); dbgAddr != \"\" {\n\t\tmembers = append(grouper.Members{{\n\t\t\t\"debug-server\", debugserver.Runner(dbgAddr, reconfigurableSink),\n\t\t}}, members...)\n\t}\n\n\tgroup := grouper.NewOrdered(os.Interrupt, members)\n\tmonitor := ifrit.Invoke(sigmon.New(group))\n\n\tlogger.Info(\"started\")\n\n\terr = <-monitor.Wait()\n\tif err != nil {\n\t\tlogger.Error(\"exited-with-failure\", err)\n\t\tos.Exit(1)\n\t}\n\n\tlogger.Info(\"exited\")\n\tos.Exit(0)\n}\n\nfunc configureProxy(logger lager.Logger) (*ssh.ServerConfig, error) {\n\tif *bbsAddress == \"\" {\n\t\terr := errors.New(\"bbsAddress is required\")\n\t\tlogger.Fatal(\"bbs-address-required\", err)\n\t}\n\n\turl, err := url.Parse(*bbsAddress)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed-to-parse-bbs-address\", err)\n\t}\n\n\tbbsClient := initializeBBSClient(logger)\n\tpermissionsBuilder := authenticators.NewPermissionsBuilder(bbsClient)\n\n\tauthens := []authenticators.PasswordAuthenticator{}\n\n\tif *enableDiegoAuth {\n\t\tdiegoAuthenticator := authenticators.NewDiegoProxyAuthenticator(logger, []byte(*diegoCredentials), permissionsBuilder)\n\t\tauthens = append(authens, diegoAuthenticator)\n\t}\n\n\tif *enableCFAuth {\n\t\tif *ccAPIURL == \"\" {\n\t\t\treturn nil, errors.New(\"ccAPIURL is required for Cloud Foundry authentication\")\n\t\t}\n\n\t\t_, err = url.Parse(*ccAPIURL)\n\t\tif 
*ccAPIURL != \"\" && err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif *uaaPassword == \"\" {\n\t\t\treturn nil, errors.New(\"UAA password is required for Cloud Foundry authentication\")\n\t\t}\n\n\t\tif *uaaUsername == \"\" {\n\t\t\treturn nil, errors.New(\"UAA username is required for Cloud Foundry authentication\")\n\t\t}\n\n\t\tif *uaaTokenURL == \"\" {\n\t\t\treturn nil, errors.New(\"uaaTokenURL is required for Cloud Foundry authentication\")\n\t\t}\n\n\t\t_, err = url.Parse(*uaaTokenURL)\n\t\tif *uaaTokenURL != \"\" && err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tclient := NewHttpClient()\n\t\tcfAuthenticator := authenticators.NewCFAuthenticator(\n\t\t\tlogger,\n\t\t\tclient,\n\t\t\t*ccAPIURL,\n\t\t\t*uaaTokenURL,\n\t\t\t*uaaUsername,\n\t\t\t*uaaPassword,\n\t\t\tpermissionsBuilder,\n\t\t)\n\t\tauthens = append(authens, cfAuthenticator)\n\t}\n\n\tauthenticator := authenticators.NewCompositeAuthenticator(authens...)\n\n\tsshConfig := &ssh.ServerConfig{\n\t\tPasswordCallback: authenticator.Authenticate,\n\t\tAuthLogCallback: func(cmd ssh.ConnMetadata, method string, err error) {\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(\"authentication-failed\", err, lager.Data{\"user\": cmd.User()})\n\t\t\t} else {\n\t\t\t\tlogger.Info(\"authentication-attempted\", lager.Data{\"user\": cmd.User()})\n\t\t\t}\n\t\t},\n\t}\n\n\tif *hostKey == \"\" {\n\t\terr := errors.New(\"hostKey is required\")\n\t\tlogger.Fatal(\"host-key-required\", err)\n\t}\n\n\tkey, err := parsePrivateKey(logger, *hostKey)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed-to-parse-host-key\", err)\n\t}\n\n\tsshConfig.AddHostKey(key)\n\n\tif *allowedCiphers != \"\" {\n\t\tsshConfig.Config.Ciphers = strings.Split(*allowedCiphers, \",\")\n\t}\n\tif *allowedMACs != \"\" {\n\t\tsshConfig.Config.MACs = strings.Split(*allowedMACs, \",\")\n\t}\n\tif *allowedKeyExchanges != \"\" {\n\t\tsshConfig.Config.KeyExchanges = strings.Split(*allowedKeyExchanges, \",\")\n\t}\n\n\treturn sshConfig, err\n}\n\nfunc initializeDropsonde(logger lager.Logger) {\n\tdropsondeDestination := fmt.Sprint(\"localhost:\", *dropsondePort)\n\terr := dropsonde.Initialize(dropsondeDestination, dropsondeOrigin)\n\tif err != nil {\n\t\tlogger.Error(\"failed to initialize dropsonde: %v\", err)\n\t}\n}\n\nfunc parsePrivateKey(logger lager.Logger, encodedKey string) (ssh.Signer, error) {\n\tkey, err := ssh.ParsePrivateKey([]byte(encodedKey))\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-parse-private-key\", err)\n\t\treturn nil, err\n\t}\n\treturn key, nil\n}\n\nfunc NewHttpClient() *http.Client {\n\tdialer := &net.Dialer{Timeout: 5 * time.Second}\n\ttlsConfig := &tls.Config{InsecureSkipVerify: *skipCertVerify}\n\treturn &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: dialer.Dial,\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t},\n\t\tTimeout: *communicationTimeout,\n\t}\n}\n\nfunc initializeBBSClient(logger lager.Logger) bbs.InternalClient {\n\tbbsURL, err := url.Parse(*bbsAddress)\n\tif err != nil {\n\t\tlogger.Fatal(\"Invalid BBS URL\", err)\n\t}\n\n\tif bbsURL.Scheme != \"https\" {\n\t\treturn bbs.NewClient(*bbsAddress)\n\t}\n\n\tbbsClient, err := bbs.NewSecureClient(*bbsAddress, *bbsCACert, *bbsClientCert, *bbsClientKey, *bbsClientSessionCacheSize, *bbsMaxIdleConnsPerHost)\n\tif err != nil {\n\t\tlogger.Fatal(\"Failed to configure secure BBS client\", err)\n\t}\n\treturn bbsClient\n}\n\nfunc initializeRegistrationRunner(logger lager.Logger, consulClient consuladapter.Client, listenAddress string, clock clock.Clock) ifrit.Runner {\n\t_, portString, err := 
net.SplitHostPort(listenAddress)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed-invalid-listen-address\", err)\n\t}\n\tportNum, err := net.LookupPort(\"tcp\", portString)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed-invalid-listen-port\", err)\n\t}\n\n\tregistration := &api.AgentServiceRegistration{\n\t\tName: \"ssh-proxy\",\n\t\tPort: portNum,\n\t\tCheck: &api.AgentServiceCheck{\n\t\t\tTTL: \"3s\",\n\t\t},\n\t}\n\n\treturn locket.NewRegistrationRunner(logger, registration, consulClient, locket.RetryInterval, clock)\n}\n<commit_msg>Add a KeepAlive to ssh-proxy httpclient<commit_after>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/bbs\"\n\t\"code.cloudfoundry.org\/cfhttp\"\n\t\"code.cloudfoundry.org\/clock\"\n\t\"code.cloudfoundry.org\/consuladapter\"\n\t\"code.cloudfoundry.org\/debugserver\"\n\t\"code.cloudfoundry.org\/diego-ssh\/authenticators\"\n\t\"code.cloudfoundry.org\/diego-ssh\/healthcheck\"\n\t\"code.cloudfoundry.org\/diego-ssh\/proxy\"\n\t\"code.cloudfoundry.org\/diego-ssh\/server\"\n\t\"code.cloudfoundry.org\/lager\"\n\t\"code.cloudfoundry.org\/lager\/lagerflags\"\n\t\"code.cloudfoundry.org\/locket\"\n\t\"github.com\/cloudfoundry\/dropsonde\"\n\t\"github.com\/hashicorp\/consul\/api\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/grouper\"\n\t\"github.com\/tedsuo\/ifrit\/http_server\"\n\t\"github.com\/tedsuo\/ifrit\/sigmon\"\n\t\"golang.org\/x\/crypto\/ssh\"\n)\n\nvar address = flag.String(\n\t\"address\",\n\t\":2222\",\n\t\"listen address for ssh proxy\",\n)\n\nvar healthCheckAddress = flag.String(\n\t\"healthCheckAddress\",\n\t\":2223\",\n\t\"listen address for ssh proxy health check server\",\n)\n\nvar hostKey = flag.String(\n\t\"hostKey\",\n\t\"\",\n\t\"PEM encoded RSA host key\",\n)\n\nvar bbsAddress = flag.String(\n\t\"bbsAddress\",\n\t\"\",\n\t\"Address of the BBS API Server\",\n)\n\nvar ccAPIURL = flag.String(\n\t\"ccAPIURL\",\n\t\"\",\n\t\"URL of Cloud Controller API\",\n)\n\nvar uaaTokenURL = flag.String(\n\t\"uaaTokenURL\",\n\t\"\",\n\t\"URL of the UAA OAuth2 token endpoint that includes the oauth client ID and password\",\n)\n\nvar uaaPassword = flag.String(\n\t\"uaaPassword\",\n\t\"\",\n\t\"Basic auth password for UAA.\",\n)\n\nvar uaaUsername = flag.String(\n\t\"uaaUsername\",\n\t\"\",\n\t\"Username for UAA\",\n)\n\nvar skipCertVerify = flag.Bool(\n\t\"skipCertVerify\",\n\tfalse,\n\t\"skip SSL certificate verification\",\n)\n\nvar communicationTimeout = flag.Duration(\n\t\"communicationTimeout\",\n\t10*time.Second,\n\t\"Timeout applied to all HTTP requests.\",\n)\n\nvar dropsondePort = flag.Int(\n\t\"dropsondePort\",\n\t3457,\n\t\"port the local metron agent is listening on\",\n)\n\nvar enableCFAuth = flag.Bool(\n\t\"enableCFAuth\",\n\tfalse,\n\t\"Allow authentication with cf\",\n)\n\nvar enableDiegoAuth = flag.Bool(\n\t\"enableDiegoAuth\",\n\tfalse,\n\t\"Allow authentication with diego\",\n)\n\nvar diegoCredentials = flag.String(\n\t\"diegoCredentials\",\n\t\"\",\n\t\"Diego Credentials to be used with the Diego authentication method\",\n)\n\nvar bbsCACert = flag.String(\n\t\"bbsCACert\",\n\t\"\",\n\t\"path to certificate authority cert used for mutually authenticated TLS BBS communication\",\n)\n\nvar bbsClientCert = flag.String(\n\t\"bbsClientCert\",\n\t\"\",\n\t\"path to client cert used for mutually authenticated TLS BBS communication\",\n)\n\nvar bbsClientKey = 
flag.String(\n\t\"bbsClientKey\",\n\t\"\",\n\t\"path to client key used for mutually authenticated TLS BBS communication\",\n)\n\nvar bbsClientSessionCacheSize = flag.Int(\n\t\"bbsClientSessionCacheSize\",\n\t0,\n\t\"Capacity of the ClientSessionCache option on the TLS configuration. If zero, golang's default will be used\",\n)\n\nvar bbsMaxIdleConnsPerHost = flag.Int(\n\t\"bbsMaxIdleConnsPerHost\",\n\t0,\n\t\"Controls the maximum number of idle (keep-alive) connections per host. If zero, golang's default will be used\",\n)\n\nvar consulCluster = flag.String(\n\t\"consulCluster\",\n\t\"\",\n\t\"Consul Agent URL\",\n)\n\nvar allowedCiphers = flag.String(\n\t\"allowedCiphers\",\n\t\"\",\n\t\"Limit cipher algorithms to those provided (comma separated)\",\n)\n\nvar allowedMACs = flag.String(\n\t\"allowedMACs\",\n\t\"\",\n\t\"Limit MAC algorithms to those provided (comma separated)\",\n)\n\nvar allowedKeyExchanges = flag.String(\n\t\"allowedKeyExchanges\",\n\t\"\",\n\t\"Limit key exchanges algorithms to those provided (comma separated)\",\n)\n\nconst (\n\tdropsondeOrigin = \"ssh-proxy\"\n)\n\nfunc main() {\n\tdebugserver.AddFlags(flag.CommandLine)\n\tlagerflags.AddFlags(flag.CommandLine)\n\tflag.Parse()\n\n\tcfhttp.Initialize(*communicationTimeout)\n\n\tlogger, reconfigurableSink := lagerflags.New(\"ssh-proxy\")\n\n\tinitializeDropsonde(logger)\n\n\tproxyConfig, err := configureProxy(logger)\n\tif err != nil {\n\t\tlogger.Error(\"configure-failed\", err)\n\t\tos.Exit(1)\n\t}\n\n\tsshProxy := proxy.New(logger, proxyConfig)\n\tserver := server.NewServer(logger, *address, sshProxy)\n\n\thealthCheckHandler := healthcheck.NewHandler(logger)\n\thttpServer := http_server.New(*healthCheckAddress, healthCheckHandler)\n\n\tconsulClient, err := consuladapter.NewClientFromUrl(*consulCluster)\n\tif err != nil {\n\t\tlogger.Fatal(\"new-client-failed\", err)\n\t}\n\n\tregistrationRunner := initializeRegistrationRunner(logger, consulClient, *address, clock.NewClock())\n\n\tmembers := grouper.Members{\n\t\t{\"ssh-proxy\", server},\n\t\t{\"registration-runner\", registrationRunner},\n\t\t{\"healthcheck\", httpServer},\n\t}\n\n\tif dbgAddr := debugserver.DebugAddress(flag.CommandLine); dbgAddr != \"\" {\n\t\tmembers = append(grouper.Members{{\n\t\t\t\"debug-server\", debugserver.Runner(dbgAddr, reconfigurableSink),\n\t\t}}, members...)\n\t}\n\n\tgroup := grouper.NewOrdered(os.Interrupt, members)\n\tmonitor := ifrit.Invoke(sigmon.New(group))\n\n\tlogger.Info(\"started\")\n\n\terr = <-monitor.Wait()\n\tif err != nil {\n\t\tlogger.Error(\"exited-with-failure\", err)\n\t\tos.Exit(1)\n\t}\n\n\tlogger.Info(\"exited\")\n\tos.Exit(0)\n}\n\nfunc configureProxy(logger lager.Logger) (*ssh.ServerConfig, error) {\n\tif *bbsAddress == \"\" {\n\t\terr := errors.New(\"bbsAddress is required\")\n\t\tlogger.Fatal(\"bbs-address-required\", err)\n\t}\n\n\turl, err := url.Parse(*bbsAddress)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed-to-parse-bbs-address\", err)\n\t}\n\n\tbbsClient := initializeBBSClient(logger)\n\tpermissionsBuilder := authenticators.NewPermissionsBuilder(bbsClient)\n\n\tauthens := []authenticators.PasswordAuthenticator{}\n\n\tif *enableDiegoAuth {\n\t\tdiegoAuthenticator := authenticators.NewDiegoProxyAuthenticator(logger, []byte(*diegoCredentials), permissionsBuilder)\n\t\tauthens = append(authens, diegoAuthenticator)\n\t}\n\n\tif *enableCFAuth {\n\t\tif *ccAPIURL == \"\" {\n\t\t\treturn nil, errors.New(\"ccAPIURL is required for Cloud Foundry authentication\")\n\t\t}\n\n\t\t_, err = url.Parse(*ccAPIURL)\n\t\tif 
*ccAPIURL != \"\" && err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif *uaaPassword == \"\" {\n\t\t\treturn nil, errors.New(\"UAA password is required for Cloud Foundry authentication\")\n\t\t}\n\n\t\tif *uaaUsername == \"\" {\n\t\t\treturn nil, errors.New(\"UAA username is required for Cloud Foundry authentication\")\n\t\t}\n\n\t\tif *uaaTokenURL == \"\" {\n\t\t\treturn nil, errors.New(\"uaaTokenURL is required for Cloud Foundry authentication\")\n\t\t}\n\n\t\t_, err = url.Parse(*uaaTokenURL)\n\t\tif *uaaTokenURL != \"\" && err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tclient := NewHttpClient()\n\t\tcfAuthenticator := authenticators.NewCFAuthenticator(\n\t\t\tlogger,\n\t\t\tclient,\n\t\t\t*ccAPIURL,\n\t\t\t*uaaTokenURL,\n\t\t\t*uaaUsername,\n\t\t\t*uaaPassword,\n\t\t\tpermissionsBuilder,\n\t\t)\n\t\tauthens = append(authens, cfAuthenticator)\n\t}\n\n\tauthenticator := authenticators.NewCompositeAuthenticator(authens...)\n\n\tsshConfig := &ssh.ServerConfig{\n\t\tPasswordCallback: authenticator.Authenticate,\n\t\tAuthLogCallback: func(cmd ssh.ConnMetadata, method string, err error) {\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(\"authentication-failed\", err, lager.Data{\"user\": cmd.User()})\n\t\t\t} else {\n\t\t\t\tlogger.Info(\"authentication-attempted\", lager.Data{\"user\": cmd.User()})\n\t\t\t}\n\t\t},\n\t}\n\n\tif *hostKey == \"\" {\n\t\terr := errors.New(\"hostKey is required\")\n\t\tlogger.Fatal(\"host-key-required\", err)\n\t}\n\n\tkey, err := parsePrivateKey(logger, *hostKey)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed-to-parse-host-key\", err)\n\t}\n\n\tsshConfig.AddHostKey(key)\n\n\tif *allowedCiphers != \"\" {\n\t\tsshConfig.Config.Ciphers = strings.Split(*allowedCiphers, \",\")\n\t}\n\tif *allowedMACs != \"\" {\n\t\tsshConfig.Config.MACs = strings.Split(*allowedMACs, \",\")\n\t}\n\tif *allowedKeyExchanges != \"\" {\n\t\tsshConfig.Config.KeyExchanges = strings.Split(*allowedKeyExchanges, \",\")\n\t}\n\n\treturn sshConfig, err\n}\n\nfunc initializeDropsonde(logger lager.Logger) {\n\tdropsondeDestination := fmt.Sprint(\"localhost:\", *dropsondePort)\n\terr := dropsonde.Initialize(dropsondeDestination, dropsondeOrigin)\n\tif err != nil {\n\t\tlogger.Error(\"failed to initialize dropsonde: %v\", err)\n\t}\n}\n\nfunc parsePrivateKey(logger lager.Logger, encodedKey string) (ssh.Signer, error) {\n\tkey, err := ssh.ParsePrivateKey([]byte(encodedKey))\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-parse-private-key\", err)\n\t\treturn nil, err\n\t}\n\treturn key, nil\n}\n\nfunc NewHttpClient() *http.Client {\n\tdialer := &net.Dialer{\n\t\tTimeout: 5 * time.Second,\n\t\tKeepAlive: 30 * time.Second,\n\t}\n\n\ttlsConfig := &tls.Config{InsecureSkipVerify: *skipCertVerify}\n\treturn &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: dialer.Dial,\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t},\n\t\tTimeout: *communicationTimeout,\n\t}\n}\n\nfunc initializeBBSClient(logger lager.Logger) bbs.InternalClient {\n\tbbsURL, err := url.Parse(*bbsAddress)\n\tif err != nil {\n\t\tlogger.Fatal(\"Invalid BBS URL\", err)\n\t}\n\n\tif bbsURL.Scheme != \"https\" {\n\t\treturn bbs.NewClient(*bbsAddress)\n\t}\n\n\tbbsClient, err := bbs.NewSecureClient(*bbsAddress, *bbsCACert, *bbsClientCert, *bbsClientKey, *bbsClientSessionCacheSize, *bbsMaxIdleConnsPerHost)\n\tif err != nil {\n\t\tlogger.Fatal(\"Failed to configure secure BBS client\", err)\n\t}\n\treturn bbsClient\n}\n\nfunc initializeRegistrationRunner(logger lager.Logger, consulClient consuladapter.Client, listenAddress string, clock 
clock.Clock) ifrit.Runner {\n\t_, portString, err := net.SplitHostPort(listenAddress)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed-invalid-listen-address\", err)\n\t}\n\tportNum, err := net.LookupPort(\"tcp\", portString)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed-invalid-listen-port\", err)\n\t}\n\n\tregistration := &api.AgentServiceRegistration{\n\t\tName: \"ssh-proxy\",\n\t\tPort: portNum,\n\t\tCheck: &api.AgentServiceCheck{\n\t\t\tTTL: \"3s\",\n\t\t},\n\t}\n\n\treturn locket.NewRegistrationRunner(logger, registration, consulClient, locket.RetryInterval, clock)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"math\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\ntype Summary struct {\n\tTitle string\n\tEdges map[Arc]*Edge\n\tNodes map[string]*Node\n\tTotalCalls int\n\tTotalEdges int\n}\n\nfunc writeSVG(w io.Writer, summary *Summary) error {\n\tdotcmd := exec.Command(\"dot\", \"-Tsvg\")\n\tvar output bytes.Buffer\n\tdotcmd.Stdout = &output\n\tdotcmd.Stderr = os.Stderr\n\tpr, pw := io.Pipe()\n\tdotcmd.Stdin = pr\n\tif err := dotcmd.Start(); err != nil {\n\t\tlog.Fatalf(\"cannot exec dot: %v\", err)\n\t}\n\tif err := dotTemplate.Execute(pw, summary); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tpw.Close()\n\tif err := dotcmd.Wait(); err != nil {\n\t\tlog.Fatalf(\"dot failed: %v\", err)\n\t}\n\tif _, err := os.Stdout.Write(rewriteSVG(output.Bytes())); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc fontSize(x, y int) float64 {\n\tif y == 0 {\n\t\treturn 0\n\t}\n\treturn 8 + (50 * math.Sqrt(float64(x)\/float64(y)))\n}\n\nfunc lineAttrs(x, y int) string {\n\tvar frac float64\n\tif y == 0 {\n\t\tfrac = 0\n\t} else {\n\t\tfrac = 3 * float64(x) \/ float64(y)\n\t}\n\tif frac > 1 {\n\t\t\/\/ SVG output treats line widths < 1 poorly.\n\t\tfrac = 1\n\t}\n\tw := frac * 2\n\tif w < 1 {\n\t\tw = 1\n\t}\n\t\/\/ Dot sometimes segfaults if given edge weights that are too large, so\n\t\/\/ we cap the weights at a large value\n\tedgeWeight := math.Pow(float64(x), 0.7)\n\tif edgeWeight > 100000 {\n\t\tedgeWeight = 100000\n\t}\n\tedgeWeight = math.Floor(edgeWeight)\n\treturn fmt.Sprintf(`weight=%g, style=\"setlinewidth(%f)\"`, edgeWeight, w)\n}\n\n\/\/ See dot(1).\nvar dotTemplate = template.Must(template.New(\"\").Funcs(template.FuncMap{\n\t\"fontSize\": fontSize,\n\t\"lineAttrs\": lineAttrs,\n}).Parse(`\n\tdigraph {{.Title|printf \"%q\"}} {\n\tsize=\"8,11\"\n\tnode [width=0.375,height=0.25];\n\t{{range $n := .Nodes}}\n\tN{{$n.Id}} [\n\t\tlabel={{printf \"%s [%d]\" $n.Title $n.Count | printf \"%q\"}},\n\t\tshape=box,\n\t\tfontsize={{fontSize $n.Count $.TotalCalls}},\n\t];\n\t{{end}}\n\t{{range $arc, $e := .Edges}}\n\tN{{$arc.Node1.Id}} -> N{{$arc.Node0.Id}} [label={{$e.Count}}, {{lineAttrs $e.Count $.TotalEdges}}];\n\t{{end}}\n}\n`))\n\nfunc rewriteSVG(data []byte) []byte {\n\t\/\/ Dot's SVG output is\n\t\/\/\n\t\/\/ <svg width=\"___\" height=\"___\"\n\t\/\/ viewBox=\"___\" xmlns=...>\n\t\/\/ <g id=\"graph0\" transform=\"...\">\n\t\/\/ ...\n\t\/\/ <\/g>\n\t\/\/ <\/svg>\n\t\/\/\n\t\/\/ Change it to\n\t\/\/\n\t\/\/ <svg width=\"100%\" height=\"100%\"\n\t\/\/ xmlns=...>\n\t\/\/ $svg_javascript\n\t\/\/ <g id=\"viewport\" transform=\"translate(0,0)\">\n\t\/\/ <g id=\"graph0\" transform=\"...\">\n\t\/\/ ...\n\t\/\/ <\/g>\n\t\/\/ <\/g>\n\t\/\/ <\/svg>\n\n\t\/\/ Fix width, height; drop viewBox.\n\tdata = regexpReplace(data,\n\t\t`(?s)<svg width=\"[^\"]+\" 
height=\"[^\"]+\"(.*?)viewBox=\"[^\"]+\"`,\n\t\t`<svg width=\"100%\" height=\"100%\"$1`)\n\n\t\/\/ Insert script, viewport <g> above first <g>\n\tviewport := `<g id=\"viewport\" transform=\"translate(0,0)\">\n`\n\tdata = regexpReplace(data, `<g id=\"graph\\d\"(.*?)`, svgJavascript+viewport+\"$0\")\n\n\t\/\/ Insert final <\/g> above <\/svg>.\n\tdata = regexpReplace(data, `(.*)(<\/svg>)`, `$1<\/g>$2`)\n\tdata = regexpReplace(data, `<g id=\"graph\\d\"(.*?)`, `<g id=\"viewport\"$1`)\n\treturn data\n}\n\nfunc regexpReplace(data []byte, re string, replacement string) []byte {\n\trec := regexp.MustCompile(re)\n\treturn rec.ReplaceAll(data, []byte(replacement))\n}\n\nfunc init() {\n\tif strings.Contains(svgJavascript, \"$\") {\n\t\tpanic(\"javascript contains $ - can't be used as regexp substitute\")\n\t}\n}\n\nconst svgJavascript = `\n<script type=\"text\/ecmascript\"><![CDATA[\n\/\/ SVGPan\n\/\/ http:\/\/www.cyberz.org\/blog\/2009\/12\/08\/svgpan-a-javascript-svg-panzoomdrag-library\/\n\/\/ Local modification: if(true || ...) below to force panning, never moving.\n\/\/ Local modification: add clamping to fix bug in handleMouseWheel.\n\n\/**\n * SVGPan library 1.2\n * ====================\n *\n * Given a unique existing element with id \"viewport\", including the\n * library into any SVG adds the following capabilities:\n *\n * - Mouse panning\n * - Mouse zooming (using the wheel)\n * - Object dragging\n *\n * Known issues:\n *\n * - Zooming (while panning) on Safari has still some issues\n *\n * Releases:\n *\n * 1.2, Sat Mar 20 08:42:50 GMT 2010, Zeng Xiaohui\n *\tFixed a bug with browser mouse handler interaction\n *\n * 1.1, Wed Feb 3 17:39:33 GMT 2010, Zeng Xiaohui\n *\tUpdated the zoom code to support the mouse wheel on Safari\/Chrome\n *\n * 1.0, Andrea Leofreddi\n *\tFirst release\n *\n * This code is licensed under the following BSD license:\n *\n * Copyright 2009-2010 Andrea Leofreddi <a.leofreddi@itcharm.com>. All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without modification, are\n * permitted provided that the following conditions are met:\n *\n * 1. Redistributions of source code must retain the above copyright notice, this list of\n * conditions and the following disclaimer.\n *\n * 2. Redistributions in binary form must reproduce the above copyright notice, this list\n * of conditions and the following disclaimer in the documentation and\/or other materials\n * provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY Andrea Leofreddi ''AS IS'' AND ANY EXPRESS OR IMPLIED\n * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND\n * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL Andrea Leofreddi OR\n * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\n * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n * The views and conclusions contained in the software and documentation are those of the\n * authors and should not be interpreted as representing official policies, either expressed\n * or implied, of Andrea Leofreddi.\n *\/\n\nvar root = document.documentElement;\n\nvar state = 'none', stateTarget, stateOrigin, stateTf;\n\nsetupHandlers(root);\n\n\/**\n * Register handlers\n *\/\nfunction setupHandlers(root){\n\tsetAttributes(root, {\n\t\t\"onmouseup\" : \"add(evt)\",\n\t\t\"onmousedown\" : \"handleMouseDown(evt)\",\n\t\t\"onmousemove\" : \"handleMouseMove(evt)\",\n\t\t\"onmouseup\" : \"handleMouseUp(evt)\",\n\t\t\/\/\"onmouseout\" : \"handleMouseUp(evt)\", \/\/ Decomment this to stop the pan functionality when dragging out of the SVG element\n\t});\n\n\tif(navigator.userAgent.toLowerCase().indexOf('webkit') >= 0)\n\t\twindow.addEventListener('mousewheel', handleMouseWheel, false); \/\/ Chrome\/Safari\n\telse\n\t\twindow.addEventListener('DOMMouseScroll', handleMouseWheel, false); \/\/ Others\n\n\tvar g = svgDoc.getElementById(\"svg\");\n\tg.width = \"100%\";\n\tg.height = \"100%\";\n}\n\n\/**\n * Instance an SVGPoint object with given event coordinates.\n *\/\nfunction getEventPoint(evt) {\n\tvar p = root.createSVGPoint();\n\n\tp.x = evt.clientX;\n\tp.y = evt.clientY;\n\n\treturn p;\n}\n\n\/**\n * Sets the current transform matrix of an element.\n *\/\nfunction setCTM(element, matrix) {\n\tvar s = \"matrix(\" + matrix.a + \",\" + matrix.b + \",\" + matrix.c + \",\" + matrix.d + \",\" + matrix.e + \",\" + matrix.f + \")\";\n\n\telement.setAttribute(\"transform\", s);\n}\n\n\/**\n * Dumps a matrix to a string (useful for debug).\n *\/\nfunction dumpMatrix(matrix) {\n\tvar s = \"[ \" + matrix.a + \", \" + matrix.c + \", \" + matrix.e + \"\\n \" + matrix.b + \", \" + matrix.d + \", \" + matrix.f + \"\\n 0, 0, 1 ]\";\n\n\treturn s;\n}\n\n\/**\n * Sets attributes of an element.\n *\/\nfunction setAttributes(element, attributes){\n\tfor (i in attributes)\n\t\telement.setAttributeNS(null, i, attributes[i]);\n}\n\n\/**\n * Handle mouse move event.\n *\/\nfunction handleMouseWheel(evt) {\n\tif(evt.preventDefault)\n\t\tevt.preventDefault();\n\n\tevt.returnValue = false;\n\n\tvar svgDoc = evt.target.ownerDocument;\n\n\tvar delta;\n\n\tif(evt.wheelDelta)\n\t\tdelta = evt.wheelDelta \/ 3600; \/\/ Chrome\/Safari\n\telse\n\t\tdelta = evt.detail \/ -90; \/\/ Mozilla\n\n\tvar z = 1 + delta; \/\/ Zoom factor: 0.9\/1.1\n\n\t\/\/ Clamp to reasonable values.\n\t\/\/ The 0.1 check is important because\n\t\/\/ a very large scroll can turn into a\n\t\/\/ negative z, which rotates the image 180 degrees.\n\tif(z < 0.1)\n\t\tz = 0.1;\n\tif(z > 10.0)\n\t\tz = 10.0;\n\n\tvar g = svgDoc.getElementById(\"viewport\");\n\n\tvar p = getEventPoint(evt);\n\n\tp = p.matrixTransform(g.getCTM().inverse());\n\n\t\/\/ Compute new scale matrix in current mouse position\n\tvar k = root.createSVGMatrix().translate(p.x, p.y).scale(z).translate(-p.x, -p.y);\n\n setCTM(g, 
g.getCTM().multiply(k));\n\n\tstateTf = stateTf.multiply(k.inverse());\n}\n\n\/**\n * Handle mouse move event.\n *\/\nfunction handleMouseMove(evt) {\n\tif(evt.preventDefault)\n\t\tevt.preventDefault();\n\n\tevt.returnValue = false;\n\n\tvar svgDoc = evt.target.ownerDocument;\n\n\tvar g = svgDoc.getElementById(\"viewport\");\n\n\tif(state == 'pan') {\n\t\t\/\/ Pan mode\n\t\tvar p = getEventPoint(evt).matrixTransform(stateTf);\n\n\t\tsetCTM(g, stateTf.inverse().translate(p.x - stateOrigin.x, p.y - stateOrigin.y));\n\t} else if(state == 'move') {\n\t\t\/\/ Move mode\n\t\tvar p = getEventPoint(evt).matrixTransform(g.getCTM().inverse());\n\n\t\tsetCTM(stateTarget, root.createSVGMatrix().translate(p.x - stateOrigin.x, p.y - stateOrigin.y).multiply(g.getCTM().inverse()).multiply(stateTarget.getCTM()));\n\n\t\tstateOrigin = p;\n\t}\n}\n\n\/**\n * Handle click event.\n *\/\nfunction handleMouseDown(evt) {\n\tif(evt.preventDefault)\n\t\tevt.preventDefault();\n\n\tevt.returnValue = false;\n\n\tvar svgDoc = evt.target.ownerDocument;\n\n\tvar g = svgDoc.getElementById(\"viewport\");\n\n\tif(true || evt.target.tagName == \"svg\") {\n\t\t\/\/ Pan mode\n\t\tstate = 'pan';\n\n\t\tstateTf = g.getCTM().inverse();\n\n\t\tstateOrigin = getEventPoint(evt).matrixTransform(stateTf);\n\t} else {\n\t\t\/\/ Move mode\n\t\tstate = 'move';\n\n\t\tstateTarget = evt.target;\n\n\t\tstateTf = g.getCTM().inverse();\n\n\t\tstateOrigin = getEventPoint(evt).matrixTransform(stateTf);\n\t}\n}\n\n\/**\n * Handle mouse button release event.\n *\/\nfunction handleMouseUp(evt) {\n\tif(evt.preventDefault)\n\t\tevt.preventDefault();\n\n\tevt.returnValue = false;\n\n\tvar svgDoc = evt.target.ownerDocument;\n\n\tif(state == 'pan' || state == 'move') {\n\t\t\/\/ Quit pan mode\n\t\tstate = '';\n\t}\n}\n\n]]><\/script>\n`\n<commit_msg>cmd\/stackgraph: add attribution comment<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"math\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\n\/\/ Most of this code was purloined from go tool pprof.\n\ntype Summary struct {\n\tTitle string\n\tEdges map[Arc]*Edge\n\tNodes map[string]*Node\n\tTotalCalls int\n\tTotalEdges int\n}\n\nfunc writeSVG(w io.Writer, summary *Summary) error {\n\tdotcmd := exec.Command(\"dot\", \"-Tsvg\")\n\tvar output bytes.Buffer\n\tdotcmd.Stdout = &output\n\tdotcmd.Stderr = os.Stderr\n\tpr, pw := io.Pipe()\n\tdotcmd.Stdin = pr\n\tif err := dotcmd.Start(); err != nil {\n\t\tlog.Fatalf(\"cannot exec dot: %v\", err)\n\t}\n\tif err := dotTemplate.Execute(pw, summary); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tpw.Close()\n\tif err := dotcmd.Wait(); err != nil {\n\t\tlog.Fatalf(\"dot failed: %v\", err)\n\t}\n\tif _, err := os.Stdout.Write(rewriteSVG(output.Bytes())); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc fontSize(x, y int) float64 {\n\tif y == 0 {\n\t\treturn 0\n\t}\n\treturn 8 + (50 * math.Sqrt(float64(x)\/float64(y)))\n}\n\nfunc lineAttrs(x, y int) string {\n\tvar frac float64\n\tif y == 0 {\n\t\tfrac = 0\n\t} else {\n\t\tfrac = 3 * float64(x) \/ float64(y)\n\t}\n\tif frac > 1 {\n\t\t\/\/ SVG output treats line widths < 1 poorly.\n\t\tfrac = 1\n\t}\n\tw := frac * 2\n\tif w < 1 {\n\t\tw = 1\n\t}\n\t\/\/ Dot sometimes segfaults if given edge weights that are too large, so\n\t\/\/ we cap the weights at a large value\n\tedgeWeight := math.Pow(float64(x), 0.7)\n\tif edgeWeight > 100000 {\n\t\tedgeWeight = 100000\n\t}\n\tedgeWeight = math.Floor(edgeWeight)\n\treturn fmt.Sprintf(`weight=%g, 
style=\"setlinewidth(%f)\"`, edgeWeight, w)\n}\n\n\/\/ See dot(1).\nvar dotTemplate = template.Must(template.New(\"\").Funcs(template.FuncMap{\n\t\"fontSize\": fontSize,\n\t\"lineAttrs\": lineAttrs,\n}).Parse(`\n\tdigraph {{.Title|printf \"%q\"}} {\n\tsize=\"8,11\"\n\tnode [width=0.375,height=0.25];\n\t{{range $n := .Nodes}}\n\tN{{$n.Id}} [\n\t\tlabel={{printf \"%s [%d]\" $n.Title $n.Count | printf \"%q\"}},\n\t\tshape=box,\n\t\tfontsize={{fontSize $n.Count $.TotalCalls}},\n\t];\n\t{{end}}\n\t{{range $arc, $e := .Edges}}\n\tN{{$arc.Node1.Id}} -> N{{$arc.Node0.Id}} [label={{$e.Count}}, {{lineAttrs $e.Count $.TotalEdges}}];\n\t{{end}}\n}\n`))\n\nfunc rewriteSVG(data []byte) []byte {\n\t\/\/ Dot's SVG output is\n\t\/\/\n\t\/\/ <svg width=\"___\" height=\"___\"\n\t\/\/ viewBox=\"___\" xmlns=...>\n\t\/\/ <g id=\"graph0\" transform=\"...\">\n\t\/\/ ...\n\t\/\/ <\/g>\n\t\/\/ <\/svg>\n\t\/\/\n\t\/\/ Change it to\n\t\/\/\n\t\/\/ <svg width=\"100%\" height=\"100%\"\n\t\/\/ xmlns=...>\n\t\/\/ $svg_javascript\n\t\/\/ <g id=\"viewport\" transform=\"translate(0,0)\">\n\t\/\/ <g id=\"graph0\" transform=\"...\">\n\t\/\/ ...\n\t\/\/ <\/g>\n\t\/\/ <\/g>\n\t\/\/ <\/svg>\n\n\t\/\/ Fix width, height; drop viewBox.\n\tdata = regexpReplace(data,\n\t\t`(?s)<svg width=\"[^\"]+\" height=\"[^\"]+\"(.*?)viewBox=\"[^\"]+\"`,\n\t\t`<svg width=\"100%\" height=\"100%\"$1`)\n\n\t\/\/ Insert script, viewport <g> above first <g>\n\tviewport := `<g id=\"viewport\" transform=\"translate(0,0)\">\n`\n\tdata = regexpReplace(data, `<g id=\"graph\\d\"(.*?)`, svgJavascript+viewport+\"$0\")\n\n\t\/\/ Insert final <\/g> above <\/svg>.\n\tdata = regexpReplace(data, `(.*)(<\/svg>)`, `$1<\/g>$2`)\n\tdata = regexpReplace(data, `<g id=\"graph\\d\"(.*?)`, `<g id=\"viewport\"$1`)\n\treturn data\n}\n\nfunc regexpReplace(data []byte, re string, replacement string) []byte {\n\trec := regexp.MustCompile(re)\n\treturn rec.ReplaceAll(data, []byte(replacement))\n}\n\nfunc init() {\n\tif strings.Contains(svgJavascript, \"$\") {\n\t\tpanic(\"javascript contains $ - can't be used as regexp substitute\")\n\t}\n}\n\nconst svgJavascript = `\n<script type=\"text\/ecmascript\"><![CDATA[\n\/\/ SVGPan\n\/\/ http:\/\/www.cyberz.org\/blog\/2009\/12\/08\/svgpan-a-javascript-svg-panzoomdrag-library\/\n\/\/ Local modification: if(true || ...) below to force panning, never moving.\n\/\/ Local modification: add clamping to fix bug in handleMouseWheel.\n\n\/**\n * SVGPan library 1.2\n * ====================\n *\n * Given an unique existing element with id \"viewport\", including the\n * the library into any SVG adds the following capabilities:\n *\n * - Mouse panning\n * - Mouse zooming (using the wheel)\n * - Object dargging\n *\n * Known issues:\n *\n * - Zooming (while panning) on Safari has still some issues\n *\n * Releases:\n *\n * 1.2, Sat Mar 20 08:42:50 GMT 2010, Zeng Xiaohui\n *\tFixed a bug with browser mouse handler interaction\n *\n * 1.1, Wed Feb 3 17:39:33 GMT 2010, Zeng Xiaohui\n *\tUpdated the zoom code to support the mouse wheel on Safari\/Chrome\n *\n * 1.0, Andrea Leofreddi\n *\tFirst release\n *\n * This code is licensed under the following BSD license:\n *\n * Copyright 2009-2010 Andrea Leofreddi <a.leofreddi@itcharm.com>. All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without modification, are\n * permitted provided that the following conditions are met:\n *\n * 1. Redistributions of source code must retain the above copyright notice, this list of\n * conditions and the following disclaimer.\n *\n * 2. 
Redistributions in binary form must reproduce the above copyright notice, this list\n * of conditions and the following disclaimer in the documentation and\/or other materials\n * provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY Andrea Leofreddi ''AS IS'' AND ANY EXPRESS OR IMPLIED\n * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND\n * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Andrea Leofreddi OR\n * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\n * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n * The views and conclusions contained in the software and documentation are those of the\n * authors and should not be interpreted as representing official policies, either expressed\n * or implied, of Andrea Leofreddi.\n *\/\n\nvar root = document.documentElement;\n\nvar state = 'none', stateTarget, stateOrigin, stateTf;\n\nsetupHandlers(root);\n\n\/**\n * Register handlers\n *\/\nfunction setupHandlers(root){\n\tsetAttributes(root, {\n\t\t\"onmouseup\" : \"add(evt)\",\n\t\t\"onmousedown\" : \"handleMouseDown(evt)\",\n\t\t\"onmousemove\" : \"handleMouseMove(evt)\",\n\t\t\"onmouseup\" : \"handleMouseUp(evt)\",\n\t\t\/\/\"onmouseout\" : \"handleMouseUp(evt)\", \/\/ Decomment this to stop the pan functionality when dragging out of the SVG element\n\t});\n\n\tif(navigator.userAgent.toLowerCase().indexOf('webkit') >= 0)\n\t\twindow.addEventListener('mousewheel', handleMouseWheel, false); \/\/ Chrome\/Safari\n\telse\n\t\twindow.addEventListener('DOMMouseScroll', handleMouseWheel, false); \/\/ Others\n\n\tvar g = svgDoc.getElementById(\"svg\");\n\tg.width = \"100%\";\n\tg.height = \"100%\";\n}\n\n\/**\n * Instance an SVGPoint object with given event coordinates.\n *\/\nfunction getEventPoint(evt) {\n\tvar p = root.createSVGPoint();\n\n\tp.x = evt.clientX;\n\tp.y = evt.clientY;\n\n\treturn p;\n}\n\n\/**\n * Sets the current transform matrix of an element.\n *\/\nfunction setCTM(element, matrix) {\n\tvar s = \"matrix(\" + matrix.a + \",\" + matrix.b + \",\" + matrix.c + \",\" + matrix.d + \",\" + matrix.e + \",\" + matrix.f + \")\";\n\n\telement.setAttribute(\"transform\", s);\n}\n\n\/**\n * Dumps a matrix to a string (useful for debug).\n *\/\nfunction dumpMatrix(matrix) {\n\tvar s = \"[ \" + matrix.a + \", \" + matrix.c + \", \" + matrix.e + \"\\n \" + matrix.b + \", \" + matrix.d + \", \" + matrix.f + \"\\n 0, 0, 1 ]\";\n\n\treturn s;\n}\n\n\/**\n * Sets attributes of an element.\n *\/\nfunction setAttributes(element, attributes){\n\tfor (i in attributes)\n\t\telement.setAttributeNS(null, i, attributes[i]);\n}\n\n\/**\n * Handle mouse move event.\n *\/\nfunction handleMouseWheel(evt) {\n\tif(evt.preventDefault)\n\t\tevt.preventDefault();\n\n\tevt.returnValue = false;\n\n\tvar svgDoc = evt.target.ownerDocument;\n\n\tvar delta;\n\n\tif(evt.wheelDelta)\n\t\tdelta = evt.wheelDelta \/ 3600; \/\/ Chrome\/Safari\n\telse\n\t\tdelta = evt.detail \/ -90; \/\/ Mozilla\n\n\tvar z = 1 + delta; \/\/ Zoom factor: 0.9\/1.1\n\n\t\/\/ Clamp to reasonable values.\n\t\/\/ The 0.1 check is important because\n\t\/\/ a very large scroll can turn 
into a\n\t\/\/ negative z, which rotates the image 180 degrees.\n\tif(z < 0.1)\n\t\tz = 0.1;\n\tif(z > 10.0)\n\t\tz = 10.0;\n\n\tvar g = svgDoc.getElementById(\"viewport\");\n\n\tvar p = getEventPoint(evt);\n\n\tp = p.matrixTransform(g.getCTM().inverse());\n\n\t\/\/ Compute new scale matrix in current mouse position\n\tvar k = root.createSVGMatrix().translate(p.x, p.y).scale(z).translate(-p.x, -p.y);\n\n setCTM(g, g.getCTM().multiply(k));\n\n\tstateTf = stateTf.multiply(k.inverse());\n}\n\n\/**\n * Handle mouse move event.\n *\/\nfunction handleMouseMove(evt) {\n\tif(evt.preventDefault)\n\t\tevt.preventDefault();\n\n\tevt.returnValue = false;\n\n\tvar svgDoc = evt.target.ownerDocument;\n\n\tvar g = svgDoc.getElementById(\"viewport\");\n\n\tif(state == 'pan') {\n\t\t\/\/ Pan mode\n\t\tvar p = getEventPoint(evt).matrixTransform(stateTf);\n\n\t\tsetCTM(g, stateTf.inverse().translate(p.x - stateOrigin.x, p.y - stateOrigin.y));\n\t} else if(state == 'move') {\n\t\t\/\/ Move mode\n\t\tvar p = getEventPoint(evt).matrixTransform(g.getCTM().inverse());\n\n\t\tsetCTM(stateTarget, root.createSVGMatrix().translate(p.x - stateOrigin.x, p.y - stateOrigin.y).multiply(g.getCTM().inverse()).multiply(stateTarget.getCTM()));\n\n\t\tstateOrigin = p;\n\t}\n}\n\n\/**\n * Handle click event.\n *\/\nfunction handleMouseDown(evt) {\n\tif(evt.preventDefault)\n\t\tevt.preventDefault();\n\n\tevt.returnValue = false;\n\n\tvar svgDoc = evt.target.ownerDocument;\n\n\tvar g = svgDoc.getElementById(\"viewport\");\n\n\tif(true || evt.target.tagName == \"svg\") {\n\t\t\/\/ Pan mode\n\t\tstate = 'pan';\n\n\t\tstateTf = g.getCTM().inverse();\n\n\t\tstateOrigin = getEventPoint(evt).matrixTransform(stateTf);\n\t} else {\n\t\t\/\/ Move mode\n\t\tstate = 'move';\n\n\t\tstateTarget = evt.target;\n\n\t\tstateTf = g.getCTM().inverse();\n\n\t\tstateOrigin = getEventPoint(evt).matrixTransform(stateTf);\n\t}\n}\n\n\/**\n * Handle mouse button release event.\n *\/\nfunction handleMouseUp(evt) {\n\tif(evt.preventDefault)\n\t\tevt.preventDefault();\n\n\tevt.returnValue = false;\n\n\tvar svgDoc = evt.target.ownerDocument;\n\n\tif(state == 'pan' || state == 'move') {\n\t\t\/\/ Quit pan mode\n\t\tstate = '';\n\t}\n}\n\n]]><\/script>\n`\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2015 The Syncthing Authors.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/syncthing\/syncthing\/lib\/signature\"\n\t\"github.com\/syncthing\/syncthing\/lib\/upgrade\"\n)\n\nfunc main() {\n\tlog.SetFlags(0)\n\tlog.SetOutput(os.Stdout)\n\n\tflag.Parse()\n\n\tif flag.NArg() < 1 {\n\t\tlog.Println(`Usage:\n\tstsigtool <command>\n\nWhere command is one of:\n\n\tgen\n\t\t- generate a new key pair\n\n\tsign <privkeyfile> <datafile>\n\t\t- sign a file\n\n\tverify <signaturefile> <datafile>\n\t\t- verify a signature, using the built in public key\n\n\tverify <signaturefile> <datafile> <pubkeyfile>\n\t\t- verify a signature, using the specified public key file\n`)\n\t}\n\n\tswitch flag.Arg(0) {\n\tcase \"gen\":\n\t\tgen()\n\tcase \"sign\":\n\t\tsign(flag.Arg(1), flag.Arg(2))\n\tcase \"verify\":\n\t\tif flag.NArg() == 4 {\n\t\t\tverifyWithFile(flag.Arg(1), flag.Arg(2), flag.Arg(3))\n\t\t} else {\n\t\t\tverifyWithKey(flag.Arg(1), flag.Arg(2), upgrade.SigningKey)\n\t\t}\n\t}\n}\n\nfunc gen() {\n\tpriv, pub, err := signature.GenerateKeys()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tos.Stdout.Write(priv)\n\tos.Stdout.Write(pub)\n}\n\nfunc sign(keyname, dataname string) {\n\tprivkey, err := ioutil.ReadFile(keyname)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfd, err := os.Open(dataname)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer fd.Close()\n\n\tsig, err := signature.Sign(privkey, fd)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tos.Stdout.Write(sig)\n}\n\nfunc verifyWithFile(signame, dataname, keyname string) {\n\tpubkey, err := ioutil.ReadFile(keyname)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tverifyWithKey(signame, dataname, pubkey)\n}\n\nfunc verifyWithKey(signame, dataname string, pubkey []byte) {\n\tsig, err := ioutil.ReadFile(signame)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfd, err := os.Open(dataname)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer fd.Close()\n\n\terr = signature.Verify(pubkey, sig, fd)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Println(\"correct signature\")\n}\n<commit_msg>cmd\/stsigtool: Sign stdin when not given a file to sign, or when given \"-\"<commit_after>\/\/ Copyright (C) 2015 The Syncthing Authors.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/syncthing\/syncthing\/lib\/signature\"\n\t\"github.com\/syncthing\/syncthing\/lib\/upgrade\"\n)\n\nfunc main() {\n\tlog.SetFlags(0)\n\tlog.SetOutput(os.Stdout)\n\n\tflag.Parse()\n\n\tif flag.NArg() < 1 {\n\t\tlog.Println(`Usage:\n\tstsigtool <command>\n\nWhere command is one of:\n\n\tgen\n\t\t- generate a new key pair\n\n\tsign <privkeyfile> [datafile]\n\t\t- sign a file\n\n\tverify <signaturefile> <datafile>\n\t\t- verify a signature, using the built in public key\n\n\tverify <signaturefile> <datafile> <pubkeyfile>\n\t\t- verify a signature, using the specified public key file\n`)\n\t}\n\n\tswitch flag.Arg(0) {\n\tcase \"gen\":\n\t\tgen()\n\tcase \"sign\":\n\t\tsign(flag.Arg(1), flag.Arg(2))\n\tcase \"verify\":\n\t\tif flag.NArg() == 4 {\n\t\t\tverifyWithFile(flag.Arg(1), flag.Arg(2), flag.Arg(3))\n\t\t} else {\n\t\t\tverifyWithKey(flag.Arg(1), flag.Arg(2), upgrade.SigningKey)\n\t\t}\n\t}\n}\n\nfunc gen() {\n\tpriv, pub, err := signature.GenerateKeys()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tos.Stdout.Write(priv)\n\tos.Stdout.Write(pub)\n}\n\nfunc sign(keyname, dataname string) {\n\tprivkey, err := ioutil.ReadFile(keyname)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar input io.Reader\n\tif dataname == \"-\" || dataname == \"\" {\n\t\tinput = os.Stdin\n\t} else {\n\t\tfd, err := os.Open(dataname)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdefer fd.Close()\n\t\tinput = fd\n\t}\n\n\tsig, err := signature.Sign(privkey, input)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tos.Stdout.Write(sig)\n}\n\nfunc verifyWithFile(signame, dataname, keyname string) {\n\tpubkey, err := ioutil.ReadFile(keyname)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tverifyWithKey(signame, dataname, pubkey)\n}\n\nfunc verifyWithKey(signame, dataname string, pubkey []byte) {\n\tsig, err := ioutil.ReadFile(signame)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfd, err := os.Open(dataname)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer fd.Close()\n\n\terr = signature.Verify(pubkey, sig, fd)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Println(\"correct signature\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015, David Howden\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"tchaik.com\/index\"\n\t\"tchaik.com\/index\/attr\"\n\n\t\"tchaik.com\/store\"\n)\n\n\/\/ Library is a type which encompases the components which form a full library.\ntype Library struct {\n\tindex.Library\n\n\tcollections map[string]index.Collection\n\tfilters map[string][]index.FilterItem\n\trecent []index.Path\n\tsearcher index.Searcher\n}\n\ntype libraryFileSystem struct {\n\tstore.FileSystem\n\tindex.Library\n}\n\n\/\/ Open implements store.FileSystem and rewrites ID values to their corresponding Location\n\/\/ values using the index.Library.\nfunc (l *libraryFileSystem) Open(ctx context.Context, path string) (http.File, error) {\n\tt, ok := l.Library.Track(strings.Trim(path, \"\/\")) \/\/ IDs arrive with leading slash\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"could not find track: %v\", path)\n\t}\n\n\tloc := t.GetString(\"Location\")\n\tif loc == \"\" {\n\t\treturn 
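The stsigtool change above swaps a mandatory data-file argument for an io.Reader selected once up front, a common Go CLI idiom. Below is a minimal, self-contained sketch of that reader-selection pattern; the SHA-256 digest is only a stand-in for signature.Sign's single pass over the input and is not part of stsigtool.

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	// Default to stdin; open a file only when a real path is given,
	// mirroring stsigtool's check for dataname == "-" || dataname == "".
	var input io.Reader = os.Stdin
	if len(os.Args) > 1 && os.Args[1] != "-" {
		f, err := os.Open(os.Args[1])
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()
		input = f
	}

	// Consume the selected reader exactly once, as sign does via signature.Sign.
	h := sha256.New()
	if _, err := io.Copy(h, input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%x\n", h.Sum(nil))
}

Invoked as a hypothetical binary prog, piping data on stdin, passing "-", or passing a file name all digest the same bytes, which is exactly the behavior the commit gives sign.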
nil, fmt.Errorf(\"invalid (empty) location for track: %v\", path)\n\t}\n\tloc = filepath.ToSlash(loc)\n\treturn l.FileSystem.Open(ctx, loc)\n}\n\ntype group struct {\n\tName string\n\tKey index.Key\n\tTotalTime interface{} `json:\",omitempty\"`\n\tArtist interface{} `json:\",omitempty\"`\n\tAlbumArtist interface{} `json:\",omitempty\"`\n\tComposer interface{} `json:\",omitempty\"`\n\tBitRate interface{} `json:\",omitempty\"`\n\tDiscNumber interface{} `json:\",omitempty\"`\n\tListStyle interface{} `json:\",omitempty\"`\n\tID interface{} `json:\",omitempty\"`\n\tYear interface{} `json:\",omitempty\"`\n\tGroups []group `json:\",omitempty\"`\n\tTracks []track `json:\",omitempty\"`\n\tFavourite bool `json:\",omitempty\"`\n\tChecklist bool `json:\",omitempty\"`\n}\n\ntype track struct {\n\tID string `json:\",omitempty\"`\n\tName string `json:\",omitempty\"`\n\tAlbum string `json:\",omitempty\"`\n\tArtist []string `json:\",omitempty\"`\n\tAlbumArtist []string `json:\",omitempty\"`\n\tComposer []string `json:\",omitempty\"`\n\tYear int `json:\",omitempty\"`\n\tDiscNumber int `json:\",omitempty\"`\n\tTotalTime int `json:\",omitempty\"`\n\tBitRate int `json:\",omitempty\"`\n\tFavourite bool `json:\",omitempty\"`\n\tChecklist bool `json:\",omitempty\"`\n}\n\n\/\/ StringSliceEqual is a function used to compare two interface{} types which are assumed\n\/\/ to be of type []string (or interface{}(nil)).\nfunc StringSliceEqual(x, y interface{}) bool {\n\t\/\/ Annoyingly we have to cater for zero values from map[string]interface{}\n\t\/\/ which don't have the correct type wrapping the nil.\n\tif x == nil || y == nil {\n\t\treturn x == nil && y == nil\n\t}\n\txs := x.([]string) \/\/ NB: panics here are acceptable: should not be called on a non-'Strings' field.\n\tys := y.([]string)\n\tif len(xs) != len(ys) {\n\t\treturn false\n\t}\n\tfor i, xss := range xs {\n\t\tif ys[i] != xss {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc buildCollection(h group, c index.Collection) group {\n\tgetField := func(f string, g index.Group, c index.Collection) interface{} {\n\t\tif StringSliceEqual(g.Field(f), c.Field(f)) {\n\t\t\treturn nil\n\t\t}\n\t\treturn g.Field(f)\n\t}\n\n\tfor _, k := range c.Keys() {\n\t\tg := c.Get(k)\n\t\tg = index.FirstTrackAttr(attr.Strings(\"AlbumArtist\"), g)\n\t\tg = index.CommonGroupAttr([]attr.Interface{attr.Strings(\"Artist\")}, g)\n\t\th.Groups = append(h.Groups, group{\n\t\t\tName: g.Name(),\n\t\t\tKey: k,\n\t\t\tAlbumArtist: getField(\"AlbumArtist\", g, c),\n\t\t\tArtist: getField(\"Artist\", g, c),\n\t\t})\n\t}\n\treturn h\n}\n\nfunc build(g index.Group, key index.Key) group {\n\th := group{\n\t\tName: g.Name(),\n\t\tKey: key,\n\t\tTotalTime: g.Field(\"TotalTime\"),\n\t\tArtist: g.Field(\"Artist\"),\n\t\tAlbumArtist: g.Field(\"AlbumArtist\"),\n\t\tComposer: g.Field(\"Composer\"),\n\t\tYear: g.Field(\"Year\"),\n\t\tBitRate: g.Field(\"BitRate\"),\n\t\tDiscNumber: g.Field(\"DiscNumber\"),\n\t\tListStyle: g.Field(\"ListStyle\"),\n\t\tID: g.Field(\"ID\"),\n\t}\n\n\tif c, ok := g.(index.Collection); ok {\n\t\treturn buildCollection(h, c)\n\t}\n\n\tgetString := func(t index.Track, field string) string {\n\t\tif g.Field(field) != nil {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn t.GetString(field)\n\t}\n\n\tgetStrings := func(t index.Track, field string) []string {\n\t\tif g.Field(field) != nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn t.GetStrings(field)\n\t}\n\n\tgetInt := func(t index.Track, field string) int {\n\t\tif g.Field(field) != nil {\n\t\t\treturn 0\n\t\t}\n\t\treturn 
t.GetInt(field)\n\t}\n\n\tfor _, t := range g.Tracks() {\n\t\th.Tracks = append(h.Tracks, track{\n\t\t\tID: t.GetString(\"ID\"),\n\t\t\tName: t.GetString(\"Name\"),\n\t\t\tTotalTime: t.GetInt(\"TotalTime\"),\n\t\t\t\/\/ Potentially common fields (don't want to re-transmit everything)\n\t\t\tArtist: getStrings(t, \"Artist\"),\n\t\t\tAlbumArtist: getStrings(t, \"AlbumArtist\"),\n\t\t\tComposer: getStrings(t, \"Composer\"),\n\t\t\tAlbum: getString(t, \"Album\"),\n\t\t\tYear: getInt(t, \"Year\"),\n\t\t\tDiscNumber: getInt(t, \"DiscNumber\"),\n\t\t\tBitRate: getInt(t, \"BitRate\"),\n\t\t})\n\t}\n\treturn h\n}\n\nfunc (l *Library) Build(c index.Collection, p index.Path) (index.Group, error) {\n\tif len(p) == 0 {\n\t\treturn c, nil\n\t}\n\n\tvar g index.Group = c\n\tk := index.Key(p[0])\n\tg = c.Get(k)\n\n\tif g == nil {\n\t\treturn g, fmt.Errorf(\"invalid path: near '%v'\", p[0])\n\t}\n\n\tindex.Sort(g.Tracks(), index.MultiSort(index.SortByInt(\"DiscNumber\"), index.SortByInt(\"TrackNumber\")))\n\tg = index.Transform(g, index.SplitList(\"Artist\", \"AlbumArtist\", \"Composer\"))\n\tg = index.Transform(g, index.TrimTrackNumPrefix)\n\tc = index.Collect(g, index.ByPrefix(\"Name\"))\n\tg = index.SubTransform(c, index.TrimEnumPrefix)\n\tg = index.SumGroupIntAttr(\"TotalTime\", g)\n\tcommonFields := []attr.Interface{\n\t\tattr.String(\"Album\"),\n\t\tattr.Strings(\"Artist\"),\n\t\tattr.Strings(\"AlbumArtist\"),\n\t\tattr.Strings(\"Composer\"),\n\t\tattr.Int(\"Year\"),\n\t\tattr.Int(\"BitRate\"),\n\t\tattr.Int(\"DiscNumber\"),\n\t}\n\tg = index.CommonGroupAttr(commonFields, g)\n\tg = index.RemoveEmptyCollections(g)\n\n\tfor i, k := range p[1:] {\n\t\tvar ok bool\n\t\tc, ok = g.(index.Collection)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"retrieved Group is not a Collection\")\n\t\t}\n\n\t\tg = c.Get(k)\n\t\tif g == nil {\n\t\t\treturn g, fmt.Errorf(\"invalid path near '%v'\", p[1:][i])\n\t\t}\n\n\t\tif _, ok = g.(index.Collection); !ok {\n\t\t\tif i == len(p[1:])-1 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"retrieved Group isn't a Collection: %v\", p)\n\t\t}\n\t}\n\tif g == nil {\n\t\treturn g, fmt.Errorf(\"could not find group\")\n\t}\n\tg = index.FirstTrackAttr(attr.String(\"ID\"), g)\n\treturn g, nil\n}\n\n\/\/ Fetch returns a group from the collection with the given path.\nfunc (l *Library) Fetch(c index.Collection, p index.Path) (group, error) {\n\tif len(p) == 0 {\n\t\treturn build(c, index.Key(\"Root\")), nil\n\t}\n\n\tk := index.Key(p[0])\n\tg, err := l.Build(c, p)\n\tif err != nil {\n\t\treturn group{}, err\n\t}\n\treturn build(g, k), nil\n}\n\n\/\/ FileSystem wraps the http.FileSystem in a library lookup which will translate \/ID\n\/\/ requests into their corresponding track paths.\nfunc (l *Library) FileSystem(fs store.FileSystem) store.FileSystem {\n\treturn store.Trace(&libraryFileSystem{fs, l.Library}, \"libraryFileSystem\")\n}\n\n\/\/ ExpandPaths constructs a collection (group) whose sub-groups are taken from the \"Root\"\n\/\/ collection.\nfunc (l *Library) ExpandPaths(paths []index.Path) group {\n\treturn build(index.NewPathsCollection(l.collections[\"Root\"], paths), index.Key(\"Root\"))\n}\n<commit_msg>Refactored root group construction into rootCollection type.<commit_after>\/\/ Copyright 2015, David Howden\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport 
(\n\t\"fmt\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"tchaik.com\/index\"\n\t\"tchaik.com\/index\/attr\"\n\n\t\"tchaik.com\/store\"\n)\n\n\/\/ Library is a type which encompases the components which form a full library.\ntype Library struct {\n\tindex.Library\n\n\tcollections map[string]index.Collection\n\tfilters map[string][]index.FilterItem\n\trecent []index.Path\n\tsearcher index.Searcher\n}\n\ntype libraryFileSystem struct {\n\tstore.FileSystem\n\tindex.Library\n}\n\n\/\/ Open implements store.FileSystem and rewrites ID values to their corresponding Location\n\/\/ values using the index.Library.\nfunc (l *libraryFileSystem) Open(ctx context.Context, path string) (http.File, error) {\n\tt, ok := l.Library.Track(strings.Trim(path, \"\/\")) \/\/ IDs arrive with leading slash\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"could not find track: %v\", path)\n\t}\n\n\tloc := t.GetString(\"Location\")\n\tif loc == \"\" {\n\t\treturn nil, fmt.Errorf(\"invalid (empty) location for track: %v\", path)\n\t}\n\tloc = filepath.ToSlash(loc)\n\treturn l.FileSystem.Open(ctx, loc)\n}\n\ntype group struct {\n\tName string\n\tKey index.Key\n\tTotalTime interface{} `json:\",omitempty\"`\n\tArtist interface{} `json:\",omitempty\"`\n\tAlbumArtist interface{} `json:\",omitempty\"`\n\tComposer interface{} `json:\",omitempty\"`\n\tBitRate interface{} `json:\",omitempty\"`\n\tDiscNumber interface{} `json:\",omitempty\"`\n\tListStyle interface{} `json:\",omitempty\"`\n\tID interface{} `json:\",omitempty\"`\n\tYear interface{} `json:\",omitempty\"`\n\tGroups []group `json:\",omitempty\"`\n\tTracks []track `json:\",omitempty\"`\n\tFavourite bool `json:\",omitempty\"`\n\tChecklist bool `json:\",omitempty\"`\n}\n\ntype track struct {\n\tID string `json:\",omitempty\"`\n\tName string `json:\",omitempty\"`\n\tAlbum string `json:\",omitempty\"`\n\tArtist []string `json:\",omitempty\"`\n\tAlbumArtist []string `json:\",omitempty\"`\n\tComposer []string `json:\",omitempty\"`\n\tYear int `json:\",omitempty\"`\n\tDiscNumber int `json:\",omitempty\"`\n\tTotalTime int `json:\",omitempty\"`\n\tBitRate int `json:\",omitempty\"`\n\tFavourite bool `json:\",omitempty\"`\n\tChecklist bool `json:\",omitempty\"`\n}\n\n\/\/ StringSliceEqual is a function used to compare two interface{} types which are assumed\n\/\/ to be of type []string (or interface{}(nil)).\nfunc StringSliceEqual(x, y interface{}) bool {\n\t\/\/ Annoyingly we have to cater for zero values from map[string]interface{}\n\t\/\/ which don't have the correct type wrapping the nil.\n\tif x == nil || y == nil {\n\t\treturn x == nil && y == nil\n\t}\n\txs := x.([]string) \/\/ NB: panics here are acceptable: should not be called on a non-'Strings' field.\n\tys := y.([]string)\n\tif len(xs) != len(ys) {\n\t\treturn false\n\t}\n\tfor i, xss := range xs {\n\t\tif ys[i] != xss {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc buildCollection(h group, c index.Collection) group {\n\tgetField := func(f string, g index.Group, c index.Collection) interface{} {\n\t\tif StringSliceEqual(g.Field(f), c.Field(f)) {\n\t\t\treturn nil\n\t\t}\n\t\treturn g.Field(f)\n\t}\n\n\tfor _, k := range c.Keys() {\n\t\tg := c.Get(k)\n\t\tg = index.FirstTrackAttr(attr.Strings(\"AlbumArtist\"), g)\n\t\tg = index.CommonGroupAttr([]attr.Interface{attr.Strings(\"Artist\")}, g)\n\t\th.Groups = append(h.Groups, group{\n\t\t\tName: g.Name(),\n\t\t\tKey: k,\n\t\t\tAlbumArtist: getField(\"AlbumArtist\", g, c),\n\t\t\tArtist: getField(\"Artist\", g, 
c),\n\t\t})\n\t}\n\treturn h\n}\n\nfunc build(g index.Group, key index.Key) group {\n\th := group{\n\t\tName: g.Name(),\n\t\tKey: key,\n\t\tTotalTime: g.Field(\"TotalTime\"),\n\t\tArtist: g.Field(\"Artist\"),\n\t\tAlbumArtist: g.Field(\"AlbumArtist\"),\n\t\tComposer: g.Field(\"Composer\"),\n\t\tYear: g.Field(\"Year\"),\n\t\tBitRate: g.Field(\"BitRate\"),\n\t\tDiscNumber: g.Field(\"DiscNumber\"),\n\t\tListStyle: g.Field(\"ListStyle\"),\n\t\tID: g.Field(\"ID\"),\n\t}\n\n\tif c, ok := g.(index.Collection); ok {\n\t\treturn buildCollection(h, c)\n\t}\n\n\tgetString := func(t index.Track, field string) string {\n\t\tif g.Field(field) != nil {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn t.GetString(field)\n\t}\n\n\tgetStrings := func(t index.Track, field string) []string {\n\t\tif g.Field(field) != nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn t.GetStrings(field)\n\t}\n\n\tgetInt := func(t index.Track, field string) int {\n\t\tif g.Field(field) != nil {\n\t\t\treturn 0\n\t\t}\n\t\treturn t.GetInt(field)\n\t}\n\n\tfor _, t := range g.Tracks() {\n\t\th.Tracks = append(h.Tracks, track{\n\t\t\tID: t.GetString(\"ID\"),\n\t\t\tName: t.GetString(\"Name\"),\n\t\t\tTotalTime: t.GetInt(\"TotalTime\"),\n\t\t\t\/\/ Potentially common fields (don't want to re-transmit everything)\n\t\t\tArtist: getStrings(t, \"Artist\"),\n\t\t\tAlbumArtist: getStrings(t, \"AlbumArtist\"),\n\t\t\tComposer: getStrings(t, \"Composer\"),\n\t\t\tAlbum: getString(t, \"Album\"),\n\t\t\tYear: getInt(t, \"Year\"),\n\t\t\tDiscNumber: getInt(t, \"DiscNumber\"),\n\t\t\tBitRate: getInt(t, \"BitRate\"),\n\t\t})\n\t}\n\treturn h\n}\n\ntype rootCollection struct {\n\tindex.Collection\n}\n\nfunc (r *rootCollection) Get(k index.Key) index.Group {\n\tg := r.Collection.Get(k)\n\tif g == nil {\n\t\treturn g\n\t}\n\n\tindex.Sort(g.Tracks(), index.MultiSort(index.SortByInt(\"DiscNumber\"), index.SortByInt(\"TrackNumber\")))\n\tg = index.Transform(g, index.SplitList(\"Artist\", \"AlbumArtist\", \"Composer\"))\n\tg = index.Transform(g, index.TrimTrackNumPrefix)\n\tc := index.Collect(g, index.ByPrefix(\"Name\"))\n\tg = index.SubTransform(c, index.TrimEnumPrefix)\n\tg = index.SumGroupIntAttr(\"TotalTime\", g)\n\tcommonFields := []attr.Interface{\n\t\tattr.String(\"Album\"),\n\t\tattr.Strings(\"Artist\"),\n\t\tattr.Strings(\"AlbumArtist\"),\n\t\tattr.Strings(\"Composer\"),\n\t\tattr.Int(\"Year\"),\n\t\tattr.Int(\"BitRate\"),\n\t\tattr.Int(\"DiscNumber\"),\n\t}\n\tg = index.CommonGroupAttr(commonFields, g)\n\tg = index.RemoveEmptyCollections(g)\n\treturn g\n}\n\nfunc (l *Library) Build(c index.Collection, p index.Path) (index.Group, error) {\n\tif len(p) == 0 {\n\t\treturn c, nil\n\t}\n\n\tc = &rootCollection{c}\n\tvar g index.Group = c\n\tk := index.Key(p[0])\n\tg = c.Get(k)\n\n\tif g == nil {\n\t\treturn g, fmt.Errorf(\"invalid path: near '%v'\", p[0])\n\t}\n\n\tfor i, k := range p[1:] {\n\t\tvar ok bool\n\t\tc, ok = g.(index.Collection)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"retrieved Group is not a Collection\")\n\t\t}\n\n\t\tg = c.Get(k)\n\t\tif g == nil {\n\t\t\treturn g, fmt.Errorf(\"invalid path near '%v'\", p[1:][i])\n\t\t}\n\n\t\tif _, ok = g.(index.Collection); !ok {\n\t\t\tif i == len(p[1:])-1 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"retrieved Group isn't a Collection: %v\", p)\n\t\t}\n\t}\n\tif g == nil {\n\t\treturn g, fmt.Errorf(\"could not find group\")\n\t}\n\tg = index.FirstTrackAttr(attr.String(\"ID\"), g)\n\treturn g, nil\n}\n\n\/\/ Fetch returns a group from the collection with the given path.\nfunc (l *Library) 
Fetch(c index.Collection, p index.Path) (group, error) {\n\tif len(p) == 0 {\n\t\treturn build(c, index.Key(\"Root\")), nil\n\t}\n\n\tk := index.Key(p[0])\n\tg, err := l.Build(c, p)\n\tif err != nil {\n\t\treturn group{}, err\n\t}\n\treturn build(g, k), nil\n}\n\n\/\/ FileSystem wraps the http.FileSystem in a library lookup which will translate \/ID\n\/\/ requests into their corresponding track paths.\nfunc (l *Library) FileSystem(fs store.FileSystem) store.FileSystem {\n\treturn store.Trace(&libraryFileSystem{fs, l.Library}, \"libraryFileSystem\")\n}\n\n\/\/ ExpandPaths constructs a collection (group) whose sub-groups are taken from the \"Root\"\n\/\/ collection.\nfunc (l *Library) ExpandPaths(paths []index.Path) group {\n\treturn build(index.NewPathsCollection(l.collections[\"Root\"], paths), index.Key(\"Root\"))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/sha1\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/dhowden\/tag\"\n\t\"github.com\/tchaik\/tchaik\/index\"\n)\n\nvar fileExtensions = []string{\".mp3\", \".m4a\", \".flac\"}\n\n\/\/ Library is an implementation of index.Library.\ntype Library struct {\n\ttracks map[string]*Track\n}\n\nfunc (l *Library) Track(id string) (index.Track, bool) {\n\tt, ok := l.tracks[id]\n\treturn t, ok\n}\n\nfunc (l *Library) Tracks() []index.Track {\n\ttracks := make([]index.Track, 0, len(l.tracks))\n\tfor _, t := range l.tracks {\n\t\ttracks = append(tracks, t)\n\t}\n\treturn tracks\n}\n\n\/\/ Track is a wrapper around tag.Metadata which implements index.Track\ntype Track struct {\n\ttag.Metadata\n\tLocation string\n\tFileInfo os.FileInfo\n\tCreatedTime time.Time\n}\n\nfunc (m *Track) GetString(name string) string {\n\tswitch name {\n\tcase \"Name\":\n\t\ttitle := m.Title()\n\t\tif title == \"\" {\n\t\t\tfileName := m.FileInfo.Name()\n\t\t\text := filepath.Ext(fileName)\n\t\t\ttitle = strings.TrimSuffix(fileName, ext)\n\t\t}\n\t\treturn title\n\tcase \"Album\":\n\t\treturn m.Album()\n\tcase \"Artist\":\n\t\treturn m.Artist()\n\tcase \"Composer\":\n\t\treturn m.Composer()\n\tcase \"Location\":\n\t\treturn m.Location\n\tcase \"TrackID\":\n\t\tsum := sha1.Sum([]byte(m.Location))\n\t\treturn string(fmt.Sprintf(\"%x\", sum))\n\t}\n\treturn \"\"\n}\n\nfunc (m *Track) GetInt(name string) int {\n\tswitch name {\n\tcase \"Year\":\n\t\treturn m.Year()\n\tcase \"TrackNumber\":\n\t\tx, _ := m.Track()\n\t\treturn x\n\tcase \"TrackCount\":\n\t\t_, n := m.Track()\n\t\treturn n\n\tcase \"DiscNumber\":\n\t\tx, _ := m.Disc()\n\t\treturn x\n\tcase \"DiscCount\":\n\t\t_, n := m.Disc()\n\t\treturn n\n\t}\n\treturn 0\n}\n\nfunc (m *Track) GetTime(name string) time.Time {\n\tswitch name {\n\tcase \"DateModified\":\n\t\treturn m.FileInfo.ModTime()\n\tcase \"DateAdded\":\n\t\treturn m.CreatedTime\n\t}\n\treturn time.Time{}\n}\n\nfunc validExtension(path string) bool {\n\text := strings.ToLower(filepath.Ext(filepath.Base(path)))\n\tfor _, x := range fileExtensions {\n\t\tif ext == x {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc walk(root string) <-chan string {\n\tch := make(chan string)\n\tfn := func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tch <- path\n\t\treturn nil\n\t}\n\n\tgo func() {\n\t\terr := filepath.Walk(root, fn)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t\tclose(ch)\n\t}()\n\treturn ch\n}\n\nfunc processPath(path string) (*Track, error) {\n\tf, 
err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tm, err := tag.ReadFrom(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfileInfo, err := f.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcreatedTime, err := getCreatedTime(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Track{\n\t\tMetadata: m,\n\t\tLocation: path,\n\t\tFileInfo: fileInfo,\n\t\tCreatedTime: createdTime,\n\t}, nil\n}\n<commit_msg>Add AlbumArtist support to the walk mode in tchimport<commit_after>package main\n\nimport (\n\t\"crypto\/sha1\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/dhowden\/tag\"\n\t\"github.com\/tchaik\/tchaik\/index\"\n)\n\nvar fileExtensions = []string{\".mp3\", \".m4a\", \".flac\"}\n\n\/\/ Library is an implementation of index.Library.\ntype Library struct {\n\ttracks map[string]*Track\n}\n\nfunc (l *Library) Track(id string) (index.Track, bool) {\n\tt, ok := l.tracks[id]\n\treturn t, ok\n}\n\nfunc (l *Library) Tracks() []index.Track {\n\ttracks := make([]index.Track, 0, len(l.tracks))\n\tfor _, t := range l.tracks {\n\t\ttracks = append(tracks, t)\n\t}\n\treturn tracks\n}\n\n\/\/ Track is a wrapper around tag.Metadata which implements index.Track\ntype Track struct {\n\ttag.Metadata\n\tLocation string\n\tFileInfo os.FileInfo\n\tCreatedTime time.Time\n}\n\nfunc (m *Track) GetString(name string) string {\n\tswitch name {\n\tcase \"Name\":\n\t\ttitle := m.Title()\n\t\tif title == \"\" {\n\t\t\tfileName := m.FileInfo.Name()\n\t\t\text := filepath.Ext(fileName)\n\t\t\ttitle = strings.TrimSuffix(fileName, ext)\n\t\t}\n\t\treturn title\n\tcase \"Album\":\n\t\treturn m.Album()\n\tcase \"Artist\":\n\t\treturn m.Artist()\n\tcase \"AlbumArtist\":\n\t\treturn m.AlbumArtist()\n\tcase \"Composer\":\n\t\treturn m.Composer()\n\tcase \"Location\":\n\t\treturn m.Location\n\tcase \"TrackID\":\n\t\tsum := sha1.Sum([]byte(m.Location))\n\t\treturn string(fmt.Sprintf(\"%x\", sum))\n\t}\n\treturn \"\"\n}\n\nfunc (m *Track) GetInt(name string) int {\n\tswitch name {\n\tcase \"Year\":\n\t\treturn m.Year()\n\tcase \"TrackNumber\":\n\t\tx, _ := m.Track()\n\t\treturn x\n\tcase \"TrackCount\":\n\t\t_, n := m.Track()\n\t\treturn n\n\tcase \"DiscNumber\":\n\t\tx, _ := m.Disc()\n\t\treturn x\n\tcase \"DiscCount\":\n\t\t_, n := m.Disc()\n\t\treturn n\n\t}\n\treturn 0\n}\n\nfunc (m *Track) GetTime(name string) time.Time {\n\tswitch name {\n\tcase \"DateModified\":\n\t\treturn m.FileInfo.ModTime()\n\tcase \"DateAdded\":\n\t\treturn m.CreatedTime\n\t}\n\treturn time.Time{}\n}\n\nfunc validExtension(path string) bool {\n\text := strings.ToLower(filepath.Ext(filepath.Base(path)))\n\tfor _, x := range fileExtensions {\n\t\tif ext == x {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc walk(root string) <-chan string {\n\tch := make(chan string)\n\tfn := func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tch <- path\n\t\treturn nil\n\t}\n\n\tgo func() {\n\t\terr := filepath.Walk(root, fn)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t\tclose(ch)\n\t}()\n\treturn ch\n}\n\nfunc processPath(path string) (*Track, error) {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tm, err := tag.ReadFrom(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfileInfo, err := f.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcreatedTime, err := 
getCreatedTime(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Track{\n\t\tMetadata: m,\n\t\tLocation: path,\n\t\tFileInfo: fileInfo,\n\t\tCreatedTime: createdTime,\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this\n\/\/ file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\/\/\n\/\/ Contributor: Julien Vehent jvehent@mozilla.com [:ulfr]\npackage main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/jvehent\/cljs\"\n\t\"mig\"\n\tmigdb \"mig\/database\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype pagination struct {\n\tLimit float64 `json:\"limit\"`\n\tOffset float64 `json:\"offset\"`\n\tNext string `json:\"next\"`\n}\n\n\/\/ search runs searches\nfunc search(respWriter http.ResponseWriter, request *http.Request) {\n\tvar (\n\t\terr error\n\t\tp migdb.SearchParameters\n\t\tfilterFound bool\n\t)\n\topid := getOpID(request)\n\tloc := fmt.Sprintf(\"%s%s\", ctx.Server.Host, request.URL.String())\n\tresource := cljs.New(loc)\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\t\/\/ on panic, log and return error to client, including the search parameters\n\t\t\tctx.Channels.Log <- mig.Log{OpID: opid, Desc: fmt.Sprintf(\"%v\", e)}.Err()\n\t\t\tresource.AddItem(cljs.Item{\n\t\t\t\tHref: loc,\n\t\t\t\tData: []cljs.Data{{Name: \"search parameters\", Value: p}},\n\t\t\t})\n\t\t\tresource.SetError(cljs.Error{Code: fmt.Sprintf(\"%.0f\", opid), Message: fmt.Sprintf(\"%v\", e)})\n\t\t\tif fmt.Sprintf(\"%v\", e) == \"no results found\" {\n\t\t\t\trespond(404, resource, respWriter, request)\n\t\t\t} else {\n\t\t\t\trespond(500, resource, respWriter, request)\n\t\t\t}\n\n\t\t}\n\t\tctx.Channels.Log <- mig.Log{OpID: opid, Desc: \"leaving search()\"}.Debug()\n\t}()\n\n\tp, filterFound, err = parseSearchParameters(request.URL.Query())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ run the search based on the type\n\tvar results interface{}\n\tswitch p.Type {\n\tcase \"action\":\n\t\tresults, err = ctx.DB.SearchActions(p)\n\tcase \"agent\":\n\t\tif p.Target != \"\" {\n\t\t\tresults, err = ctx.DB.ActiveAgentsByTarget(p.Target)\n\t\t} else {\n\t\t\tresults, err = ctx.DB.SearchAgents(p)\n\t\t}\n\tcase \"command\":\n\t\tresults, err = ctx.DB.SearchCommands(p, filterFound)\n\tcase \"investigator\":\n\t\tresults, err = ctx.DB.SearchInvestigators(p)\n\tdefault:\n\t\tpanic(\"search type is invalid\")\n\t}\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ prepare the output in the requested format\n\tswitch p.Report {\n\tcase \"complianceitems\":\n\t\tif p.Type != \"command\" {\n\t\t\tpanic(\"compliance items reporting is only available for the 'command' type\")\n\t\t}\n\t\titems, err := commandsToComplianceItems(results.([]mig.Command))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfor i, item := range items {\n\t\t\terr = resource.AddItem(cljs.Item{\n\t\t\t\tHref: fmt.Sprintf(\"%s%s\/search?type=command?agentname=%s&commandid=%s&actionid=%s&threatfamily=compliance&report=complianceitems\",\n\t\t\t\t\tctx.Server.Host, ctx.Server.BaseRoute, item.Target, p.CommandID, p.ActionID),\n\t\t\t\tData: []cljs.Data{{Name: \"compliance item\", Value: item}},\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tif float64(i) > p.Limit {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\tcase \"geolocations\":\n\t\tif p.Type != \"command\" {\n\t\t\tpanic(\"geolocations reporting is only available for the 'command' 
type\")\n\t\t}\n\t\titems, err := commandsToGeolocations(results.([]mig.Command))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfor i, item := range items {\n\t\t\terr = resource.AddItem(cljs.Item{\n\t\t\t\tHref: fmt.Sprintf(\"%s%s\/search?type=command?agentname=%s&commandid=%s&actionid=%s&report=geolocations\",\n\t\t\t\t\tctx.Server.Host, ctx.Server.BaseRoute, item.Endpoint, p.CommandID, p.ActionID),\n\t\t\t\tData: []cljs.Data{{Name: \"geolocation\", Value: item}},\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tif float64(i) > p.Limit {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tswitch p.Type {\n\t\tcase \"action\":\n\t\t\tctx.Channels.Log <- mig.Log{OpID: opid, Desc: fmt.Sprintf(\"returning search results with %d actions\", len(results.([]mig.Action)))}\n\t\t\tif len(results.([]mig.Action)) == 0 {\n\t\t\t\tpanic(\"no results found\")\n\t\t\t}\n\t\t\tfor i, r := range results.([]mig.Action) {\n\t\t\t\terr = resource.AddItem(cljs.Item{\n\t\t\t\t\tHref: fmt.Sprintf(\"%s%s\/action?actionid=%.0f\",\n\t\t\t\t\t\tctx.Server.Host, ctx.Server.BaseRoute, r.ID),\n\t\t\t\t\tData: []cljs.Data{{Name: p.Type, Value: r}},\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif float64(i) > p.Limit {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"agent\":\n\t\t\tctx.Channels.Log <- mig.Log{OpID: opid, Desc: fmt.Sprintf(\"returning search results with %d agents\", len(results.([]mig.Agent)))}\n\t\t\tif len(results.([]mig.Agent)) == 0 {\n\t\t\t\tpanic(\"no results found\")\n\t\t\t}\n\t\t\tfor i, r := range results.([]mig.Agent) {\n\t\t\t\terr = resource.AddItem(cljs.Item{\n\t\t\t\t\tHref: fmt.Sprintf(\"%s%s\/agent?agentid=%.0f\",\n\t\t\t\t\t\tctx.Server.Host, ctx.Server.BaseRoute, r.ID),\n\t\t\t\t\tData: []cljs.Data{{Name: p.Type, Value: r}},\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif float64(i) > p.Limit {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"command\":\n\t\t\tctx.Channels.Log <- mig.Log{OpID: opid, Desc: fmt.Sprintf(\"returning search results with %d commands\", len(results.([]mig.Command)))}\n\t\t\tif len(results.([]mig.Command)) == 0 {\n\t\t\t\tpanic(\"no results found\")\n\t\t\t}\n\t\t\tfor i, r := range results.([]mig.Command) {\n\t\t\t\terr = resource.AddItem(cljs.Item{\n\t\t\t\t\tHref: fmt.Sprintf(\"%s%s\/command?commandid=%.0f\",\n\t\t\t\t\t\tctx.Server.Host, ctx.Server.BaseRoute, r.ID),\n\t\t\t\t\tData: []cljs.Data{{Name: p.Type, Value: r}},\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif float64(i) > p.Limit {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"investigator\":\n\t\t\tctx.Channels.Log <- mig.Log{OpID: opid, Desc: fmt.Sprintf(\"returning search results with %d investigators\", len(results.([]mig.Investigator)))}\n\t\t\tif len(results.([]mig.Investigator)) == 0 {\n\t\t\t\tpanic(\"no results found\")\n\t\t\t}\n\t\t\tfor i, r := range results.([]mig.Investigator) {\n\t\t\t\terr = resource.AddItem(cljs.Item{\n\t\t\t\t\tHref: fmt.Sprintf(\"%s%s\/investigator?investigatorid=%.0f\",\n\t\t\t\t\t\tctx.Server.Host, ctx.Server.BaseRoute, r.ID),\n\t\t\t\t\tData: []cljs.Data{{Name: p.Type, Value: r}},\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif float64(i) > p.Limit {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ if needed, add pagination info\n\tif p.Offset > 0 {\n\t\t\/\/ go to next limit and offset values before making the next URL\n\t\tnextCount := p.Limit - p.Offset\n\t\tnextP := p\n\t\tnextP.Limit += 
nextCount\n\t\tnextP.Offset += nextCount\n\t\tpage := pagination{\n\t\t\tLimit: p.Limit,\n\t\t\tOffset: p.Offset,\n\t\t\tNext: ctx.Server.BaseURL + \"\/search?\" + nextP.String(),\n\t\t}\n\t\terr = resource.AddItem(cljs.Item{\n\t\t\tHref: loc,\n\t\t\tData: []cljs.Data{{Name: \"pagination\", Value: page}},\n\t\t})\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\t\/\/ add search parameters at the end of the response\n\terr = resource.AddItem(cljs.Item{\n\t\tHref: loc,\n\t\tData: []cljs.Data{{Name: \"search parameters\", Value: p}},\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\trespond(200, resource, respWriter, request)\n}\n\n\/\/ truere is a case insensitive regex that matches the string 'true'\nvar truere = regexp.MustCompile(\"(?i)^true$\")\n\n\/\/ false is a case insensitive regex that matches the string 'false'\nvar falsere = regexp.MustCompile(\"(?i)^false$\")\n\n\/\/ parseSearchParameters transforms a query string into search parameters in the migdb format\nfunc parseSearchParameters(qp url.Values) (p migdb.SearchParameters, filterFound bool, err error) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = fmt.Errorf(\"parseSearchParameters()-> %v\", e)\n\t\t}\n\t}()\n\tp = migdb.NewSearchParameters()\n\tfor queryParams, _ := range qp {\n\t\tswitch queryParams {\n\t\tcase \"actionname\":\n\t\t\tp.ActionName = qp[\"actionname\"][0]\n\t\tcase \"actionid\":\n\t\t\tp.ActionID = qp[\"actionid\"][0]\n\t\tcase \"after\":\n\t\t\tp.After, err = time.Parse(time.RFC3339, qp[\"after\"][0])\n\t\t\tif err != nil {\n\t\t\t\tpanic(\"after date not in RFC3339 format\")\n\t\t\t}\n\t\tcase \"agentid\":\n\t\t\tp.AgentID = qp[\"agentid\"][0]\n\t\tcase \"agentname\":\n\t\t\tp.AgentName = qp[\"agentname\"][0]\n\t\tcase \"before\":\n\t\t\tp.Before, err = time.Parse(time.RFC3339, qp[\"before\"][0])\n\t\t\tif err != nil {\n\t\t\t\tpanic(\"before date not in RFC3339 format\")\n\t\t\t}\n\t\tcase \"commandid\":\n\t\t\tp.CommandID = qp[\"commandid\"][0]\n\t\tcase \"foundanything\":\n\t\t\tif truere.MatchString(qp[\"foundanything\"][0]) {\n\t\t\t\tp.FoundAnything = true\n\t\t\t} else if falsere.MatchString(qp[\"foundanything\"][0]) {\n\t\t\t\tp.FoundAnything = false\n\t\t\t} else {\n\t\t\t\tpanic(\"foundanything parameter must be true or false\")\n\t\t\t}\n\t\t\tfilterFound = true\n\t\tcase \"investigatorid\":\n\t\t\tp.InvestigatorID = qp[\"investigatorid\"][0]\n\t\tcase \"investigatorname\":\n\t\t\tp.InvestigatorName = qp[\"investigatorname\"][0]\n\t\tcase \"limit\":\n\t\t\tp.Limit, err = strconv.ParseFloat(qp[\"limit\"][0], 64)\n\t\t\tif err != nil {\n\t\t\t\tpanic(\"invalid limit parameter\")\n\t\t\t}\n\t\tcase \"offset\":\n\t\t\tp.Offset, err = strconv.ParseFloat(qp[\"offset\"][0], 64)\n\t\t\tif err != nil {\n\t\t\t\tpanic(\"invalid offset parameter\")\n\t\t\t}\n\t\tcase \"report\":\n\t\t\tswitch qp[\"report\"][0] {\n\t\t\tcase \"complianceitems\":\n\t\t\t\tp.Report = qp[\"report\"][0]\n\t\t\tcase \"geolocations\":\n\t\t\t\tp.Report = qp[\"report\"][0]\n\t\t\tdefault:\n\t\t\t\tpanic(\"report not implemented\")\n\t\t\t}\n\t\tcase \"status\":\n\t\t\tp.Status = qp[\"status\"][0]\n\t\tcase \"target\":\n\t\t\tp.Target = qp[\"target\"][0]\n\t\tcase \"threatfamily\":\n\t\t\tp.ThreatFamily = qp[\"threatfamily\"][0]\n\t\tcase \"type\":\n\t\t\tp.Type = qp[\"type\"][0]\n\t\t}\n\t}\n\treturn\n}\n<commit_msg>[minor] fix next offset indicator in api response<commit_after>\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. 
If a copy of the MPL was not distributed with this\n\/\/ file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\/\/\n\/\/ Contributor: Julien Vehent jvehent@mozilla.com [:ulfr]\npackage main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/jvehent\/cljs\"\n\t\"mig\"\n\tmigdb \"mig\/database\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype pagination struct {\n\tLimit float64 `json:\"limit\"`\n\tOffset float64 `json:\"offset\"`\n\tNext string `json:\"next\"`\n}\n\n\/\/ search runs searches\nfunc search(respWriter http.ResponseWriter, request *http.Request) {\n\tvar (\n\t\terr error\n\t\tp migdb.SearchParameters\n\t\tfilterFound bool\n\t)\n\topid := getOpID(request)\n\tloc := fmt.Sprintf(\"%s%s\", ctx.Server.Host, request.URL.String())\n\tresource := cljs.New(loc)\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\t\/\/ on panic, log and return error to client, including the search parameters\n\t\t\tctx.Channels.Log <- mig.Log{OpID: opid, Desc: fmt.Sprintf(\"%v\", e)}.Err()\n\t\t\tresource.AddItem(cljs.Item{\n\t\t\t\tHref: loc,\n\t\t\t\tData: []cljs.Data{{Name: \"search parameters\", Value: p}},\n\t\t\t})\n\t\t\tresource.SetError(cljs.Error{Code: fmt.Sprintf(\"%.0f\", opid), Message: fmt.Sprintf(\"%v\", e)})\n\t\t\tif fmt.Sprintf(\"%v\", e) == \"no results found\" {\n\t\t\t\trespond(404, resource, respWriter, request)\n\t\t\t} else {\n\t\t\t\trespond(500, resource, respWriter, request)\n\t\t\t}\n\n\t\t}\n\t\tctx.Channels.Log <- mig.Log{OpID: opid, Desc: \"leaving search()\"}.Debug()\n\t}()\n\n\tp, filterFound, err = parseSearchParameters(request.URL.Query())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ run the search based on the type\n\tvar results interface{}\n\tswitch p.Type {\n\tcase \"action\":\n\t\tresults, err = ctx.DB.SearchActions(p)\n\tcase \"agent\":\n\t\tif p.Target != \"\" {\n\t\t\tresults, err = ctx.DB.ActiveAgentsByTarget(p.Target)\n\t\t} else {\n\t\t\tresults, err = ctx.DB.SearchAgents(p)\n\t\t}\n\tcase \"command\":\n\t\tresults, err = ctx.DB.SearchCommands(p, filterFound)\n\tcase \"investigator\":\n\t\tresults, err = ctx.DB.SearchInvestigators(p)\n\tdefault:\n\t\tpanic(\"search type is invalid\")\n\t}\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ prepare the output in the requested format\n\tswitch p.Report {\n\tcase \"complianceitems\":\n\t\tif p.Type != \"command\" {\n\t\t\tpanic(\"compliance items reporting is only available for the 'command' type\")\n\t\t}\n\t\titems, err := commandsToComplianceItems(results.([]mig.Command))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfor i, item := range items {\n\t\t\terr = resource.AddItem(cljs.Item{\n\t\t\t\tHref: fmt.Sprintf(\"%s%s\/search?type=command?agentname=%s&commandid=%s&actionid=%s&threatfamily=compliance&report=complianceitems\",\n\t\t\t\t\tctx.Server.Host, ctx.Server.BaseRoute, item.Target, p.CommandID, p.ActionID),\n\t\t\t\tData: []cljs.Data{{Name: \"compliance item\", Value: item}},\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tif float64(i) > p.Limit {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\tcase \"geolocations\":\n\t\tif p.Type != \"command\" {\n\t\t\tpanic(\"geolocations reporting is only available for the 'command' type\")\n\t\t}\n\t\titems, err := commandsToGeolocations(results.([]mig.Command))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfor i, item := range items {\n\t\t\terr = resource.AddItem(cljs.Item{\n\t\t\t\tHref: 
fmt.Sprintf(\"%s%s\/search?type=command?agentname=%s&commandid=%s&actionid=%s&report=geolocations\",\n\t\t\t\t\tctx.Server.Host, ctx.Server.BaseRoute, item.Endpoint, p.CommandID, p.ActionID),\n\t\t\t\tData: []cljs.Data{{Name: \"geolocation\", Value: item}},\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tif float64(i) > p.Limit {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tswitch p.Type {\n\t\tcase \"action\":\n\t\t\tctx.Channels.Log <- mig.Log{OpID: opid, Desc: fmt.Sprintf(\"returning search results with %d actions\", len(results.([]mig.Action)))}\n\t\t\tif len(results.([]mig.Action)) == 0 {\n\t\t\t\tpanic(\"no results found\")\n\t\t\t}\n\t\t\tfor i, r := range results.([]mig.Action) {\n\t\t\t\terr = resource.AddItem(cljs.Item{\n\t\t\t\t\tHref: fmt.Sprintf(\"%s%s\/action?actionid=%.0f\",\n\t\t\t\t\t\tctx.Server.Host, ctx.Server.BaseRoute, r.ID),\n\t\t\t\t\tData: []cljs.Data{{Name: p.Type, Value: r}},\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif float64(i) > p.Limit {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"agent\":\n\t\t\tctx.Channels.Log <- mig.Log{OpID: opid, Desc: fmt.Sprintf(\"returning search results with %d agents\", len(results.([]mig.Agent)))}\n\t\t\tif len(results.([]mig.Agent)) == 0 {\n\t\t\t\tpanic(\"no results found\")\n\t\t\t}\n\t\t\tfor i, r := range results.([]mig.Agent) {\n\t\t\t\terr = resource.AddItem(cljs.Item{\n\t\t\t\t\tHref: fmt.Sprintf(\"%s%s\/agent?agentid=%.0f\",\n\t\t\t\t\t\tctx.Server.Host, ctx.Server.BaseRoute, r.ID),\n\t\t\t\t\tData: []cljs.Data{{Name: p.Type, Value: r}},\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif float64(i) > p.Limit {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"command\":\n\t\t\tctx.Channels.Log <- mig.Log{OpID: opid, Desc: fmt.Sprintf(\"returning search results with %d commands\", len(results.([]mig.Command)))}\n\t\t\tif len(results.([]mig.Command)) == 0 {\n\t\t\t\tpanic(\"no results found\")\n\t\t\t}\n\t\t\tfor i, r := range results.([]mig.Command) {\n\t\t\t\terr = resource.AddItem(cljs.Item{\n\t\t\t\t\tHref: fmt.Sprintf(\"%s%s\/command?commandid=%.0f\",\n\t\t\t\t\t\tctx.Server.Host, ctx.Server.BaseRoute, r.ID),\n\t\t\t\t\tData: []cljs.Data{{Name: p.Type, Value: r}},\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif float64(i) > p.Limit {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"investigator\":\n\t\t\tctx.Channels.Log <- mig.Log{OpID: opid, Desc: fmt.Sprintf(\"returning search results with %d investigators\", len(results.([]mig.Investigator)))}\n\t\t\tif len(results.([]mig.Investigator)) == 0 {\n\t\t\t\tpanic(\"no results found\")\n\t\t\t}\n\t\t\tfor i, r := range results.([]mig.Investigator) {\n\t\t\t\terr = resource.AddItem(cljs.Item{\n\t\t\t\t\tHref: fmt.Sprintf(\"%s%s\/investigator?investigatorid=%.0f\",\n\t\t\t\t\t\tctx.Server.Host, ctx.Server.BaseRoute, r.ID),\n\t\t\t\t\tData: []cljs.Data{{Name: p.Type, Value: r}},\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif float64(i) > p.Limit {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ if needed, add pagination info\n\tif p.Offset > 0 {\n\t\tnextP := p\n\t\tnextP.Offset += p.Limit\n\t\tpage := pagination{\n\t\t\tLimit: p.Limit,\n\t\t\tOffset: p.Offset,\n\t\t\tNext: ctx.Server.BaseURL + \"\/search?\" + nextP.String(),\n\t\t}\n\t\terr = resource.AddItem(cljs.Item{\n\t\t\tHref: loc,\n\t\t\tData: []cljs.Data{{Name: \"pagination\", Value: page}},\n\t\t})\n\t\tif err != nil 
{\n\t\t\tpanic(err)\n\t\t}\n\t}\n\t\/\/ add search parameters at the end of the response\n\terr = resource.AddItem(cljs.Item{\n\t\tHref: loc,\n\t\tData: []cljs.Data{{Name: \"search parameters\", Value: p}},\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\trespond(200, resource, respWriter, request)\n}\n\n\/\/ truere is a case insensitive regex that matches the string 'true'\nvar truere = regexp.MustCompile(\"(?i)^true$\")\n\n\/\/ falsere is a case insensitive regex that matches the string 'false'\nvar falsere = regexp.MustCompile(\"(?i)^false$\")\n\n\/\/ parseSearchParameters transforms a query string into search parameters in the migdb format\nfunc parseSearchParameters(qp url.Values) (p migdb.SearchParameters, filterFound bool, err error) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = fmt.Errorf(\"parseSearchParameters()-> %v\", e)\n\t\t}\n\t}()\n\tp = migdb.NewSearchParameters()\n\tfor queryParams := range qp {\n\t\tswitch queryParams {\n\t\tcase \"actionname\":\n\t\t\tp.ActionName = qp[\"actionname\"][0]\n\t\tcase \"actionid\":\n\t\t\tp.ActionID = qp[\"actionid\"][0]\n\t\tcase \"after\":\n\t\t\tp.After, err = time.Parse(time.RFC3339, qp[\"after\"][0])\n\t\t\tif err != nil {\n\t\t\t\tpanic(\"after date not in RFC3339 format\")\n\t\t\t}\n\t\tcase \"agentid\":\n\t\t\tp.AgentID = qp[\"agentid\"][0]\n\t\tcase \"agentname\":\n\t\t\tp.AgentName = qp[\"agentname\"][0]\n\t\tcase \"before\":\n\t\t\tp.Before, err = time.Parse(time.RFC3339, qp[\"before\"][0])\n\t\t\tif err != nil {\n\t\t\t\tpanic(\"before date not in RFC3339 format\")\n\t\t\t}\n\t\tcase \"commandid\":\n\t\t\tp.CommandID = qp[\"commandid\"][0]\n\t\tcase \"foundanything\":\n\t\t\tif truere.MatchString(qp[\"foundanything\"][0]) {\n\t\t\t\tp.FoundAnything = true\n\t\t\t} else if falsere.MatchString(qp[\"foundanything\"][0]) {\n\t\t\t\tp.FoundAnything = false\n\t\t\t} else {\n\t\t\t\tpanic(\"foundanything parameter must be true or false\")\n\t\t\t}\n\t\t\tfilterFound = true\n\t\tcase \"investigatorid\":\n\t\t\tp.InvestigatorID = qp[\"investigatorid\"][0]\n\t\tcase \"investigatorname\":\n\t\t\tp.InvestigatorName = qp[\"investigatorname\"][0]\n\t\tcase \"limit\":\n\t\t\tp.Limit, err = strconv.ParseFloat(qp[\"limit\"][0], 64)\n\t\t\tif err != nil {\n\t\t\t\tpanic(\"invalid limit parameter\")\n\t\t\t}\n\t\tcase \"offset\":\n\t\t\tp.Offset, err = strconv.ParseFloat(qp[\"offset\"][0], 64)\n\t\t\tif err != nil {\n\t\t\t\tpanic(\"invalid offset parameter\")\n\t\t\t}\n\t\tcase \"report\":\n\t\t\tswitch qp[\"report\"][0] {\n\t\t\tcase \"complianceitems\":\n\t\t\t\tp.Report = qp[\"report\"][0]\n\t\t\tcase \"geolocations\":\n\t\t\t\tp.Report = qp[\"report\"][0]\n\t\t\tdefault:\n\t\t\t\tpanic(\"report not implemented\")\n\t\t\t}\n\t\tcase \"status\":\n\t\t\tp.Status = qp[\"status\"][0]\n\t\tcase \"target\":\n\t\t\tp.Target = qp[\"target\"][0]\n\t\tcase \"threatfamily\":\n\t\t\tp.ThreatFamily = qp[\"threatfamily\"][0]\n\t\tcase \"type\":\n\t\t\tp.Type = qp[\"type\"][0]\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package semver\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode\"\n)\n\ntype wildcardType int\n\nconst (\n\tnoneWildcard wildcardType = iota\n\tmajorWildcard wildcardType = 1\n\tminorWildcard wildcardType = 2\n\tpatchWildcard wildcardType = 3\n)\n\nfunc wildcardTypefromInt(i int) wildcardType {\n\tswitch i {\n\tcase 1:\n\t\treturn majorWildcard\n\tcase 2:\n\t\treturn minorWildcard\n\tcase 3:\n\t\treturn patchWildcard\n\tdefault:\n\t\treturn noneWildcard\n\t}\n}\n\ntype comparator 
func(Version, Version) bool\n\nvar (\n\tcompEQ comparator = func(v1 Version, v2 Version) bool {\n\t\treturn v1.Compare(v2) == 0\n\t}\n\tcompNE = func(v1 Version, v2 Version) bool {\n\t\treturn v1.Compare(v2) != 0\n\t}\n\tcompGT = func(v1 Version, v2 Version) bool {\n\t\treturn v1.Compare(v2) == 1\n\t}\n\tcompGE = func(v1 Version, v2 Version) bool {\n\t\treturn v1.Compare(v2) >= 0\n\t}\n\tcompLT = func(v1 Version, v2 Version) bool {\n\t\treturn v1.Compare(v2) == -1\n\t}\n\tcompLE = func(v1 Version, v2 Version) bool {\n\t\treturn v1.Compare(v2) <= 0\n\t}\n)\n\ntype versionRange struct {\n\tv Version\n\tc comparator\n}\n\n\/\/ rangeFunc creates a Range from the given versionRange.\nfunc (vr *versionRange) rangeFunc() Range {\n\treturn Range(func(v Version) bool {\n\t\treturn vr.c(v, vr.v)\n\t})\n}\n\n\/\/ Range represents a range of versions.\n\/\/ A Range can be used to check if a Version satisfies it:\n\/\/\n\/\/ range, err := semver.ParseRange(\">1.0.0 <2.0.0\")\n\/\/ range(semver.MustParse(\"1.1.1\")) \/\/ returns true\ntype Range func(Version) bool\n\n\/\/ OR combines the existing Range with another Range using logical OR.\nfunc (rf Range) OR(f Range) Range {\n\treturn Range(func(v Version) bool {\n\t\treturn rf(v) || f(v)\n\t})\n}\n\n\/\/ AND combines the existing Range with another Range using logical AND.\nfunc (rf Range) AND(f Range) Range {\n\treturn Range(func(v Version) bool {\n\t\treturn rf(v) && f(v)\n\t})\n}\n
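\n\/\/ exampleRangeComposition is an editor-added illustrative sketch, not part of\n\/\/ the upstream file: it shows that composing Ranges by hand with AND\/OR is\n\/\/ equivalent to what ParseRange builds from \">=1.0.0 <2.0.0 || <0.9.0\".\nfunc exampleRangeComposition(v Version) bool {\n\tstable := MustParseRange(\">=1.0.0\").AND(MustParseRange(\"<2.0.0\"))\n\tlegacy := MustParseRange(\"<0.9.0\")\n\t\/\/ AND binds tighter than OR, mirroring the precedence rules in ParseRange\n\treturn stable.OR(legacy)(v)\n}\n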
\n\/\/ ParseRange parses a range and returns a Range.\n\/\/ If the range could not be parsed an error is returned.\n\/\/\n\/\/ Valid ranges are:\n\/\/ - \"<1.0.0\"\n\/\/ - \"<=1.0.0\"\n\/\/ - \">1.0.0\"\n\/\/ - \">=1.0.0\"\n\/\/ - \"1.0.0\", \"=1.0.0\", \"==1.0.0\"\n\/\/ - \"!1.0.0\", \"!=1.0.0\"\n\/\/\n\/\/ A Range can consist of multiple ranges separated by space:\n\/\/ Ranges can be linked by logical AND:\n\/\/ - \">1.0.0 <2.0.0\" would match between both ranges, so \"1.1.1\" and \"1.8.7\" but not \"1.0.0\" or \"2.0.0\"\n\/\/ - \">1.0.0 <3.0.0 !2.0.3-beta.2\" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2\n\/\/\n\/\/ Ranges can also be linked by logical OR:\n\/\/ - \"<2.0.0 || >=3.0.0\" would match \"1.x.x\" and \"3.x.x\" but not \"2.x.x\"\n\/\/\n\/\/ AND has a higher precedence than OR. It's not possible to use brackets.\n\/\/\n\/\/ Ranges can be combined by both AND and OR\n\/\/\n\/\/ - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`\nfunc ParseRange(s string) (Range, error) {\n\tparts := splitAndTrim(s)\n\torParts, err := splitORParts(parts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\texpandedParts, err := expandWildcardVersion(orParts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar orFn Range\n\tfor _, p := range expandedParts {\n\t\tvar andFn Range\n\t\tfor _, ap := range p {\n\t\t\topStr, vStr, err := splitComparatorVersion(ap)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tvr, err := buildVersionRange(opStr, vStr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Could not parse Range %q: %s\", ap, err)\n\t\t\t}\n\t\t\trf := vr.rangeFunc()\n\n\t\t\t\/\/ Set function\n\t\t\tif andFn == nil {\n\t\t\t\tandFn = rf\n\t\t\t} else { \/\/ Combine with existing function\n\t\t\t\tandFn = andFn.AND(rf)\n\t\t\t}\n\t\t}\n\t\tif orFn == nil {\n\t\t\torFn = andFn\n\t\t} else {\n\t\t\torFn = orFn.OR(andFn)\n\t\t}\n\n\t}\n\treturn orFn, nil\n}\n\n\/\/ splitORParts splits the already cleaned parts by '||'.\n\/\/ Checks for invalid positions of the operator and returns an\n\/\/ error if found.\nfunc splitORParts(parts []string) ([][]string, error) {\n\tvar ORparts [][]string\n\tlast := 0\n\tfor i, p := range parts {\n\t\tif p == \"||\" {\n\t\t\tif i == 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"First element in range is '||'\")\n\t\t\t}\n\t\t\tORparts = append(ORparts, parts[last:i])\n\t\t\tlast = i + 1\n\t\t}\n\t}\n\tif last == len(parts) {\n\t\treturn nil, fmt.Errorf(\"Last element in range is '||'\")\n\t}\n\tORparts = append(ORparts, parts[last:])\n\treturn ORparts, nil\n}\n\n\/\/ buildVersionRange takes a slice of 2: operator and version\n\/\/ and builds a versionRange, otherwise an error.\nfunc buildVersionRange(opStr, vStr string) (*versionRange, error) {\n\tc := parseComparator(opStr)\n\tif c == nil {\n\t\treturn nil, fmt.Errorf(\"Could not parse comparator %q in %q\", opStr, strings.Join([]string{opStr, vStr}, \"\"))\n\t}\n\tv, err := Parse(vStr)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not parse version %q in %q: %s\", vStr, strings.Join([]string{opStr, vStr}, \"\"), err)\n\t}\n\n\treturn &versionRange{\n\t\tv: v,\n\t\tc: c,\n\t}, nil\n\n}\n\n\/\/ inArray checks if a byte is contained in an array of bytes\nfunc inArray(s byte, list []byte) bool {\n\tfor _, el := range list {\n\t\tif el == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ splitAndTrim splits a range string by spaces and cleans whitespaces\nfunc splitAndTrim(s string) (result []string) {\n\tlast := 0\n\tvar lastChar byte\n\texcludeFromSplit := []byte{'>', '<', '='}\n\tfor i := 0; i < len(s); i++ {\n\t\tif s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {\n\t\t\tif last < i-1 {\n\t\t\t\tresult = append(result, s[last:i])\n\t\t\t}\n\t\t\tlast = i + 1\n\t\t} else if s[i] != ' ' {\n\t\t\tlastChar = s[i]\n\t\t}\n\t}\n\tif last < len(s)-1 {\n\t\tresult = append(result, s[last:])\n\t}\n\n\tfor i, v := range result {\n\t\tresult[i] = strings.Replace(v, \" \", \"\", -1)\n\t}\n\n\t\/\/ parts := strings.Split(s, \" \")\n\t\/\/ for _, x := range parts {\n\t\/\/ \tif s := strings.TrimSpace(x); len(s) != 0 {\n\t\/\/ \t\tresult = append(result, s)\n\t\/\/ \t}\n\t\/\/ }\n\treturn\n}\n\n\/\/ splitComparatorVersion splits the comparator from the version.\n\/\/ Input must be free of leading or trailing spaces.\nfunc 
splitComparatorVersion(s string) (string, string, error) {\n\ti := strings.IndexFunc(s, unicode.IsDigit)\n\tif i == -1 {\n\t\treturn \"\", \"\", fmt.Errorf(\"Could not get version from string: %q\", s)\n\t}\n\treturn strings.TrimSpace(s[0:i]), s[i:], nil\n}\n\n\/\/ getWildcardType will return the type of wildcard that the\n\/\/ passed version contains\nfunc getWildcardType(vStr string) wildcardType {\n\tparts := strings.Split(vStr, \".\")\n\tnparts := len(parts)\n\twildcard := parts[nparts-1]\n\n\tpossibleWildcardType := wildcardTypefromInt(nparts)\n\tif wildcard == \"x\" {\n\t\treturn possibleWildcardType\n\t}\n\n\treturn noneWildcard\n}\n\n\/\/ createVersionFromWildcard will convert a wildcard version\n\/\/ into a regular version, replacing 'x's with '0's, handling\n\/\/ special cases like '1.x.x' and '1.x'\nfunc createVersionFromWildcard(vStr string) string {\n\t\/\/ handle 1.x.x\n\tvStr2 := strings.Replace(vStr, \".x.x\", \".x\", 1)\n\tvStr2 = strings.Replace(vStr2, \".x\", \".0\", 1)\n\tparts := strings.Split(vStr2, \".\")\n\n\t\/\/ handle 1.x\n\tif len(parts) == 2 {\n\t\treturn vStr2 + \".0\"\n\t}\n\n\treturn vStr2\n}\n\n\/\/ incrementMajorVersion will increment the major version\n\/\/ of the passed version\nfunc incrementMajorVersion(vStr string) (string, error) {\n\tparts := strings.Split(vStr, \".\")\n\ti, err := strconv.Atoi(parts[0])\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tparts[0] = strconv.Itoa(i + 1)\n\n\treturn strings.Join(parts, \".\"), nil\n}\n\n\/\/ incrementMinorVersion will increment the minor version\n\/\/ of the passed version\nfunc incrementMinorVersion(vStr string) (string, error) {\n\tparts := strings.Split(vStr, \".\")\n\ti, err := strconv.Atoi(parts[1])\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tparts[1] = strconv.Itoa(i + 1)\n\n\treturn strings.Join(parts, \".\"), nil\n}\n
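\n\/\/ exampleWildcardExpansion is an editor-added illustrative sketch, not part\n\/\/ of the upstream file: following the rules documented on\n\/\/ expandWildcardVersion below, the bare constraint \"1.2.x\" expands into the\n\/\/ AND pair \">=1.2.0 <1.3.0\".\nfunc exampleWildcardExpansion() ([][]string, error) {\n\t\/\/ a single OR-group holding one wildcard constraint\n\treturn expandWildcardVersion([][]string{{\"1.2.x\"}})\n}\n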
\"<\"\n\t\t\t\t\tshouldIncrementVersion = true\n\t\t\t\tcase \"!=\", \"!\":\n\t\t\t\t\tnewParts = append(newParts, \"<\"+flatVersion)\n\t\t\t\t\tresultOperator = \">=\"\n\t\t\t\t\tshouldIncrementVersion = true\n\t\t\t\t}\n\n\t\t\t\tvar resultVersion string\n\t\t\t\tif shouldIncrementVersion {\n\t\t\t\t\tswitch versionWildcardType {\n\t\t\t\t\tcase patchWildcard:\n\t\t\t\t\t\tresultVersion, _ = incrementMinorVersion(flatVersion)\n\t\t\t\t\tcase minorWildcard:\n\t\t\t\t\t\tresultVersion, _ = incrementMajorVersion(flatVersion)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tresultVersion = flatVersion\n\t\t\t\t}\n\n\t\t\t\tap = resultOperator + resultVersion\n\t\t\t}\n\t\t\tnewParts = append(newParts, ap)\n\t\t}\n\t\texpandedParts = append(expandedParts, newParts)\n\t}\n\n\treturn expandedParts, nil\n}\n\nfunc parseComparator(s string) comparator {\n\tswitch s {\n\tcase \"==\":\n\t\tfallthrough\n\tcase \"\":\n\t\tfallthrough\n\tcase \"=\":\n\t\treturn compEQ\n\tcase \">\":\n\t\treturn compGT\n\tcase \">=\":\n\t\treturn compGE\n\tcase \"<\":\n\t\treturn compLT\n\tcase \"<=\":\n\t\treturn compLE\n\tcase \"!\":\n\t\tfallthrough\n\tcase \"!=\":\n\t\treturn compNE\n\t}\n\n\treturn nil\n}\n\n\/\/ MustParseRange is like ParseRange but panics if the range cannot be parsed.\nfunc MustParseRange(s string) Range {\n\tr, err := ParseRange(s)\n\tif err != nil {\n\t\tpanic(`semver: ParseRange(` + s + `): ` + err.Error())\n\t}\n\treturn r\n}\n<commit_msg>simplify cases of strings.Index with strings.Contains<commit_after>package semver\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode\"\n)\n\ntype wildcardType int\n\nconst (\n\tnoneWildcard wildcardType = iota\n\tmajorWildcard wildcardType = 1\n\tminorWildcard wildcardType = 2\n\tpatchWildcard wildcardType = 3\n)\n\nfunc wildcardTypefromInt(i int) wildcardType {\n\tswitch i {\n\tcase 1:\n\t\treturn majorWildcard\n\tcase 2:\n\t\treturn minorWildcard\n\tcase 3:\n\t\treturn patchWildcard\n\tdefault:\n\t\treturn noneWildcard\n\t}\n}\n\ntype comparator func(Version, Version) bool\n\nvar (\n\tcompEQ comparator = func(v1 Version, v2 Version) bool {\n\t\treturn v1.Compare(v2) == 0\n\t}\n\tcompNE = func(v1 Version, v2 Version) bool {\n\t\treturn v1.Compare(v2) != 0\n\t}\n\tcompGT = func(v1 Version, v2 Version) bool {\n\t\treturn v1.Compare(v2) == 1\n\t}\n\tcompGE = func(v1 Version, v2 Version) bool {\n\t\treturn v1.Compare(v2) >= 0\n\t}\n\tcompLT = func(v1 Version, v2 Version) bool {\n\t\treturn v1.Compare(v2) == -1\n\t}\n\tcompLE = func(v1 Version, v2 Version) bool {\n\t\treturn v1.Compare(v2) <= 0\n\t}\n)\n\ntype versionRange struct {\n\tv Version\n\tc comparator\n}\n\n\/\/ rangeFunc creates a Range from the given versionRange.\nfunc (vr *versionRange) rangeFunc() Range {\n\treturn Range(func(v Version) bool {\n\t\treturn vr.c(v, vr.v)\n\t})\n}\n\n\/\/ Range represents a range of versions.\n\/\/ A Range can be used to check if a Version satisfies it:\n\/\/\n\/\/ range, err := semver.ParseRange(\">1.0.0 <2.0.0\")\n\/\/ range(semver.MustParse(\"1.1.1\") \/\/ returns true\ntype Range func(Version) bool\n\n\/\/ OR combines the existing Range with another Range using logical OR.\nfunc (rf Range) OR(f Range) Range {\n\treturn Range(func(v Version) bool {\n\t\treturn rf(v) || f(v)\n\t})\n}\n\n\/\/ AND combines the existing Range with another Range using logical AND.\nfunc (rf Range) AND(f Range) Range {\n\treturn Range(func(v Version) bool {\n\t\treturn rf(v) && f(v)\n\t})\n}\n\n\/\/ ParseRange parses a range and returns a Range.\n\/\/ If the range could not be 
parsed an error is returned.\n\/\/\n\/\/ Valid ranges are:\n\/\/ - \"<1.0.0\"\n\/\/ - \"<=1.0.0\"\n\/\/ - \">1.0.0\"\n\/\/ - \">=1.0.0\"\n\/\/ - \"1.0.0\", \"=1.0.0\", \"==1.0.0\"\n\/\/ - \"!1.0.0\", \"!=1.0.0\"\n\/\/\n\/\/ A Range can consist of multiple ranges separated by space:\n\/\/ Ranges can be linked by logical AND:\n\/\/ - \">1.0.0 <2.0.0\" would match between both ranges, so \"1.1.1\" and \"1.8.7\" but not \"1.0.0\" or \"2.0.0\"\n\/\/ - \">1.0.0 <3.0.0 !2.0.3-beta.2\" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2\n\/\/\n\/\/ Ranges can also be linked by logical OR:\n\/\/ - \"<2.0.0 || >=3.0.0\" would match \"1.x.x\" and \"3.x.x\" but not \"2.x.x\"\n\/\/\n\/\/ AND has a higher precedence than OR. It's not possible to use brackets.\n\/\/\n\/\/ Ranges can be combined by both AND and OR\n\/\/\n\/\/ - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`\nfunc ParseRange(s string) (Range, error) {\n\tparts := splitAndTrim(s)\n\torParts, err := splitORParts(parts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\texpandedParts, err := expandWildcardVersion(orParts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar orFn Range\n\tfor _, p := range expandedParts {\n\t\tvar andFn Range\n\t\tfor _, ap := range p {\n\t\t\topStr, vStr, err := splitComparatorVersion(ap)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tvr, err := buildVersionRange(opStr, vStr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Could not parse Range %q: %s\", ap, err)\n\t\t\t}\n\t\t\trf := vr.rangeFunc()\n\n\t\t\t\/\/ Set function\n\t\t\tif andFn == nil {\n\t\t\t\tandFn = rf\n\t\t\t} else { \/\/ Combine with existing function\n\t\t\t\tandFn = andFn.AND(rf)\n\t\t\t}\n\t\t}\n\t\tif orFn == nil {\n\t\t\torFn = andFn\n\t\t} else {\n\t\t\torFn = orFn.OR(andFn)\n\t\t}\n\n\t}\n\treturn orFn, nil\n}\n\n\/\/ splitORParts splits the already cleaned parts by '||'.\n\/\/ Checks for invalid positions of the operator and returns an\n\/\/ error if found.\nfunc splitORParts(parts []string) ([][]string, error) {\n\tvar ORparts [][]string\n\tlast := 0\n\tfor i, p := range parts {\n\t\tif p == \"||\" {\n\t\t\tif i == 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"First element in range is '||'\")\n\t\t\t}\n\t\t\tORparts = append(ORparts, parts[last:i])\n\t\t\tlast = i + 1\n\t\t}\n\t}\n\tif last == len(parts) {\n\t\treturn nil, fmt.Errorf(\"Last element in range is '||'\")\n\t}\n\tORparts = append(ORparts, parts[last:])\n\treturn ORparts, nil\n}\n\n\/\/ buildVersionRange takes a slice of 2: operator and version\n\/\/ and builds a versionRange, otherwise an error.\nfunc buildVersionRange(opStr, vStr string) (*versionRange, error) {\n\tc := parseComparator(opStr)\n\tif c == nil {\n\t\treturn nil, fmt.Errorf(\"Could not parse comparator %q in %q\", opStr, strings.Join([]string{opStr, vStr}, \"\"))\n\t}\n\tv, err := Parse(vStr)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not parse version %q in %q: %s\", vStr, strings.Join([]string{opStr, vStr}, \"\"), err)\n\t}\n\n\treturn &versionRange{\n\t\tv: v,\n\t\tc: c,\n\t}, nil\n\n}\n\n\/\/ inArray checks if a byte is contained in an array of bytes\nfunc inArray(s byte, list []byte) bool {\n\tfor _, el := range list {\n\t\tif el == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ splitAndTrim splits a range string by spaces and cleans whitespaces\nfunc splitAndTrim(s string) (result []string) {\n\tlast := 0\n\tvar lastChar byte\n\texcludeFromSplit := []byte{'>', '<', '='}\n\tfor i := 0; i < 
len(s); i++ {\n\t\tif s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {\n\t\t\tif last < i-1 {\n\t\t\t\tresult = append(result, s[last:i])\n\t\t\t}\n\t\t\tlast = i + 1\n\t\t} else if s[i] != ' ' {\n\t\t\tlastChar = s[i]\n\t\t}\n\t}\n\tif last < len(s)-1 {\n\t\tresult = append(result, s[last:])\n\t}\n\n\tfor i, v := range result {\n\t\tresult[i] = strings.Replace(v, \" \", \"\", -1)\n\t}\n\n\t\/\/ parts := strings.Split(s, \" \")\n\t\/\/ for _, x := range parts {\n\t\/\/ \tif s := strings.TrimSpace(x); len(s) != 0 {\n\t\/\/ \t\tresult = append(result, s)\n\t\/\/ \t}\n\t\/\/ }\n\treturn\n}\n\n\/\/ splitComparatorVersion splits the comparator from the version.\n\/\/ Input must be free of leading or trailing spaces.\nfunc splitComparatorVersion(s string) (string, string, error) {\n\ti := strings.IndexFunc(s, unicode.IsDigit)\n\tif i == -1 {\n\t\treturn \"\", \"\", fmt.Errorf(\"Could not get version from string: %q\", s)\n\t}\n\treturn strings.TrimSpace(s[0:i]), s[i:], nil\n}\n\n\/\/ getWildcardType will return the type of wildcard that the\n\/\/ passed version contains\nfunc getWildcardType(vStr string) wildcardType {\n\tparts := strings.Split(vStr, \".\")\n\tnparts := len(parts)\n\twildcard := parts[nparts-1]\n\n\tpossibleWildcardType := wildcardTypefromInt(nparts)\n\tif wildcard == \"x\" {\n\t\treturn possibleWildcardType\n\t}\n\n\treturn noneWildcard\n}\n\n\/\/ createVersionFromWildcard will convert a wildcard version\n\/\/ into a regular version, replacing 'x's with '0's, handling\n\/\/ special cases like '1.x.x' and '1.x'\nfunc createVersionFromWildcard(vStr string) string {\n\t\/\/ handle 1.x.x\n\tvStr2 := strings.Replace(vStr, \".x.x\", \".x\", 1)\n\tvStr2 = strings.Replace(vStr2, \".x\", \".0\", 1)\n\tparts := strings.Split(vStr2, \".\")\n\n\t\/\/ handle 1.x\n\tif len(parts) == 2 {\n\t\treturn vStr2 + \".0\"\n\t}\n\n\treturn vStr2\n}\n\n\/\/ incrementMajorVersion will increment the major version\n\/\/ of the passed version\nfunc incrementMajorVersion(vStr string) (string, error) {\n\tparts := strings.Split(vStr, \".\")\n\ti, err := strconv.Atoi(parts[0])\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tparts[0] = strconv.Itoa(i + 1)\n\n\treturn strings.Join(parts, \".\"), nil\n}\n\n\/\/ incrementMinorVersion will increment the minor version\n\/\/ of the passed version\nfunc incrementMinorVersion(vStr string) (string, error) {\n\tparts := strings.Split(vStr, \".\")\n\ti, err := strconv.Atoi(parts[1])\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tparts[1] = strconv.Itoa(i + 1)\n\n\treturn strings.Join(parts, \".\"), nil\n}\n\n\/\/ expandWildcardVersion will expand wildcards inside versions\n\/\/ following these rules:\n\/\/\n\/\/ * when dealing with patch wildcards:\n\/\/ >= 1.2.x will become >= 1.2.0\n\/\/ <= 1.2.x will become < 1.3.0\n\/\/ > 1.2.x will become >= 1.3.0\n\/\/ < 1.2.x will become < 1.2.0\n\/\/ != 1.2.x will become < 1.2.0 >= 1.3.0\n\/\/\n\/\/ * when dealing with minor wildcards:\n\/\/ >= 1.x will become >= 1.0.0\n\/\/ <= 1.x will become < 2.0.0\n\/\/ > 1.x will become >= 2.0.0\n\/\/ < 1.0 will become < 1.0.0\n\/\/ != 1.x will become < 1.0.0 >= 2.0.0\n\/\/\n\/\/ * when dealing with wildcards without\n\/\/ version operator:\n\/\/ 1.2.x will become >= 1.2.0 < 1.3.0\n\/\/ 1.x will become >= 1.0.0 < 2.0.0\nfunc expandWildcardVersion(parts [][]string) ([][]string, error) {\n\tvar expandedParts [][]string\n\tfor _, p := range parts {\n\t\tvar newParts []string\n\t\tfor _, ap := range p {\n\t\t\tif strings.Contains(ap, \"x\") {\n\t\t\t\topStr, vStr, err := 
splitComparatorVersion(ap)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\tversionWildcardType := getWildcardType(vStr)\n\t\t\t\tflatVersion := createVersionFromWildcard(vStr)\n\n\t\t\t\tvar resultOperator string\n\t\t\t\tvar shouldIncrementVersion bool\n\t\t\t\tswitch opStr {\n\t\t\t\tcase \">\":\n\t\t\t\t\tresultOperator = \">=\"\n\t\t\t\t\tshouldIncrementVersion = true\n\t\t\t\tcase \">=\":\n\t\t\t\t\tresultOperator = \">=\"\n\t\t\t\tcase \"<\":\n\t\t\t\t\tresultOperator = \"<\"\n\t\t\t\tcase \"<=\":\n\t\t\t\t\tresultOperator = \"<\"\n\t\t\t\t\tshouldIncrementVersion = true\n\t\t\t\tcase \"\", \"=\", \"==\":\n\t\t\t\t\tnewParts = append(newParts, \">=\"+flatVersion)\n\t\t\t\t\tresultOperator = \"<\"\n\t\t\t\t\tshouldIncrementVersion = true\n\t\t\t\tcase \"!=\", \"!\":\n\t\t\t\t\tnewParts = append(newParts, \"<\"+flatVersion)\n\t\t\t\t\tresultOperator = \">=\"\n\t\t\t\t\tshouldIncrementVersion = true\n\t\t\t\t}\n\n\t\t\t\tvar resultVersion string\n\t\t\t\tif shouldIncrementVersion {\n\t\t\t\t\tswitch versionWildcardType {\n\t\t\t\t\tcase patchWildcard:\n\t\t\t\t\t\tresultVersion, _ = incrementMinorVersion(flatVersion)\n\t\t\t\t\tcase minorWildcard:\n\t\t\t\t\t\tresultVersion, _ = incrementMajorVersion(flatVersion)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tresultVersion = flatVersion\n\t\t\t\t}\n\n\t\t\t\tap = resultOperator + resultVersion\n\t\t\t}\n\t\t\tnewParts = append(newParts, ap)\n\t\t}\n\t\texpandedParts = append(expandedParts, newParts)\n\t}\n\n\treturn expandedParts, nil\n}\n\nfunc parseComparator(s string) comparator {\n\tswitch s {\n\tcase \"==\":\n\t\tfallthrough\n\tcase \"\":\n\t\tfallthrough\n\tcase \"=\":\n\t\treturn compEQ\n\tcase \">\":\n\t\treturn compGT\n\tcase \">=\":\n\t\treturn compGE\n\tcase \"<\":\n\t\treturn compLT\n\tcase \"<=\":\n\t\treturn compLE\n\tcase \"!\":\n\t\tfallthrough\n\tcase \"!=\":\n\t\treturn compNE\n\t}\n\n\treturn nil\n}\n\n\/\/ MustParseRange is like ParseRange but panics if the range cannot be parsed.\nfunc MustParseRange(s string) Range {\n\tr, err := ParseRange(s)\n\tif err != nil {\n\t\tpanic(`semver: ParseRange(` + s + `): ` + err.Error())\n\t}\n\treturn r\n}\n<|endoftext|>"} {"text":"<commit_before>package collector\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\tlog \"github.com\/ulricqin\/goutils\/logtool\"\n\t\"github.com\/ulricqin\/goutils\/slicetool\"\n\t\"github.com\/ulricqin\/goutils\/systool\"\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc ListenPorts() []int64 {\n\tbs, err := systool.CmdOutBytes(\"ss\", \"-n\", \"-l\")\n\tif err != nil {\n\t\tlog.Error(\"ss -n -l exec fail: %s\", err)\n\t\treturn []int64{}\n\t}\n\n\treader := bufio.NewReader(bytes.NewBuffer(bs))\n\n\t\/\/ ignore the first line\n\tvar line []byte\n\tline, _, err = reader.ReadLine()\n\tif err == io.EOF || err != nil {\n\t\treturn []int64{}\n\t}\n\n\tret := []int64{}\n\n\tfor {\n\t\tline, _, err = reader.ReadLine()\n\t\tif err == io.EOF || err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tarr := strings.Fields(string(line))\n\t\tif len(arr) != 4 {\n\t\t\tlog.Error(\"output of [ss -n -l] format error\")\n\t\t\tcontinue\n\t\t}\n\n\t\tlocation := strings.LastIndex(arr[2], \":\")\n\t\tport := arr[2][location+1:]\n\n\t\tif p, e := strconv.ParseInt(port, 10, 64); e != nil {\n\t\t\tlog.Error(\"parse port to int64 fail: %s\", e)\n\t\t\tcontinue\n\t\t} else {\n\t\t\tret = append(ret, p)\n\t\t}\n\n\t}\n\n\tret = slicetool.SliceUniqueInt64(ret)\n\n\tlog.Info(\"listening ports: %v\", ret)\n\treturn ret\n}\n<commit_msg>bugfix: listen port<commit_after>package 
collector\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\tlog \"github.com\/ulricqin\/goutils\/logtool\"\n\t\"github.com\/ulricqin\/goutils\/slicetool\"\n\t\"github.com\/ulricqin\/goutils\/systool\"\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc ListenPorts() []int64 {\n\tbs, err := systool.CmdOutBytes(\"ss\", \"-n\", \"-l\")\n\tif err != nil {\n\t\tlog.Error(\"ss -n -l exec fail: %s\", err)\n\t\treturn []int64{}\n\t}\n\n\treader := bufio.NewReader(bytes.NewBuffer(bs))\n\n\t\/\/ ignore the first line\n\tvar line []byte\n\tline, _, err = reader.ReadLine()\n\tif err == io.EOF || err != nil {\n\t\treturn []int64{}\n\t}\n\n\tret := []int64{}\n\n\tfor {\n\t\tline, _, err = reader.ReadLine()\n\t\tif err == io.EOF || err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tarr := strings.Fields(string(line))\n\t\tarrlen := len(arr)\n\n\t\tif arrlen != 4 && arrlen != 5 {\n\t\t\tlog.Error(\"output of [ss -n -l] format error\")\n\t\t\tcontinue\n\t\t}\n\n\t\tci := 2\n\t\tif arrlen == 5 {\n\t\t\tci = 3\n\t\t}\n\n\t\tlocation := strings.LastIndex(arr[ci], \":\")\n\t\tport := arr[ci][location+1:]\n\n\t\tif p, e := strconv.ParseInt(port, 10, 64); e != nil {\n\t\t\tlog.Error(\"parse port to int64 fail: %s\", e)\n\t\t\tcontinue\n\t\t} else {\n\t\t\tret = append(ret, p)\n\t\t}\n\n\t}\n\n\tret = slicetool.SliceUniqueInt64(ret)\n\n\tlog.Info(\"listening ports: %v\", ret)\n\treturn ret\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"fmt\"\n\t\"github.com\/jingweno\/gh\/git\"\n\t\"github.com\/jingweno\/gh\/github\"\n\t\"github.com\/jingweno\/gh\/utils\"\n\t\"os\"\n)\n\nvar cmdCiStatus = &Command{\n\tRun: ciStatus,\n\tUsage: \"ci-status [COMMIT]\",\n\tShort: \"Show CI status of a commit\",\n\tLong: `Looks up the SHA for COMMIT in GitHub Status API and displays the latest\nstatus. If no COMMIT is provided, HEAD will be used. 
Exits with one of:\n\nsuccess (0), error (1), failure (1), pending (2), no status (3)\n`,\n}\n\n\/*\n $ gh ci-status\n > (prints CI state of HEAD and exits with appropriate code)\n > One of: success (0), error (1), failure (1), pending (2), no status (3)\n\n $ gh ci-status BRANCH\n > (prints CI state of BRANCH and exits with appropriate code)\n > One of: success (0), error (1), failure (1), pending (2), no status (3)\n\n $ gh ci-status SHA\n > (prints CI state of SHA and exits with appropriate code)\n > One of: success (0), error (1), failure (1), pending (2), no status (3)\n*\/\nfunc ciStatus(cmd *Command, args *Args) {\n\tref := \"HEAD\"\n\tif !args.IsParamsEmpty() {\n\t\tref = args.RemoveParam(0)\n\t}\n\n\tref, err := git.Ref(ref)\n\tutils.Check(err)\n\n\targs.Replace(\"\", \"\")\n\tif args.Noop {\n\t\tfmt.Printf(\"Would request CI status for %s\", ref)\n\t} else {\n\t\tstate, targetURL, desc, exitCode, err := fetchCiStatus(ref)\n\t\tutils.Check(err)\n\t\tfmt.Println(state)\n\t\tif targetURL != \"\" {\n\t\t\tfmt.Println(targetURL)\n\t\t}\n\t\tif desc != \"\" {\n\t\t\tfmt.Println(desc)\n\t\t}\n\n\t\tos.Exit(exitCode)\n\t}\n\n}\n\nfunc fetchCiStatus(ref string) (state, targetURL, desc string, exitCode int, err error) {\n\tgh := github.New()\n\tstatus, err := gh.CiStatus(ref)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif status == nil {\n\t\tstate = \"no status\"\n\t} else {\n\t\tstate = status.State\n\t\ttargetURL = status.TargetURL\n\t\tdesc = status.Description\n\t}\n\n\tswitch state {\n\tcase \"success\":\n\t\texitCode = 0\n\tcase \"failure\", \"error\":\n\t\texitCode = 1\n\tcase \"pending\":\n\t\texitCode = 2\n\tdefault:\n\t\texitCode = 3\n\t}\n\n\treturn\n}\n<commit_msg>Trim space<commit_after>package commands\n\nimport (\n\t\"fmt\"\n\t\"github.com\/jingweno\/gh\/git\"\n\t\"github.com\/jingweno\/gh\/github\"\n\t\"github.com\/jingweno\/gh\/utils\"\n\t\"os\"\n)\n\nvar cmdCiStatus = &Command{\n\tRun: ciStatus,\n\tUsage: \"ci-status [COMMIT]\",\n\tShort: \"Show CI status of a commit\",\n\tLong: `Looks up the SHA for COMMIT in GitHub Status API and displays the latest\nstatus. If no COMMIT is provided, HEAD will be used. 
Exits with one of:\n\nsuccess (0), error (1), failure (1), pending (2), no status (3)\n`,\n}\n\n\/*\n $ gh ci-status\n > (prints CI state of HEAD and exits with appropriate code)\n > One of: success (0), error (1), failure (1), pending (2), no status (3)\n\n $ gh ci-status BRANCH\n > (prints CI state of BRANCH and exits with appropriate code)\n > One of: success (0), error (1), failure (1), pending (2), no status (3)\n\n $ gh ci-status SHA\n > (prints CI state of SHA and exits with appropriate code)\n > One of: success (0), error (1), failure (1), pending (2), no status (3)\n*\/\nfunc ciStatus(cmd *Command, args *Args) {\n\tref := \"HEAD\"\n\tif !args.IsParamsEmpty() {\n\t\tref = args.RemoveParam(0)\n\t}\n\n\tref, err := git.Ref(ref)\n\tutils.Check(err)\n\n\targs.Replace(\"\", \"\")\n\tif args.Noop {\n\t\tfmt.Printf(\"Would request CI status for %s\", ref)\n\t} else {\n\t\tstate, targetURL, desc, exitCode, err := fetchCiStatus(ref)\n\t\tutils.Check(err)\n\t\tfmt.Println(state)\n\t\tif targetURL != \"\" {\n\t\t\tfmt.Println(targetURL)\n\t\t}\n\t\tif desc != \"\" {\n\t\t\tfmt.Println(desc)\n\t\t}\n\n\t\tos.Exit(exitCode)\n\t}\n}\n\nfunc fetchCiStatus(ref string) (state, targetURL, desc string, exitCode int, err error) {\n\tgh := github.New()\n\tstatus, err := gh.CiStatus(ref)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif status == nil {\n\t\tstate = \"no status\"\n\t} else {\n\t\tstate = status.State\n\t\ttargetURL = status.TargetURL\n\t\tdesc = status.Description\n\t}\n\n\tswitch state {\n\tcase \"success\":\n\t\texitCode = 0\n\tcase \"failure\", \"error\":\n\t\texitCode = 1\n\tcase \"pending\":\n\t\texitCode = 2\n\tdefault:\n\t\texitCode = 3\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 The Hugo Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage commands\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/gohugoio\/hugo\/helpers\"\n\t\"github.com\/gohugoio\/hugo\/hugofs\"\n\t\"github.com\/spf13\/cobra\"\n\tjww \"github.com\/spf13\/jwalterweatherman\"\n)\n\nvar _ cmder = (*newThemeCmd)(nil)\n\ntype newThemeCmd struct {\n\t*baseCmd\n\thugoBuilderCommon\n}\n\nfunc newNewThemeCmd() *newThemeCmd {\n\tccmd := &newThemeCmd{baseCmd: newBaseCmd(nil)}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"theme [name]\",\n\t\tShort: \"Create a new theme\",\n\t\tLong: `Create a new theme (skeleton) called [name] in the current directory.\nNew theme is a skeleton. Please add content to the touched files. 
Add your\nname to the copyright line in the license and adjust the theme.toml file\nas you see fit.`,\n\t\tRunE: ccmd.newTheme,\n\t}\n\n\tccmd.cmd = cmd\n\n\treturn ccmd\n}\n\n\/\/ newTheme creates a new Hugo theme template\nfunc (n *newThemeCmd) newTheme(cmd *cobra.Command, args []string) error {\n\tc, err := initializeConfig(false, false, &n.hugoBuilderCommon, n, nil)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(args) < 1 {\n\t\treturn newUserError(\"theme name needs to be provided\")\n\t}\n\n\tcreatepath := c.hugo.PathSpec.AbsPathify(filepath.Join(c.Cfg.GetString(\"themesDir\"), args[0]))\n\tjww.FEEDBACK.Println(\"Creating theme at\", createpath)\n\n\tcfg := c.DepsCfg\n\n\tif x, _ := helpers.Exists(createpath, cfg.Fs.Source); x {\n\t\treturn errors.New(createpath + \" already exists\")\n\t}\n\n\tmkdir(createpath, \"layouts\", \"_default\")\n\tmkdir(createpath, \"layouts\", \"partials\")\n\n\ttouchFile(cfg.Fs.Source, createpath, \"layouts\", \"index.html\")\n\ttouchFile(cfg.Fs.Source, createpath, \"layouts\", \"404.html\")\n\ttouchFile(cfg.Fs.Source, createpath, \"layouts\", \"_default\", \"list.html\")\n\ttouchFile(cfg.Fs.Source, createpath, \"layouts\", \"_default\", \"single.html\")\n\n\tbaseofDefault := []byte(`<html>\n {{- partial \"head.html\" . -}}\n <body>\n {{- partial \"header.html\" . -}}\n <div id=\"content\">\n {{- block \"main\" . }}{{- end }}\n <\/div>\n {{- partial \"footer.html\" . -}}\n <\/body>\n<\/html>\n`)\n\terr = helpers.WriteToDisk(filepath.Join(createpath, \"layouts\", \"_default\", \"baseof.html\"), bytes.NewReader(baseofDefault), cfg.Fs.Source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttouchFile(cfg.Fs.Source, createpath, \"layouts\", \"partials\", \"header.html\")\n\ttouchFile(cfg.Fs.Source, createpath, \"layouts\", \"partials\", \"footer.html\")\n\n\tmkdir(createpath, \"archetypes\")\n\n\tarchDefault := []byte(\"+++\\n+++\\n\")\n\n\terr = helpers.WriteToDisk(filepath.Join(createpath, \"archetypes\", \"default.md\"), bytes.NewReader(archDefault), cfg.Fs.Source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmkdir(createpath, \"static\", \"js\")\n\tmkdir(createpath, \"static\", \"css\")\n\n\tby := []byte(`The MIT License (MIT)\n\nCopyright (c) ` + time.Now().Format(\"2006\") + ` YOUR_NAME_HERE\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and\/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n`)\n\n\terr = helpers.WriteToDisk(filepath.Join(createpath, \"LICENSE.md\"), bytes.NewReader(by), cfg.Fs.Source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tn.createThemeMD(cfg.Fs, createpath)\n\n\treturn nil\n}\n\nfunc (n *newThemeCmd) createThemeMD(fs *hugofs.Fs, inpath string) (err error) {\n\n\tby := []byte(`# theme.toml template for a Hugo theme\n# See https:\/\/github.com\/gohugoio\/hugoThemes#themetoml for an example\n\nname = \"` + strings.Title(helpers.MakeTitle(filepath.Base(inpath))) + `\"\nlicense = \"MIT\"\nlicenselink = \"https:\/\/github.com\/yourname\/yourtheme\/blob\/master\/LICENSE.md\"\ndescription = \"\"\nhomepage = \"http:\/\/example.com\/\"\ntags = []\nfeatures = []\nmin_version = \"0.41\"\n\n[author]\n name = \"\"\n homepage = \"\"\n\n# If porting an existing theme\n[original]\n name = \"\"\n homepage = \"\"\n repo = \"\"\n`)\n\n\terr = helpers.WriteToDisk(filepath.Join(inpath, \"theme.toml\"), bytes.NewReader(by), fs.Source)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn nil\n}\n<commit_msg>commands: Create LICENSE rather than LICENSE.md in \"new theme\"<commit_after>\/\/ Copyright 2018 The Hugo Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage commands\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/gohugoio\/hugo\/helpers\"\n\t\"github.com\/gohugoio\/hugo\/hugofs\"\n\t\"github.com\/spf13\/cobra\"\n\tjww \"github.com\/spf13\/jwalterweatherman\"\n)\n\nvar _ cmder = (*newThemeCmd)(nil)\n\ntype newThemeCmd struct {\n\t*baseCmd\n\thugoBuilderCommon\n}\n\nfunc newNewThemeCmd() *newThemeCmd {\n\tccmd := &newThemeCmd{baseCmd: newBaseCmd(nil)}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"theme [name]\",\n\t\tShort: \"Create a new theme\",\n\t\tLong: `Create a new theme (skeleton) called [name] in the current directory.\nNew theme is a skeleton. Please add content to the touched files. 
Add your\nname to the copyright line in the license and adjust the theme.toml file\nas you see fit.`,\n\t\tRunE: ccmd.newTheme,\n\t}\n\n\tccmd.cmd = cmd\n\n\treturn ccmd\n}\n\n\/\/ newTheme creates a new Hugo theme template\nfunc (n *newThemeCmd) newTheme(cmd *cobra.Command, args []string) error {\n\tc, err := initializeConfig(false, false, &n.hugoBuilderCommon, n, nil)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(args) < 1 {\n\t\treturn newUserError(\"theme name needs to be provided\")\n\t}\n\n\tcreatepath := c.hugo.PathSpec.AbsPathify(filepath.Join(c.Cfg.GetString(\"themesDir\"), args[0]))\n\tjww.FEEDBACK.Println(\"Creating theme at\", createpath)\n\n\tcfg := c.DepsCfg\n\n\tif x, _ := helpers.Exists(createpath, cfg.Fs.Source); x {\n\t\treturn errors.New(createpath + \" already exists\")\n\t}\n\n\tmkdir(createpath, \"layouts\", \"_default\")\n\tmkdir(createpath, \"layouts\", \"partials\")\n\n\ttouchFile(cfg.Fs.Source, createpath, \"layouts\", \"index.html\")\n\ttouchFile(cfg.Fs.Source, createpath, \"layouts\", \"404.html\")\n\ttouchFile(cfg.Fs.Source, createpath, \"layouts\", \"_default\", \"list.html\")\n\ttouchFile(cfg.Fs.Source, createpath, \"layouts\", \"_default\", \"single.html\")\n\n\tbaseofDefault := []byte(`<html>\n {{- partial \"head.html\" . -}}\n <body>\n {{- partial \"header.html\" . -}}\n <div id=\"content\">\n {{- block \"main\" . }}{{- end }}\n <\/div>\n {{- partial \"footer.html\" . -}}\n <\/body>\n<\/html>\n`)\n\terr = helpers.WriteToDisk(filepath.Join(createpath, \"layouts\", \"_default\", \"baseof.html\"), bytes.NewReader(baseofDefault), cfg.Fs.Source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttouchFile(cfg.Fs.Source, createpath, \"layouts\", \"partials\", \"header.html\")\n\ttouchFile(cfg.Fs.Source, createpath, \"layouts\", \"partials\", \"footer.html\")\n\n\tmkdir(createpath, \"archetypes\")\n\n\tarchDefault := []byte(\"+++\\n+++\\n\")\n\n\terr = helpers.WriteToDisk(filepath.Join(createpath, \"archetypes\", \"default.md\"), bytes.NewReader(archDefault), cfg.Fs.Source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmkdir(createpath, \"static\", \"js\")\n\tmkdir(createpath, \"static\", \"css\")\n\n\tby := []byte(`The MIT License (MIT)\n\nCopyright (c) ` + time.Now().Format(\"2006\") + ` YOUR_NAME_HERE\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and\/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n`)\n\n\terr = helpers.WriteToDisk(filepath.Join(createpath, \"LICENSE\"), bytes.NewReader(by), cfg.Fs.Source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tn.createThemeMD(cfg.Fs, createpath)\n\n\treturn nil\n}\n\nfunc (n *newThemeCmd) createThemeMD(fs *hugofs.Fs, inpath string) (err error) {\n\n\tby := []byte(`# theme.toml template for a Hugo theme\n# See https:\/\/github.com\/gohugoio\/hugoThemes#themetoml for an example\n\nname = \"` + strings.Title(helpers.MakeTitle(filepath.Base(inpath))) + `\"\nlicense = \"MIT\"\nlicenselink = \"https:\/\/github.com\/yourname\/yourtheme\/blob\/master\/LICENSE\"\ndescription = \"\"\nhomepage = \"http:\/\/example.com\/\"\ntags = []\nfeatures = []\nmin_version = \"0.41\"\n\n[author]\n name = \"\"\n homepage = \"\"\n\n# If porting an existing theme\n[original]\n name = \"\"\n homepage = \"\"\n repo = \"\"\n`)\n\n\terr = helpers.WriteToDisk(filepath.Join(inpath, \"theme.toml\"), bytes.NewReader(by), fs.Source)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) OpenFaaS project 2018. All rights reserved.\n\/\/ Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage commands\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/openfaas\/faas-cli\/stack\"\n)\n\nfunc Test_PushValidation(t *testing.T) {\n\ttestCases := []struct {\n\t\tname string\n\t\tscenario string\n\t\timage string\n\t\tisValid bool\n\t}{\n\t\t{scenario: \"Valid image with username\", name: \"cli\", image: \"alexellis\/faas-cli\", isValid: true},\n\t\t{scenario: \"Valid image with remote repo\", name: \"cli\", image: \"10.1.95.201:5000\/faas-cli\", isValid: true},\n\t\t{scenario: \"Invalid image - missing prefix\", name: \"cli\", image: \"faas-cli\", isValid: false},\n\t}\n\n\tfor _, testCase := range testCases {\n\t\tfunctions := map[string]stack.Function{\n\t\t\t\"cli\": stack.Function{\n\t\t\t\tName: testCase.name,\n\t\t\t\tImage: testCase.image,\n\t\t\t},\n\t\t}\n\t\tinvalidImages := validateImages(functions)\n\t\tif len(invalidImages) > 0 && testCase.isValid == true {\n\t\t\tt.Logf(\"scenario: %s want %s to be valid, but was invalid\", testCase.scenario, testCase.image)\n\t\t\tt.Fail()\n\t\t}\n\t\tif len(invalidImages) == 0 && testCase.isValid == false {\n\t\t\tt.Logf(\"scenario: %s want %s to be invalid, but was valid\", testCase.scenario, testCase.image)\n\t\t\tt.Fail()\n\t\t}\n\n\t}\n}\n<commit_msg>Update license<commit_after>\/\/ Copyright (c) OpenFaaS Project 2018. All rights reserved.\n\/\/ Licensed under the MIT license. 
See LICENSE file in the project root for full license information.\n\npackage commands\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/openfaas\/faas-cli\/stack\"\n)\n\nfunc Test_PushValidation(t *testing.T) {\n\ttestCases := []struct {\n\t\tname string\n\t\tscenario string\n\t\timage string\n\t\tisValid bool\n\t}{\n\t\t{scenario: \"Valid image with username\", name: \"cli\", image: \"alexellis\/faas-cli\", isValid: true},\n\t\t{scenario: \"Valid image with remote repo\", name: \"cli\", image: \"10.1.95.201:5000\/faas-cli\", isValid: true},\n\t\t{scenario: \"Invalid image - missing prefix\", name: \"cli\", image: \"faas-cli\", isValid: false},\n\t}\n\n\tfor _, testCase := range testCases {\n\t\tfunctions := map[string]stack.Function{\n\t\t\t\"cli\": stack.Function{\n\t\t\t\tName: testCase.name,\n\t\t\t\tImage: testCase.image,\n\t\t\t},\n\t\t}\n\t\tinvalidImages := validateImages(functions)\n\t\tif len(invalidImages) > 0 && testCase.isValid == true {\n\t\t\tt.Logf(\"scenario: %s want %s to be valid, but was invalid\", testCase.scenario, testCase.image)\n\t\t\tt.Fail()\n\t\t}\n\t\tif len(invalidImages) == 0 && testCase.isValid == false {\n\t\t\tt.Logf(\"scenario: %s want %s to be invalid, but was valid\", testCase.scenario, testCase.image)\n\t\t\tt.Fail()\n\t\t}\n\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package testing\n\nimport (\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/auth\"\n)\n\n\/\/ InactiveAPIServer (in the auth\/testing package) is an implementation of the\n\/\/ pachyderm auth api that returns NotActivatedError for all requests. This is\n\/\/ meant to be used with local PFS and PPS servers for testing, and should\n\/\/ never be used in a real Pachyderm cluster\ntype InactiveAPIServer struct{}\n\n\/\/ ActivateEnterpriseToken implements the ActivateEnterpriseToken RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) ActivateEnterpriseToken(ctx context.Context, req *auth.ActivateEnterpriseTokenRequest) (resp *auth.ActivateEnterpriseTokenResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ GetEnterpriseTokenState implements the GetEnterpriseTokenState RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) GetEnterpriseTokenState(ctx context.Context, req *auth.GetEnterpriseTokenStateRequest) (resp *auth.GetEnterpriseTokenStateResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ EnableAuth implements the EnableAuth RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) EnableAuth(ctx context.Context, req *auth.EnableAuthRequest) (resp *auth.EnableAuthResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ DisableAuth implements the DisableAuth RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) DisableAuth(ctx context.Context, req *auth.DisableAuthRequest) (resp *auth.DisableAuthResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ GetAdmins implements the GetAdmins RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) GetAdmins(ctx context.Context, req *auth.GetAdminsRequest) (resp *auth.GetAdminsResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ ModifyAdmins implements the ModifyAdmins RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) ModifyAdmins(ctx context.Context, req *auth.ModifyAdminsRequest) (resp *auth.ModifyAdminsResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ Authenticate implements the 
Authenticate RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) Authenticate(ctx context.Context, req *auth.AuthenticateRequest) (resp *auth.AuthenticateResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ Authorize implements the Authorize RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) Authorize(ctx context.Context, req *auth.AuthorizeRequest) (resp *auth.AuthorizeResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ WhoAmI implements the WhoAmI RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) WhoAmI(ctx context.Context, req *auth.WhoAmIRequest) (resp *auth.WhoAmIResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ SetScope implements the SetScope RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) SetScope(ctx context.Context, req *auth.SetScopeRequest) (resp *auth.SetScopeResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ GetScope implements the GetScope RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) GetScope(ctx context.Context, req *auth.GetScopeRequest) (resp *auth.GetScopeResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ GetACL implements the GetACL RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) GetACL(ctx context.Context, req *auth.GetACLRequest) (resp *auth.GetACLResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ SetACL implements the SetACL RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) SetACL(ctx context.Context, req *auth.SetACLRequest) (resp *auth.SetACLResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ GetCapability implements the GetCapability RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) GetCapability(ctx context.Context, req *auth.GetCapabilityRequest) (resp *auth.GetCapabilityResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ RevokeAuthToken implements the RevokeAuthToken RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) RevokeAuthToken(ctx context.Context, req *auth.RevokeAuthTokenRequest) (resp *auth.RevokeAuthTokenResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n<commit_msg>Update InactiveAPIServer in auth\/testing to export the new auth API and fix PFS tests<commit_after>package testing\n\nimport (\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/auth\"\n)\n\n\/\/ InactiveAPIServer (in the auth\/testing package) is an implementation of the\n\/\/ pachyderm auth api that returns NotActivatedError for all requests. 
This is\n\/\/ meant to be used with local PFS and PPS servers for testing, and should\n\/\/ never be used in a real Pachyderm cluster\ntype InactiveAPIServer struct{}\n\n\/\/ Activate implements the Activate RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) Activate(ctx context.Context, req *auth.ActivateRequest) (resp *auth.ActivateResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ Deactivate implements the Deactivate RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) Deactivate(ctx context.Context, req *auth.DeactivateRequest) (resp *auth.DeactivateResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ GetAdmins implements the GetAdmins RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) GetAdmins(ctx context.Context, req *auth.GetAdminsRequest) (resp *auth.GetAdminsResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ ModifyAdmins implements the ModifyAdmins RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) ModifyAdmins(ctx context.Context, req *auth.ModifyAdminsRequest) (resp *auth.ModifyAdminsResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ Authenticate implements the Authenticate RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) Authenticate(ctx context.Context, req *auth.AuthenticateRequest) (resp *auth.AuthenticateResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ Authorize implements the Authorize RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) Authorize(ctx context.Context, req *auth.AuthorizeRequest) (resp *auth.AuthorizeResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ WhoAmI implements the WhoAmI RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) WhoAmI(ctx context.Context, req *auth.WhoAmIRequest) (resp *auth.WhoAmIResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ SetScope implements the SetScope RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) SetScope(ctx context.Context, req *auth.SetScopeRequest) (resp *auth.SetScopeResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ GetScope implements the GetScope RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) GetScope(ctx context.Context, req *auth.GetScopeRequest) (resp *auth.GetScopeResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ GetACL implements the GetACL RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) GetACL(ctx context.Context, req *auth.GetACLRequest) (resp *auth.GetACLResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ SetACL implements the SetACL RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) SetACL(ctx context.Context, req *auth.SetACLRequest) (resp *auth.SetACLResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ GetCapability implements the GetCapability RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) GetCapability(ctx context.Context, req *auth.GetCapabilityRequest) (resp *auth.GetCapabilityResponse, retErr error) {\n\treturn nil, auth.NotActivatedError{}\n}\n\n\/\/ RevokeAuthToken implements the RevokeAuthToken RPC, but just returns NotActivatedError\nfunc (a *InactiveAPIServer) RevokeAuthToken(ctx context.Context, req *auth.RevokeAuthTokenRequest) (resp *auth.RevokeAuthTokenResponse, retErr error) {\n\treturn 
nil, auth.NotActivatedError{}\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"path\"\n\t\"time\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\tlabelspkg \"k8s.io\/kubernetes\/pkg\/labels\"\n\tkube_watch \"k8s.io\/kubernetes\/pkg\/watch\"\n\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/uuid\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pps\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/backoff\"\n\tcol \"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/collection\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/dlock\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/util\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/watch\"\n\tppsserver \"github.com\/pachyderm\/pachyderm\/src\/server\/pps\"\n)\n\nconst (\n\tmasterLockPath = \"_master_lock\"\n)\n\nvar (\n\tfailures = map[string]bool{\n\t\t\"InvalidImageName\": true,\n\t\t\"ErrImagePull\": true,\n\t}\n)\n\n\/\/ The master process is responsible for creating\/deleting workers as\n\/\/ pipelines are created\/removed.\nfunc (a *apiServer) master() {\n\tmasterLock := dlock.NewDLock(a.etcdClient, path.Join(a.etcdPrefix, masterLockPath))\n\tbackoff.RetryNotify(func() error {\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tdefer cancel()\n\n\t\tctx, err := masterLock.Lock(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer masterLock.Unlock(ctx)\n\n\t\tlog.Infof(\"Launching PPS master process\")\n\n\t\tpipelineWatcher, err := a.pipelines.ReadOnly(ctx).WatchWithPrev()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error creating watch: %+v\", err)\n\t\t}\n\t\tdefer pipelineWatcher.Close()\n\n\t\t\/\/ watchChan will be nil if the Watch call below errors, this means\n\t\t\/\/ that we won't receive events from k8s and won't be able to detect\n\t\t\/\/ errors in pods. 
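Receiving from a nil channel blocks forever, so\n\t\t\/\/ the select below simply never fires that case. 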
We could just return that error and retry but that\n\t\t\/\/ prevents pachyderm from creating pipelines when there's an issue\n\t\t\/\/ talking to k8s.\n\t\tvar watchChan <-chan kube_watch.Event\n\t\tkubePipelineWatch, err := a.kubeClient.Pods(a.namespace).Watch(api.ListOptions{\n\t\t\tLabelSelector: labelspkg.SelectorFromSet(map[string]string{\n\t\t\t\t\"component\": \"worker\",\n\t\t\t}),\n\t\t\tWatch: true,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to watch kubernetes pods: %v\", err)\n\t\t} else {\n\t\t\twatchChan = kubePipelineWatch.ResultChan()\n\t\t\tdefer kubePipelineWatch.Stop()\n\t\t}\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event := <-pipelineWatcher.Watch():\n\t\t\t\tif event.Err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"event err: %+v\", event.Err)\n\t\t\t\t}\n\t\t\t\tswitch event.Type {\n\t\t\t\tcase watch.EventPut:\n\t\t\t\t\tvar pipelineName string\n\t\t\t\t\tvar pipelineInfo pps.PipelineInfo\n\t\t\t\t\tif err := event.Unmarshal(&pipelineName, &pipelineInfo); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\tif pipelineInfo.Salt == \"\" {\n\t\t\t\t\t\tif _, err := col.NewSTM(ctx, a.etcdClient, func(stm col.STM) error {\n\t\t\t\t\t\t\tpipelines := a.pipelines.ReadWrite(stm)\n\t\t\t\t\t\t\tnewPipelineInfo := new(pps.PipelineInfo)\n\t\t\t\t\t\t\tif err := pipelines.Get(pipelineInfo.Pipeline.Name, newPipelineInfo); err != nil {\n\t\t\t\t\t\t\t\treturn fmt.Errorf(\"error getting pipeline %s: %+v\", pipelineName, err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif newPipelineInfo.Salt == \"\" {\n\t\t\t\t\t\t\t\tnewPipelineInfo.Salt = uuid.NewWithoutDashes()\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tpipelines.Put(pipelineInfo.Pipeline.Name, newPipelineInfo)\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t}); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tvar prevPipelineInfo pps.PipelineInfo\n\t\t\t\t\tif event.PrevKey != nil {\n\t\t\t\t\t\tif err := event.UnmarshalPrev(&pipelineName, &prevPipelineInfo); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ If the pipeline has been stopped, delete workers\n\t\t\t\t\tif pipelineStateToStopped(pipelineInfo.State) {\n\t\t\t\t\t\tlog.Infof(\"master: deleting workers for pipeline %s\", pipelineInfo.Pipeline.Name)\n\t\t\t\t\t\tif err := a.deleteWorkersForPipeline(&pipelineInfo); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ If the pipeline has been restarted, create workers\n\t\t\t\t\tif !pipelineStateToStopped(pipelineInfo.State) && event.PrevKey != nil && pipelineStateToStopped(prevPipelineInfo.State) {\n\t\t\t\t\t\tif err := a.upsertWorkersForPipeline(&pipelineInfo); err != nil {\n\t\t\t\t\t\t\tif err := a.setPipelineFailure(ctx, pipelineInfo.Pipeline.Name, fmt.Sprintf(\"failed to create workers: %s\", err.Error())); err != nil {\n\t\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ If the pipeline has been updated, create new workers\n\t\t\t\t\tif pipelineInfo.Version > prevPipelineInfo.Version && !pipelineStateToStopped(pipelineInfo.State) {\n\t\t\t\t\t\tlog.Infof(\"master: creating\/updating workers for pipeline %s\", pipelineInfo.Pipeline.Name)\n\t\t\t\t\t\tif event.PrevKey != nil {\n\t\t\t\t\t\t\tif err := a.deleteWorkersForPipeline(&prevPipelineInfo); err != nil {\n\t\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif err := a.upsertWorkersForPipeline(&pipelineInfo); err != nil {\n\t\t\t\t\t\t\tif err := a.setPipelineFailure(ctx, 
pipelineInfo.Pipeline.Name, fmt.Sprintf(\"failed to create workers: %s\", err.Error())); err != nil {\n\t\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase watch.EventDelete:\n\t\t\t\t\tvar pipelineName string\n\t\t\t\t\tvar pipelineInfo pps.PipelineInfo\n\t\t\t\t\tif err := event.UnmarshalPrev(&pipelineName, &pipelineInfo); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif err := a.deleteWorkersForPipeline(&pipelineInfo); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase event := <-watchChan:\n\t\t\t\t\/\/ if we get an error we restart the watch, k8s watches seem to\n\t\t\t\t\/\/ sometimes get stuck in a loop returning events with Type =\n\t\t\t\t\/\/ \"\" we treat these as errors since otherwise we get an\n\t\t\t\t\/\/ endless stream of them and can't do anything.\n\t\t\t\tif event.Type == kube_watch.Error || event.Type == \"\" {\n\t\t\t\t\tkubePipelineWatch.Stop()\n\t\t\t\t\tkubePipelineWatch, err = a.kubeClient.Pods(a.namespace).Watch(api.ListOptions{\n\t\t\t\t\t\tLabelSelector: labelspkg.SelectorFromSet(map[string]string{\n\t\t\t\t\t\t\t\"component\": \"worker\",\n\t\t\t\t\t\t}),\n\t\t\t\t\t\tWatch: true,\n\t\t\t\t\t})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tdefer kubePipelineWatch.Stop()\n\t\t\t\t}\n\t\t\t\tpod, ok := event.Object.(*api.Pod)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif pod.Status.Phase == api.PodFailed {\n\t\t\t\t\tlog.Errorf(\"pod failed because: %s\", pod.Status.Message)\n\t\t\t\t}\n\t\t\t\tfor _, status := range pod.Status.ContainerStatuses {\n\t\t\t\t\tif status.Name == \"user\" && status.State.Waiting != nil && failures[status.State.Waiting.Reason] {\n\t\t\t\t\t\tif err := a.setPipelineFailure(ctx, pod.ObjectMeta.Annotations[\"pipelineName\"], status.State.Waiting.Message); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}, backoff.NewInfiniteBackOff(), func(err error, d time.Duration) error {\n\t\tlog.Errorf(\"master: error running the master process: %v; retrying in %v\", err, d)\n\t\treturn nil\n\t})\n}\n\nfunc (a *apiServer) setPipelineFailure(ctx context.Context, pipelineName string, reason string) error {\n\t\/\/ Set pipeline state to failure\n\t_, err := col.NewSTM(ctx, a.etcdClient, func(stm col.STM) error {\n\t\tpipelines := a.pipelines.ReadWrite(stm)\n\t\tpipelineInfo := new(pps.PipelineInfo)\n\t\tif err := pipelines.Get(pipelineName, pipelineInfo); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpipelineInfo.State = pps.PipelineState_PIPELINE_FAILURE\n\t\tpipelineInfo.Reason = reason\n\t\tpipelines.Put(pipelineName, pipelineInfo)\n\t\treturn nil\n\t})\n\treturn err\n}\n\nfunc (a *apiServer) upsertWorkersForPipeline(pipelineInfo *pps.PipelineInfo) error {\n\tvar errCount int\n\treturn backoff.RetryNotify(func() error {\n\t\tparallelism, err := ppsserver.GetExpectedNumWorkers(a.kubeClient, pipelineInfo.ParallelismSpec)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar resources *api.ResourceList\n\t\tif pipelineInfo.ResourceSpec != nil {\n\t\t\tresources, err = util.GetResourceListFromPipeline(pipelineInfo)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Retrieve the current state of the RC. 
If the RC is scaled down,\n\t\t\/\/ we want to ensure that it remains scaled down.\n\t\trc := a.kubeClient.ReplicationControllers(a.namespace)\n\t\tworkerRc, err := rc.Get(ppsserver.PipelineRcName(pipelineInfo.Pipeline.Name, pipelineInfo.Version))\n\t\tif err == nil {\n\t\t\tif (workerRc.Spec.Template.Spec.Containers[0].Resources.Requests == nil) && workerRc.Spec.Replicas == 1 {\n\t\t\t\tparallelism = 1\n\t\t\t\tresources = nil\n\t\t\t}\n\t\t}\n\n\t\toptions := a.getWorkerOptions(\n\t\t\tpipelineInfo.Pipeline.Name,\n\t\t\tppsserver.PipelineRcName(pipelineInfo.Pipeline.Name, pipelineInfo.Version),\n\t\t\tint32(parallelism),\n\t\t\tresources,\n\t\t\tpipelineInfo.Transform,\n\t\t\tpipelineInfo.CacheSize,\n\t\t\tpipelineInfo.Service)\n\t\t\/\/ Set the pipeline name env\n\t\toptions.workerEnv = append(options.workerEnv, api.EnvVar{\n\t\t\tName: client.PPSPipelineNameEnv,\n\t\t\tValue: pipelineInfo.Pipeline.Name,\n\t\t})\n\t\treturn a.createWorkerRc(options)\n\t}, backoff.NewInfiniteBackOff(), func(err error, d time.Duration) error {\n\t\terrCount++\n\t\tif errCount >= 3 {\n\t\t\treturn err\n\t\t}\n\t\tlog.Errorf(\"error creating workers for pipeline %v: %v; retrying in %v\", pipelineInfo.Pipeline.Name, err, d)\n\t\treturn nil\n\t})\n}\n\nfunc (a *apiServer) deleteWorkersForPipeline(pipelineInfo *pps.PipelineInfo) error {\n\trcName := ppsserver.PipelineRcName(pipelineInfo.Pipeline.Name, pipelineInfo.Version)\n\tif err := a.kubeClient.Services(a.namespace).Delete(rcName); err != nil {\n\t\tif !isNotFoundErr(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\tif pipelineInfo.Service != nil {\n\t\tif err := a.kubeClient.Services(a.namespace).Delete(rcName + \"-user\"); err != nil {\n\t\t\tif !isNotFoundErr(err) {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tfalseVal := false\n\tdeleteOptions := &api.DeleteOptions{\n\t\tOrphanDependents: &falseVal,\n\t}\n\tif err := a.kubeClient.ReplicationControllers(a.namespace).Delete(rcName, deleteOptions); err != nil {\n\t\tif !isNotFoundErr(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Fix bug in watches I introduced.<commit_after>package server\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"path\"\n\t\"time\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\tlabelspkg \"k8s.io\/kubernetes\/pkg\/labels\"\n\tkube_watch \"k8s.io\/kubernetes\/pkg\/watch\"\n\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/uuid\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pps\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/backoff\"\n\tcol \"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/collection\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/dlock\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/util\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/watch\"\n\tppsserver \"github.com\/pachyderm\/pachyderm\/src\/server\/pps\"\n)\n\nconst (\n\tmasterLockPath = \"_master_lock\"\n)\n\nvar (\n\tfailures = map[string]bool{\n\t\t\"InvalidImageName\": true,\n\t\t\"ErrImagePull\": true,\n\t}\n)\n\n\/\/ The master process is responsible for creating\/deleting workers as\n\/\/ pipelines are created\/removed.\nfunc (a *apiServer) master() {\n\tmasterLock := dlock.NewDLock(a.etcdClient, path.Join(a.etcdPrefix, masterLockPath))\n\tbackoff.RetryNotify(func() error {\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tdefer cancel()\n\n\t\tctx, err := masterLock.Lock(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer 
masterLock.Unlock(ctx)\n\n\t\tlog.Infof(\"Launching PPS master process\")\n\n\t\tpipelineWatcher, err := a.pipelines.ReadOnly(ctx).WatchWithPrev()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error creating watch: %+v\", err)\n\t\t}\n\t\tdefer pipelineWatcher.Close()\n\n\t\t\/\/ watchChan will be nil if the Watch call below errors, this means\n\t\t\/\/ that we won't receive events from k8s and won't be able to detect\n\t\t\/\/ errors in pods. Receiving from a nil channel blocks forever, so\n\t\t\/\/ the select below simply never fires that case. We could just return that error and retry but that\n\t\t\/\/ prevents pachyderm from creating pipelines when there's an issue\n\t\t\/\/ talking to k8s.\n\t\tvar watchChan <-chan kube_watch.Event\n\t\tkubePipelineWatch, err := a.kubeClient.Pods(a.namespace).Watch(api.ListOptions{\n\t\t\tLabelSelector: labelspkg.SelectorFromSet(map[string]string{\n\t\t\t\t\"component\": \"worker\",\n\t\t\t}),\n\t\t\tWatch: true,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to watch kubernetes pods: %v\", err)\n\t\t} else {\n\t\t\twatchChan = kubePipelineWatch.ResultChan()\n\t\t\tdefer kubePipelineWatch.Stop()\n\t\t}\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event := <-pipelineWatcher.Watch():\n\t\t\t\tif event.Err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"event err: %+v\", event.Err)\n\t\t\t\t}\n\t\t\t\tswitch event.Type {\n\t\t\t\tcase watch.EventPut:\n\t\t\t\t\tvar pipelineName string\n\t\t\t\t\tvar pipelineInfo pps.PipelineInfo\n\t\t\t\t\tif err := event.Unmarshal(&pipelineName, &pipelineInfo); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\tif pipelineInfo.Salt == \"\" {\n\t\t\t\t\t\tif _, err := col.NewSTM(ctx, a.etcdClient, func(stm col.STM) error {\n\t\t\t\t\t\t\tpipelines := a.pipelines.ReadWrite(stm)\n\t\t\t\t\t\t\tnewPipelineInfo := new(pps.PipelineInfo)\n\t\t\t\t\t\t\tif err := pipelines.Get(pipelineInfo.Pipeline.Name, newPipelineInfo); err != nil {\n\t\t\t\t\t\t\t\treturn fmt.Errorf(\"error getting pipeline %s: %+v\", pipelineName, err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif newPipelineInfo.Salt == \"\" {\n\t\t\t\t\t\t\t\tnewPipelineInfo.Salt = uuid.NewWithoutDashes()\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tpipelines.Put(pipelineInfo.Pipeline.Name, newPipelineInfo)\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t}); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tvar prevPipelineInfo pps.PipelineInfo\n\t\t\t\t\tif event.PrevKey != nil {\n\t\t\t\t\t\tif err := event.UnmarshalPrev(&pipelineName, &prevPipelineInfo); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ If the pipeline has been stopped, delete workers\n\t\t\t\t\tif pipelineStateToStopped(pipelineInfo.State) {\n\t\t\t\t\t\tlog.Infof(\"master: deleting workers for pipeline %s\", pipelineInfo.Pipeline.Name)\n\t\t\t\t\t\tif err := a.deleteWorkersForPipeline(&pipelineInfo); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ If the pipeline has been restarted, create workers\n\t\t\t\t\tif !pipelineStateToStopped(pipelineInfo.State) && event.PrevKey != nil && pipelineStateToStopped(prevPipelineInfo.State) {\n\t\t\t\t\t\tif err := a.upsertWorkersForPipeline(&pipelineInfo); err != nil {\n\t\t\t\t\t\t\tif err := a.setPipelineFailure(ctx, pipelineInfo.Pipeline.Name, fmt.Sprintf(\"failed to create workers: %s\", err.Error())); err != nil {\n\t\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ If the pipeline has been updated, create new workers\n\t\t\t\t\tif pipelineInfo.Version > prevPipelineInfo.Version && 
!pipelineStateToStopped(pipelineInfo.State) {\n\t\t\t\t\t\tlog.Infof(\"master: creating\/updating workers for pipeline %s\", pipelineInfo.Pipeline.Name)\n\t\t\t\t\t\tif event.PrevKey != nil {\n\t\t\t\t\t\t\tif err := a.deleteWorkersForPipeline(&prevPipelineInfo); err != nil {\n\t\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif err := a.upsertWorkersForPipeline(&pipelineInfo); err != nil {\n\t\t\t\t\t\t\tif err := a.setPipelineFailure(ctx, pipelineInfo.Pipeline.Name, fmt.Sprintf(\"failed to create workers: %s\", err.Error())); err != nil {\n\t\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase watch.EventDelete:\n\t\t\t\t\tvar pipelineName string\n\t\t\t\t\tvar pipelineInfo pps.PipelineInfo\n\t\t\t\t\tif err := event.UnmarshalPrev(&pipelineName, &pipelineInfo); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif err := a.deleteWorkersForPipeline(&pipelineInfo); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase event := <-watchChan:\n\t\t\t\t\/\/ if we get an error we restart the watch, k8s watches seem to\n\t\t\t\t\/\/ sometimes get stuck in a loop returning events with Type =\n\t\t\t\t\/\/ \"\" we treat these as errors since otherwise we get an\n\t\t\t\t\/\/ endless stream of them and can't do anything.\n\t\t\t\tif event.Type == kube_watch.Error || event.Type == \"\" {\n\t\t\t\t\tkubePipelineWatch.Stop()\n\t\t\t\t\tkubePipelineWatch, err = a.kubeClient.Pods(a.namespace).Watch(api.ListOptions{\n\t\t\t\t\t\tLabelSelector: labelspkg.SelectorFromSet(map[string]string{\n\t\t\t\t\t\t\t\"component\": \"worker\",\n\t\t\t\t\t\t}),\n\t\t\t\t\t\tWatch: true,\n\t\t\t\t\t})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Errorf(\"failed to watch kubernetes pods: %v\", err)\n\t\t\t\t\t} else {\n\t\t\t\t\t\twatchChan = kubePipelineWatch.ResultChan()\n\t\t\t\t\t\tdefer kubePipelineWatch.Stop()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tpod, ok := event.Object.(*api.Pod)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif pod.Status.Phase == api.PodFailed {\n\t\t\t\t\tlog.Errorf(\"pod failed because: %s\", pod.Status.Message)\n\t\t\t\t}\n\t\t\t\tfor _, status := range pod.Status.ContainerStatuses {\n\t\t\t\t\tif status.Name == \"user\" && status.State.Waiting != nil && failures[status.State.Waiting.Reason] {\n\t\t\t\t\t\tif err := a.setPipelineFailure(ctx, pod.ObjectMeta.Annotations[\"pipelineName\"], status.State.Waiting.Message); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}, backoff.NewInfiniteBackOff(), func(err error, d time.Duration) error {\n\t\tlog.Errorf(\"master: error running the master process: %v; retrying in %v\", err, d)\n\t\treturn nil\n\t})\n}\n\nfunc (a *apiServer) setPipelineFailure(ctx context.Context, pipelineName string, reason string) error {\n\t\/\/ Set pipeline state to failure\n\t_, err := col.NewSTM(ctx, a.etcdClient, func(stm col.STM) error {\n\t\tpipelines := a.pipelines.ReadWrite(stm)\n\t\tpipelineInfo := new(pps.PipelineInfo)\n\t\tif err := pipelines.Get(pipelineName, pipelineInfo); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpipelineInfo.State = pps.PipelineState_PIPELINE_FAILURE\n\t\tpipelineInfo.Reason = reason\n\t\tpipelines.Put(pipelineName, pipelineInfo)\n\t\treturn nil\n\t})\n\treturn err\n}\n\nfunc (a *apiServer) upsertWorkersForPipeline(pipelineInfo *pps.PipelineInfo) error {\n\tvar errCount int\n\treturn backoff.RetryNotify(func() error {\n\t\tparallelism, err := 
ppsserver.GetExpectedNumWorkers(a.kubeClient, pipelineInfo.ParallelismSpec)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar resources *api.ResourceList\n\t\tif pipelineInfo.ResourceSpec != nil {\n\t\t\tresources, err = util.GetResourceListFromPipeline(pipelineInfo)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Retrieve the current state of the RC. If the RC is scaled down,\n\t\t\/\/ we want to ensure that it remains scaled down.\n\t\trc := a.kubeClient.ReplicationControllers(a.namespace)\n\t\tworkerRc, err := rc.Get(ppsserver.PipelineRcName(pipelineInfo.Pipeline.Name, pipelineInfo.Version))\n\t\tif err == nil {\n\t\t\tif (workerRc.Spec.Template.Spec.Containers[0].Resources.Requests == nil) && workerRc.Spec.Replicas == 1 {\n\t\t\t\tparallelism = 1\n\t\t\t\tresources = nil\n\t\t\t}\n\t\t}\n\n\t\toptions := a.getWorkerOptions(\n\t\t\tpipelineInfo.Pipeline.Name,\n\t\t\tppsserver.PipelineRcName(pipelineInfo.Pipeline.Name, pipelineInfo.Version),\n\t\t\tint32(parallelism),\n\t\t\tresources,\n\t\t\tpipelineInfo.Transform,\n\t\t\tpipelineInfo.CacheSize,\n\t\t\tpipelineInfo.Service)\n\t\t\/\/ Set the pipeline name env\n\t\toptions.workerEnv = append(options.workerEnv, api.EnvVar{\n\t\t\tName: client.PPSPipelineNameEnv,\n\t\t\tValue: pipelineInfo.Pipeline.Name,\n\t\t})\n\t\treturn a.createWorkerRc(options)\n\t}, backoff.NewInfiniteBackOff(), func(err error, d time.Duration) error {\n\t\terrCount++\n\t\tif errCount >= 3 {\n\t\t\treturn err\n\t\t}\n\t\tlog.Errorf(\"error creating workers for pipeline %v: %v; retrying in %v\", pipelineInfo.Pipeline.Name, err, d)\n\t\treturn nil\n\t})\n}\n\nfunc (a *apiServer) deleteWorkersForPipeline(pipelineInfo *pps.PipelineInfo) error {\n\trcName := ppsserver.PipelineRcName(pipelineInfo.Pipeline.Name, pipelineInfo.Version)\n\tif err := a.kubeClient.Services(a.namespace).Delete(rcName); err != nil {\n\t\tif !isNotFoundErr(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\tif pipelineInfo.Service != nil {\n\t\tif err := a.kubeClient.Services(a.namespace).Delete(rcName + \"-user\"); err != nil {\n\t\t\tif !isNotFoundErr(err) {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tfalseVal := false\n\tdeleteOptions := &api.DeleteOptions{\n\t\tOrphanDependents: &falseVal,\n\t}\n\tif err := a.kubeClient.ReplicationControllers(a.namespace).Delete(rcName, deleteOptions); err != nil {\n\t\tif !isNotFoundErr(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package suggestionbox\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ Model represents a single model inside Suggestionbox.\ntype Model struct {\n\t\/\/ ID is the ID of the model.\n\tID string `json:\"id,omitempty\"`\n\t\/\/ Name is the human readable name of the Model.\n\tName string `json:\"name,omitempty\"`\n\t\/\/ Options are optional Model settings to adjust the behaviour\n\t\/\/ of this Model within Suggestionbox.\n\tOptions *ModelOptions `json:\"options,omitempty\"`\n\t\/\/ Choices are the options this Model will select from.\n\tChoices []Choice `json:\"choices,omitempty\"`\n}\n\n\/\/ Feature represents a single feature, to describe an input or a choice\n\/\/ for example age:28 or location:\"London\".\ntype Feature struct {\n\tKey string `json:\"key,omitempty\"`\n\tValue string `json:\"value,omitempty\"`\n\tType string `json:\"type,omitempty\"`\n}\n\n\/\/ Choice is an option with features.\ntype Choice struct {\n\tID string `json:\"id,omitempty\"`\n\tFeatures 
[]Feature `json:\"features,omitempty\"`\n}\n\n\/\/ ModelOptions describes the behaviours of a Model.\ntype ModelOptions struct {\n\t\/\/ Expiration is the time to wait for the reward before it expires.\n\tExpiration time.Duration `json:\"expiration,omitempty\"`\n\n\t\/\/ Epsilon enables proportionate exploiting vs exploring ratio.\n\tEpsilon float64 `json:\"epsilon,omitempty\"`\n\n\t\/\/ SoftmaxLambda enables adaptive exploiting vs exploring ratio.\n\tSoftmaxLambda float64 `json:\"softmax_lambda,omitempty\"`\n\n\t\/\/ Ngrams describes the n-grams for text analysis.\n\tNgrams int `json:\"ngrams,omitempty\"`\n\t\/\/ Skipgrams describes the skip-grams for the text analysis.\n\tSkipgrams int `json:\"skipgrams,omitempty\"`\n}\n\n\/\/ CreateModel creates the Model in Suggestionbox.\n\/\/ If no ID is set, one will be assigned in the return Model.\nfunc (c *Client) CreateModel(ctx context.Context, model Model) (Model, error) {\n\tu, err := url.Parse(c.addr + \"\/suggestionbox\/models\")\n\tif err != nil {\n\t\treturn model, err\n\t}\n\tif !u.IsAbs() {\n\t\treturn model, errors.New(\"box address must be absolute\")\n\t}\n\tvar buf bytes.Buffer\n\tif err := json.NewEncoder(&buf).Encode(model); err != nil {\n\t\treturn model, errors.Wrap(err, \"encoding request body\")\n\t}\n\treq, err := http.NewRequest(http.MethodPost, u.String(), &buf)\n\tif err != nil {\n\t\treturn model, err\n\t}\n\treq = req.WithContext(ctx)\n\treq.Header.Set(\"Accept\", \"application\/json; charset=utf-8\")\n\treq.Header.Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\tresp, err := c.HTTPClient.Do(req)\n\tif err != nil {\n\t\treturn model, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode < 200 || resp.StatusCode >= 300 {\n\t\treturn model, errors.New(resp.Status)\n\t}\n\tvar response struct {\n\t\tSuccess bool\n\t\tError string\n\t\tModel\n\t}\n\tif err := json.NewDecoder(resp.Body).Decode(&response); err != nil {\n\t\treturn model, errors.Wrap(err, \"decoding response\")\n\t}\n\tif !response.Success {\n\t\treturn model, ErrSuggestionbox(response.Error)\n\t}\n\treturn response.Model, nil\n}\n<commit_msg>added more docs<commit_after>package suggestionbox\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ Model represents a single model inside Suggestionbox.\ntype Model struct {\n\t\/\/ ID is the ID of the model.\n\tID string `json:\"id,omitempty\"`\n\t\/\/ Name is the human readable name of the Model.\n\tName string `json:\"name,omitempty\"`\n\t\/\/ Options are optional Model settings to adjust the behaviour\n\t\/\/ of this Model within Suggestionbox.\n\tOptions *ModelOptions `json:\"options,omitempty\"`\n\t\/\/ Choices are the options this Model will select from.\n\tChoices []Choice `json:\"choices,omitempty\"`\n}\n\n\/\/ Feature represents a single feature, to describe an input or a choice\n\/\/ for example age:28 or location:\"London\".\ntype Feature struct {\n\t\/\/ Key is the name of the Feature.\n\tKey string `json:\"key,omitempty\"`\n\t\/\/ Value is the string value of this Feature.\n\tValue string `json:\"value,omitempty\"`\n\t\/\/ Type is the type of the Feature.\n\tType string `json:\"type,omitempty\"`\n}\n\n\/\/ Choice is an option with features.\ntype Choice struct {\n\t\/\/ ID is a unique ID for this choice.\n\tID string `json:\"id,omitempty\"`\n\t\/\/ Features holds all the Feature objects that describe\n\t\/\/ this choice.\n\tFeatures []Feature `json:\"features,omitempty\"`\n}\n\n\/\/ 
ModelOptions describes the behaviours of a Model.\ntype ModelOptions struct {\n\t\/\/ Expiration is the time to wait for the reward before it expires.\n\tExpiration time.Duration `json:\"expiration,omitempty\"`\n\n\t\/\/ Epsilon enables proportionate exploiting vs exploring ratio.\n\tEpsilon float64 `json:\"epsilon,omitempty\"`\n\n\t\/\/ SoftmaxLambda enables adaptive exploiting vs exploring ratio.\n\tSoftmaxLambda float64 `json:\"softmax_lambda,omitempty\"`\n\n\t\/\/ Ngrams describes the n-grams for text analysis.\n\tNgrams int `json:\"ngrams,omitempty\"`\n\t\/\/ Skipgrams describes the skip-grams for the text analysis.\n\tSkipgrams int `json:\"skipgrams,omitempty\"`\n}\n\n\/\/ CreateModel creates the Model in Suggestionbox.\n\/\/ If no ID is set, one will be assigned in the return Model.\nfunc (c *Client) CreateModel(ctx context.Context, model Model) (Model, error) {\n\tu, err := url.Parse(c.addr + \"\/suggestionbox\/models\")\n\tif err != nil {\n\t\treturn model, err\n\t}\n\tif !u.IsAbs() {\n\t\treturn model, errors.New(\"box address must be absolute\")\n\t}\n\tvar buf bytes.Buffer\n\tif err := json.NewEncoder(&buf).Encode(model); err != nil {\n\t\treturn model, errors.Wrap(err, \"encoding request body\")\n\t}\n\treq, err := http.NewRequest(http.MethodPost, u.String(), &buf)\n\tif err != nil {\n\t\treturn model, err\n\t}\n\treq = req.WithContext(ctx)\n\treq.Header.Set(\"Accept\", \"application\/json; charset=utf-8\")\n\treq.Header.Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\tresp, err := c.HTTPClient.Do(req)\n\tif err != nil {\n\t\treturn model, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode < 200 || resp.StatusCode >= 300 {\n\t\treturn model, errors.New(resp.Status)\n\t}\n\tvar response struct {\n\t\tSuccess bool\n\t\tError string\n\t\tModel\n\t}\n\tif err := json.NewDecoder(resp.Body).Decode(&response); err != nil {\n\t\treturn model, errors.Wrap(err, \"decoding response\")\n\t}\n\tif !response.Success {\n\t\treturn model, ErrSuggestionbox(response.Error)\n\t}\n\treturn response.Model, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package recommended\n\nimport (\n\t\"net\/http\"\n\t\"sort\"\n\n\t\"github.com\/aerogo\/aero\"\n\t\"github.com\/animenotifier\/arn\"\n\t\"github.com\/animenotifier\/notify.moe\/components\"\n)\n\nconst maxRecommendations = 20\n\n\/\/ Anime shows a list of recommended anime.\nfunc Anime(ctx *aero.Context) string {\n\tnick := ctx.Get(\"nick\")\n\tuser, err := arn.GetUserByNick(nick)\n\n\tif err != nil {\n\t\treturn ctx.Error(http.StatusUnauthorized, \"Not logged in\", err)\n\t}\n\n\tanimeList := user.AnimeList()\n\tgenreItems := animeList.Genres()\n\tgenreAffinity := map[string]float64{}\n\n\tfor genre, animeListItems := range genreItems {\n\t\taffinity := 0.0\n\n\t\tfor _, item := range animeListItems {\n\t\t\tif item.Status == arn.AnimeListStatusDropped {\n\t\t\t\taffinity -= 5.0\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif item.Rating.Overall != 0 {\n\t\t\t\taffinity += item.Rating.Overall\n\t\t\t} else {\n\t\t\t\taffinity += 5.0\n\t\t\t}\n\t\t}\n\n\t\tgenreAffinity[genre] = affinity\n\t}\n\n\t\/\/ Get all anime\n\trecommendations := arn.AllAnime()\n\n\t\/\/ Affinity maps an anime ID to a number that indicates how likely a user is going to enjoy that anime.\n\taffinity := map[string]float64{}\n\n\t\/\/ Calculate affinity for each anime\n\tfor _, anime := range recommendations {\n\t\t\/\/ Skip anime from my list (except planned anime)\n\t\texisting := animeList.Find(anime.ID)\n\n\t\tif existing != nil && existing.Status != 
arn.AnimeListStatusPlanned {\n\t\t\tcontinue\n\t\t}\n\n\t\taffinity[anime.ID] = float64(anime.Popularity.Total())\n\n\t\t\/\/ animeGenresAffinity := 0.0\n\n\t\t\/\/ if len(anime.Genres) > 0 {\n\t\t\/\/ \tfor _, genre := range anime.Genres {\n\t\t\/\/ \t\tif genreAffinity[genre] > animeGenresAffinity {\n\t\t\/\/ \t\t\tanimeGenresAffinity = genreAffinity[genre]\n\t\t\/\/ \t\t}\n\t\t\/\/ \t}\n\n\t\t\/\/ \tanimeGenresAffinity = animeGenresAffinity \/ float64(len(anime.Genres))\n\t\t\/\/ }\n\n\t\t\/\/ affinity[anime.ID] = animeGenresAffinity\n\t}\n\n\t\/\/ Sort\n\tsort.Slice(recommendations, func(i, j int) bool {\n\t\treturn affinity[recommendations[i].ID] > affinity[recommendations[j].ID]\n\t})\n\n\t\/\/ Take the top maxRecommendations entries\n\tif len(recommendations) > maxRecommendations {\n\t\trecommendations = recommendations[:maxRecommendations]\n\t}\n\n\treturn ctx.HTML(components.RecommendedAnime(recommendations, user))\n}\n<commit_msg>Improved recommendations<commit_after>package recommended\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"sort\"\n\n\t\"github.com\/aerogo\/aero\"\n\t\"github.com\/animenotifier\/arn\"\n\t\"github.com\/animenotifier\/notify.moe\/components\"\n)\n\nconst (\n\tmaxRecommendations = 20\n\tworstGenreCount = 5\n)\n\n\/\/ Anime shows a list of recommended anime.\nfunc Anime(ctx *aero.Context) string {\n\tnick := ctx.Get(\"nick\")\n\tuser, err := arn.GetUserByNick(nick)\n\n\tif err != nil {\n\t\treturn ctx.Error(http.StatusUnauthorized, \"Not logged in\", err)\n\t}\n\n\tanimeList := user.AnimeList()\n\tgenreItems := animeList.Genres()\n\tgenreAffinity := map[string]float64{}\n\tworstGenres := []string{}\n\n\tfor genre, animeListItems := range genreItems {\n\t\taffinity := 0.0\n\n\t\tfor _, item := range animeListItems {\n\t\t\t\/\/ if item.Status == arn.AnimeListStatusDropped {\n\t\t\t\/\/ \taffinity -= 5.0\n\t\t\t\/\/ \tcontinue\n\t\t\t\/\/ }\n\n\t\t\tif item.Rating.Overall != 0 {\n\t\t\t\taffinity += item.Rating.Overall\n\t\t\t} else {\n\t\t\t\taffinity += 5.0\n\t\t\t}\n\t\t}\n\n\t\tgenreAffinity[genre] = affinity\n\t\tworstGenres = append(worstGenres, genre)\n\t}\n\n\tsort.Slice(worstGenres, func(i, j int) bool {\n\t\treturn genreAffinity[worstGenres[i]] < genreAffinity[worstGenres[j]]\n\t})\n\n\tif len(worstGenres) > worstGenreCount {\n\t\tworstGenres = worstGenres[:worstGenreCount]\n\t}\n\n\tfmt.Println(worstGenres)\n\n\t\/\/ Get all anime\n\trecommendations := arn.AllAnime()\n\n\t\/\/ Affinity maps an anime ID to a number that indicates how likely a user is going to enjoy that anime.\n\taffinity := map[string]float64{}\n\n\t\/\/ Calculate affinity for each anime\n\tfor _, anime := range recommendations {\n\t\t\/\/ Skip anime that are upcoming\n\t\tif anime.Status == \"upcoming\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Skip anime from my list (except planned anime)\n\t\texisting := animeList.Find(anime.ID)\n\n\t\tif existing != nil && existing.Status != arn.AnimeListStatusPlanned {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Skip anime that contain one of the user's worst genres\n\t\tworstGenreFound := false\n\n\t\tfor _, genre := range anime.Genres {\n\t\t\tif arn.Contains(worstGenres, genre) {\n\t\t\t\tworstGenreFound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif worstGenreFound {\n\t\t\tcontinue\n\t\t}\n\n\t\tanimeAffinity := 0.0\n\n\t\t\/\/ Planned anime go higher\n\t\tif existing != nil && existing.Status == arn.AnimeListStatusPlanned {\n\t\t\tanimeAffinity += 75.0\n\t\t}\n\n\t\tanimeAffinity += float64(anime.Popularity.Total())\n\t\taffinity[anime.ID] = 
animeAffinity\n\t}\n\n\t\/\/ Sort\n\tsort.Slice(recommendations, func(i, j int) bool {\n\t\taffinityA := affinity[recommendations[i].ID]\n\t\taffinityB := affinity[recommendations[j].ID]\n\n\t\tif affinityA == affinityB {\n\t\t\treturn recommendations[i].Title.Canonical < recommendations[j].Title.Canonical\n\t\t}\n\n\t\treturn affinityA > affinityB\n\t})\n\n\t\/\/ Take the top maxRecommendations entries\n\tif len(recommendations) > maxRecommendations {\n\t\trecommendations = recommendations[:maxRecommendations]\n\t}\n\n\treturn ctx.HTML(components.RecommendedAnime(recommendations, user))\n}\n<|endoftext|>"} {"text":"<commit_before>package swagger2\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n)\n\ntype Specification struct {\n\tHost string `json:\"host,omitempty\"`\n\tInfo Info `json:\"info,omitempty\"`\n\tBasePath string `json:\"basePath,omitempty\"`\n\tSchemes []string `json:\"schemes,omitempty\"`\n\tPaths map[string]Path `json:\"paths,omitempty\"`\n}\n\nfunc NewSpecificationFromBytes(data []byte) (Specification, error) {\n\tspec := Specification{}\n\terr := json.Unmarshal(data, &spec)\n\treturn spec, err\n}\n\nfunc ReadSwagger2Spec(filepath string) (Specification, error) {\n\tbytes, err := ioutil.ReadFile(filepath)\n\tif err != nil {\n\t\treturn Specification{}, err\n\t}\n\treturn NewSpecificationFromBytes(bytes)\n}\n\ntype Info struct {\n\tDescription string `json:\"description,omitempty\"`\n\tVersion string `json:\"version,omitempty\"`\n\tTitle string `json:\"title,omitempty\"`\n\tTermsOfService string `json:\"termsOfService,omitempty\"`\n}\n\ntype Path struct {\n\tGet Endpoint `json:\"get,omitempty\"`\n\tPost Endpoint `json:\"post,omitempty\"`\n\tPut Endpoint `json:\"put,omitempty\"`\n\tDelete Endpoint `json:\"delete,omitempty\"`\n}\n\ntype Endpoint struct {\n\tTags []string `json:\"tags,omitempty\"`\n\tSummary string `json:\"summary,omitempty\"`\n\tOperationId string `json:\"operationId,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tConsumes []string `json:\"consumes,omitempty\"`\n\tProduces []string `json:\"produces,omitempty\"`\n\tParameters []Parameter `json:\"parameters\"`\n}\n\ntype Parameter struct {\n\tName string `json:\"name,omitempty\"`\n\tType string `json:\"type,omitempty\"`\n\tIn string `json:\"in,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tRequired bool `json:\"required,omitempty\"`\n\tDefault interface{} `json:\"default,omitempty\"`\n}\n<commit_msg>swagger2: lint<commit_after>package swagger2\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n)\n\ntype Specification struct {\n\tHost string `json:\"host,omitempty\"`\n\tInfo Info `json:\"info,omitempty\"`\n\tBasePath string `json:\"basePath,omitempty\"`\n\tSchemes []string `json:\"schemes,omitempty\"`\n\tPaths map[string]Path `json:\"paths,omitempty\"`\n}\n\nfunc NewSpecificationFromBytes(data []byte) (Specification, error) {\n\tspec := Specification{}\n\terr := json.Unmarshal(data, &spec)\n\treturn spec, err\n}\n\nfunc ReadSwagger2Spec(filepath string) (Specification, error) {\n\tbytes, err := ioutil.ReadFile(filepath)\n\tif err != nil {\n\t\treturn Specification{}, err\n\t}\n\treturn NewSpecificationFromBytes(bytes)\n}\n\ntype Info struct {\n\tDescription string `json:\"description,omitempty\"`\n\tVersion string `json:\"version,omitempty\"`\n\tTitle string `json:\"title,omitempty\"`\n\tTermsOfService string `json:\"termsOfService,omitempty\"`\n}\n\ntype Path struct {\n\tGet Endpoint `json:\"get,omitempty\"`\n\tPost Endpoint `json:\"post,omitempty\"`\n\tPut Endpoint 
`json:\"put,omitempty\"`\n\tDelete Endpoint `json:\"delete,omitempty\"`\n}\n\ntype Endpoint struct {\n\tTags []string `json:\"tags,omitempty\"`\n\tSummary string `json:\"summary,omitempty\"`\n\tOperationID string `json:\"operationId,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tConsumes []string `json:\"consumes,omitempty\"`\n\tProduces []string `json:\"produces,omitempty\"`\n\tParameters []Parameter `json:\"parameters\"`\n}\n\ntype Parameter struct {\n\tName string `json:\"name,omitempty\"`\n\tType string `json:\"type,omitempty\"`\n\tIn string `json:\"in,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tRequired bool `json:\"required,omitempty\"`\n\tDefault interface{} `json:\"default,omitempty\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package comm\n\nimport (\n\t\"crypto\/rand\"\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n\n\tlog \"github.com\/schollz\/logger\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestComm(t *testing.T) {\n\ttoken := make([]byte, 40000000)\n\trand.Read(token)\n\n\tport := \"8001\"\n\tgo func() {\n\t\tlog.Debugf(\"starting TCP server on \" + port)\n\t\tserver, err := net.Listen(\"tcp\", \"0.0.0.0:\"+port)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t\tdefer server.Close()\n\t\t\/\/ spawn a new goroutine whenever a client connects\n\t\tfor {\n\t\t\tconnection, err := server.Accept()\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\t\t\tlog.Debugf(\"client %s connected\", connection.RemoteAddr().String())\n\t\t\tgo func(port string, connection net.Conn) {\n\t\t\t\tc := New(connection)\n\t\t\t\terr = c.Send([]byte(\"hello, world\"))\n\t\t\t\tassert.Nil(t, err)\n\t\t\t\tdata, err := c.Receive()\n\t\t\t\tassert.Nil(t, err)\n\t\t\t\tassert.Equal(t, []byte(\"hello, computer\"), data)\n\t\t\t\tdata, err = c.Receive()\n\t\t\t\tassert.Nil(t, err)\n\t\t\t\tassert.Equal(t, []byte{'\\x00'}, data)\n\t\t\t\tdata, err = c.Receive()\n\t\t\t\tassert.Nil(t, err)\n\t\t\t\tassert.Equal(t, token, data)\n\t\t\t}(port, connection)\n\t\t}\n\t}()\n\n\ttime.Sleep(300 * time.Millisecond)\n\ta, err := NewConnection(\"localhost:\"+port, 10*time.Minute)\n\tassert.Nil(t, err)\n\tdata, err := a.Receive()\n\tassert.Equal(t, []byte(\"hello, world\"), data)\n\tassert.Nil(t, err)\n\tassert.Nil(t, a.Send([]byte(\"hello, computer\")))\n\tassert.Nil(t, a.Send([]byte{'\\x00'}))\n\n\tassert.Nil(t, a.Send(token))\n\t_ = a.Connection()\n\ta.Close()\n\tassert.NotNil(t, a.Send(token))\n\t_, err = a.Write(token)\n\tassert.NotNil(t, err)\n\n}\n<commit_msg>test should not exceed max bytes<commit_after>package comm\n\nimport (\n\t\"crypto\/rand\"\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n\n\tlog \"github.com\/schollz\/logger\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestComm(t *testing.T) {\n\ttoken := make([]byte, MAXBYTES)\n\trand.Read(token)\n\n\tport := \"8001\"\n\tgo func() {\n\t\tlog.Debugf(\"starting TCP server on \" + port)\n\t\tserver, err := net.Listen(\"tcp\", \"0.0.0.0:\"+port)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t\tdefer server.Close()\n\t\t\/\/ spawn a new goroutine whenever a client connects\n\t\tfor {\n\t\t\tconnection, err := server.Accept()\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\t\t\tlog.Debugf(\"client %s connected\", connection.RemoteAddr().String())\n\t\t\tgo func(port string, connection net.Conn) {\n\t\t\t\tc := New(connection)\n\t\t\t\terr = c.Send([]byte(\"hello, world\"))\n\t\t\t\tassert.Nil(t, err)\n\t\t\t\tdata, err := c.Receive()\n\t\t\t\tassert.Nil(t, 
err)\n\t\t\t\tassert.Equal(t, []byte(\"hello, computer\"), data)\n\t\t\t\tdata, err = c.Receive()\n\t\t\t\tassert.Nil(t, err)\n\t\t\t\tassert.Equal(t, []byte{'\\x00'}, data)\n\t\t\t\tdata, err = c.Receive()\n\t\t\t\tassert.Nil(t, err)\n\t\t\t\tassert.Equal(t, token, data)\n\t\t\t}(port, connection)\n\t\t}\n\t}()\n\n\ttime.Sleep(300 * time.Millisecond)\n\ta, err := NewConnection(\"localhost:\"+port, 10*time.Minute)\n\tassert.Nil(t, err)\n\tdata, err := a.Receive()\n\tassert.Equal(t, []byte(\"hello, world\"), data)\n\tassert.Nil(t, err)\n\tassert.Nil(t, a.Send([]byte(\"hello, computer\")))\n\tassert.Nil(t, a.Send([]byte{'\\x00'}))\n\n\tassert.Nil(t, a.Send(token))\n\t_ = a.Connection()\n\ta.Close()\n\tassert.NotNil(t, a.Send(token))\n\t_, err = a.Write(token)\n\tassert.NotNil(t, err)\n\n}\n<|endoftext|>"} {"text":"<commit_before>package conf\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/joho\/godotenv\"\n\t\"github.com\/kelseyhightower\/envconfig\"\n\t\"github.com\/netlify\/netlify-commons\/nconf\"\n)\n\n\/\/ OAuthProviderConfiguration holds all config related to external account providers.\ntype OAuthProviderConfiguration struct {\n\tClientID string `json:\"client_id\" split_words:\"true\"`\n\tSecret string `json:\"secret\"`\n\tRedirectURI string `json:\"redirect_uri\" split_words:\"true\"`\n\tURL string `json:\"url\"`\n\tEnabled bool `json:\"enabled\"`\n}\n\n\/\/ DBConfiguration holds all the database related configuration.\ntype DBConfiguration struct {\n\tDialect string `json:\"dialect\"`\n\tDriver string `json:\"driver\" required:\"true\"`\n\tURL string `json:\"url\" envconfig:\"DATABASE_URL\" required:\"true\"`\n\tNamespace string `json:\"namespace\"`\n\tAutomigrate bool `json:\"automigrate\"`\n}\n\n\/\/ JWTConfiguration holds all the JWT related configuration.\ntype JWTConfiguration struct {\n\tSecret string `json:\"secret\" required:\"true\"`\n\tExp int `json:\"exp\"`\n\tAud string `json:\"aud\"`\n\tAdminGroupName string `json:\"admin_group_name\" split_words:\"true\"`\n\tDefaultGroupName string `json:\"default_group_name\" split_words:\"true\"`\n}\n\n\/\/ GlobalConfiguration holds all the configuration that applies to all instances.\ntype GlobalConfiguration struct {\n\tAPI struct {\n\t\tHost string\n\t\tPort int `envconfig:\"PORT\" default:\"8081\"`\n\t\tEndpoint string\n\t}\n\tDB DBConfiguration\n\tExternal ExternalProviderConfiguration\n\tLogging nconf.LoggingConfig `envconfig:\"LOG\"`\n\tOperatorToken string `split_words:\"true\" required:\"true\"`\n\tMultiInstanceMode bool\n\tSMTP SMTPConfiguration\n}\n\n\/\/ EmailContentConfiguration holds the configuration for emails, both subjects and template URLs.\ntype EmailContentConfiguration struct {\n\tInvite string `json:\"invite\"`\n\tConfirmation string `json:\"confirmation\"`\n\tRecovery string `json:\"recovery\"`\n\tEmailChange string `json:\"email_change\" split_words:\"true\"`\n}\n\ntype ExternalProviderConfiguration struct {\n\tBitbucket OAuthProviderConfiguration `json:\"bitbucket\"`\n\tGithub OAuthProviderConfiguration `json:\"github\"`\n\tGitlab OAuthProviderConfiguration `json:\"gitlab\"`\n\tGoogle OAuthProviderConfiguration `json:\"google\"`\n\tFacebook OAuthProviderConfiguration `json:\"facebook\"`\n\tRedirectURL string `json:\"redirect_url\"`\n}\n\ntype SMTPConfiguration struct {\n\tMaxFrequency time.Duration `json:\"max_frequency\" split_words:\"true\"`\n\tHost string `json:\"host\"`\n\tPort int `json:\"port\" default:\"587\"`\n\tUser string `json:\"user\"`\n\tPass string 
`json:\"pass\"`\n\tAdminEmail string `json:\"admin_email\" split_words:\"true\"`\n}\n\n\/\/ Configuration holds all the per-instance configuration.\ntype Configuration struct {\n\tSiteURL string `json:\"site_url\" split_words:\"true\" required:\"true\"`\n\tJWT JWTConfiguration `json:\"jwt\"`\n\tSMTP SMTPConfiguration `json:\"smtp\"`\n\tMailer struct {\n\t\tAutoconfirm bool `json:\"autoconfirm\"`\n\t\tSubjects EmailContentConfiguration `json:\"subjects\"`\n\t\tTemplates EmailContentConfiguration `json:\"templates\"`\n\t\tURLPaths EmailContentConfiguration `json:\"url_paths\"`\n\t} `json:\"mailer\"`\n\tExternal ExternalProviderConfiguration `json:\"external\"`\n\tDisableSignup bool `json:\"disable_signup\" split_words:\"true\"`\n\tWebhook WebhookConfig `json:\"webhook\" split_words:\"true\"`\n\tCookie struct {\n\t\tKey string `json:\"key\"`\n\t\tDuration int `json:\"duration\"`\n\t} `json:\"cookies\"`\n}\n\nfunc loadEnvironment(filename string) error {\n\tvar err error\n\tif filename != \"\" {\n\t\terr = godotenv.Load(filename)\n\t} else {\n\t\terr = godotenv.Load()\n\t\t\/\/ handle if .env file does not exist, this is OK\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn err\n}\n\ntype WebhookConfig struct {\n\tURL string `json:\"url\"`\n\tRetries int `json:\"retries\"`\n\tTimeoutSec int `json:\"timeout_sec\"`\n\tSecret string `json:\"jwt_secret\"`\n\tEvents []string `json:\"events\"`\n}\n\nfunc (w *WebhookConfig) HasEvent(event string) bool {\n\tfor _, name := range w.Events {\n\t\tif event == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ LoadGlobal loads configuration from file and environment variables.\nfunc LoadGlobal(filename string) (*GlobalConfiguration, error) {\n\tif err := loadEnvironment(filename); err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig := new(GlobalConfiguration)\n\tif err := envconfig.Process(\"gotrue\", config); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif _, err := nconf.ConfigureLogging(&config.Logging); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif config.SMTP.MaxFrequency == 0 {\n\t\tconfig.SMTP.MaxFrequency = 15 * time.Minute\n\t}\n\treturn config, nil\n}\n\n\/\/ LoadConfig loads per-instance configuration.\nfunc LoadConfig(filename string) (*Configuration, error) {\n\tif err := loadEnvironment(filename); err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig := new(Configuration)\n\tif err := envconfig.Process(\"gotrue\", config); err != nil {\n\t\treturn nil, err\n\t}\n\tconfig.ApplyDefaults()\n\treturn config, nil\n}\n\n\/\/ ApplyDefaults sets defaults for a Configuration\nfunc (config *Configuration) ApplyDefaults() {\n\tif config.JWT.AdminGroupName == \"\" {\n\t\tconfig.JWT.AdminGroupName = \"admin\"\n\t}\n\n\tif config.JWT.Exp == 0 {\n\t\tconfig.JWT.Exp = 3600\n\t}\n\n\tif config.Mailer.URLPaths.Invite == \"\" {\n\t\tconfig.Mailer.URLPaths.Invite = \"\/\"\n\t}\n\tif config.Mailer.URLPaths.Confirmation == \"\" {\n\t\tconfig.Mailer.URLPaths.Confirmation = \"\/\"\n\t}\n\tif config.Mailer.URLPaths.Recovery == \"\" {\n\t\tconfig.Mailer.URLPaths.Recovery = \"\/\"\n\t}\n\tif config.Mailer.URLPaths.EmailChange == \"\" {\n\t\tconfig.Mailer.URLPaths.EmailChange = \"\/\"\n\t}\n\n\tif config.SMTP.MaxFrequency == 0 {\n\t\tconfig.SMTP.MaxFrequency = 15 * time.Minute\n\t}\n\n\tif config.Cookie.Key == \"\" {\n\t\tconfig.Cookie.Key = \"nf_jwt\"\n\t}\n\n\tif config.Cookie.Duration == 0 {\n\t\tconfig.Cookie.Duration = 86400\n\t}\n}\n\nfunc (o *OAuthProviderConfiguration) Validate() error {\n\tif !o.Enabled {\n\t\treturn 
errors.New(\"Provider is not enabled\")\n\t}\n\tif o.ClientID == \"\" {\n\t\treturn errors.New(\"Missing Oauth client ID\")\n\t}\n\tif o.Secret == \"\" {\n\t\treturn errors.New(\"Missing Oauth secret\")\n\t}\n\tif o.RedirectURI == \"\" {\n\t\treturn errors.New(\"Missing redirect URI\")\n\t}\n\treturn nil\n}\n<commit_msg>Rename jwt_secret to secret for consistency<commit_after>package conf\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/joho\/godotenv\"\n\t\"github.com\/kelseyhightower\/envconfig\"\n\t\"github.com\/netlify\/netlify-commons\/nconf\"\n)\n\n\/\/ OAuthProviderConfiguration holds all config related to external account providers.\ntype OAuthProviderConfiguration struct {\n\tClientID string `json:\"client_id\" split_words:\"true\"`\n\tSecret string `json:\"secret\"`\n\tRedirectURI string `json:\"redirect_uri\" split_words:\"true\"`\n\tURL string `json:\"url\"`\n\tEnabled bool `json:\"enabled\"`\n}\n\n\/\/ DBConfiguration holds all the database related configuration.\ntype DBConfiguration struct {\n\tDialect string `json:\"dialect\"`\n\tDriver string `json:\"driver\" required:\"true\"`\n\tURL string `json:\"url\" envconfig:\"DATABASE_URL\" required:\"true\"`\n\tNamespace string `json:\"namespace\"`\n\tAutomigrate bool `json:\"automigrate\"`\n}\n\n\/\/ JWTConfiguration holds all the JWT related configuration.\ntype JWTConfiguration struct {\n\tSecret string `json:\"secret\" required:\"true\"`\n\tExp int `json:\"exp\"`\n\tAud string `json:\"aud\"`\n\tAdminGroupName string `json:\"admin_group_name\" split_words:\"true\"`\n\tDefaultGroupName string `json:\"default_group_name\" split_words:\"true\"`\n}\n\n\/\/ GlobalConfiguration holds all the configuration that applies to all instances.\ntype GlobalConfiguration struct {\n\tAPI struct {\n\t\tHost string\n\t\tPort int `envconfig:\"PORT\" default:\"8081\"`\n\t\tEndpoint string\n\t}\n\tDB DBConfiguration\n\tExternal ExternalProviderConfiguration\n\tLogging nconf.LoggingConfig `envconfig:\"LOG\"`\n\tOperatorToken string `split_words:\"true\" required:\"true\"`\n\tMultiInstanceMode bool\n\tSMTP SMTPConfiguration\n}\n\n\/\/ EmailContentConfiguration holds the configuration for emails, both subjects and template URLs.\ntype EmailContentConfiguration struct {\n\tInvite string `json:\"invite\"`\n\tConfirmation string `json:\"confirmation\"`\n\tRecovery string `json:\"recovery\"`\n\tEmailChange string `json:\"email_change\" split_words:\"true\"`\n}\n\ntype ExternalProviderConfiguration struct {\n\tBitbucket OAuthProviderConfiguration `json:\"bitbucket\"`\n\tGithub OAuthProviderConfiguration `json:\"github\"`\n\tGitlab OAuthProviderConfiguration `json:\"gitlab\"`\n\tGoogle OAuthProviderConfiguration `json:\"google\"`\n\tFacebook OAuthProviderConfiguration `json:\"facebook\"`\n\tRedirectURL string `json:\"redirect_url\"`\n}\n\ntype SMTPConfiguration struct {\n\tMaxFrequency time.Duration `json:\"max_frequency\" split_words:\"true\"`\n\tHost string `json:\"host\"`\n\tPort int `json:\"port\" default:\"587\"`\n\tUser string `json:\"user\"`\n\tPass string `json:\"pass\"`\n\tAdminEmail string `json:\"admin_email\" split_words:\"true\"`\n}\n\n\/\/ Configuration holds all the per-instance configuration.\ntype Configuration struct {\n\tSiteURL string `json:\"site_url\" split_words:\"true\" required:\"true\"`\n\tJWT JWTConfiguration `json:\"jwt\"`\n\tSMTP SMTPConfiguration `json:\"smtp\"`\n\tMailer struct {\n\t\tAutoconfirm bool `json:\"autoconfirm\"`\n\t\tSubjects EmailContentConfiguration `json:\"subjects\"`\n\t\tTemplates 
EmailContentConfiguration `json:\"templates\"`\n\t\tURLPaths EmailContentConfiguration `json:\"url_paths\"`\n\t} `json:\"mailer\"`\n\tExternal ExternalProviderConfiguration `json:\"external\"`\n\tDisableSignup bool `json:\"disable_signup\" split_words:\"true\"`\n\tWebhook WebhookConfig `json:\"webhook\" split_words:\"true\"`\n\tCookie struct {\n\t\tKey string `json:\"key\"`\n\t\tDuration int `json:\"duration\"`\n\t} `json:\"cookies\"`\n}\n\nfunc loadEnvironment(filename string) error {\n\tvar err error\n\tif filename != \"\" {\n\t\terr = godotenv.Load(filename)\n\t} else {\n\t\terr = godotenv.Load()\n\t\t\/\/ handle if .env file does not exist, this is OK\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn err\n}\n\ntype WebhookConfig struct {\n\tURL string `json:\"url\"`\n\tRetries int `json:\"retries\"`\n\tTimeoutSec int `json:\"timeout_sec\"`\n\tSecret string `json:\"secret\"`\n\tEvents []string `json:\"events\"`\n}\n\nfunc (w *WebhookConfig) HasEvent(event string) bool {\n\tfor _, name := range w.Events {\n\t\tif event == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ LoadGlobal loads configuration from file and environment variables.\nfunc LoadGlobal(filename string) (*GlobalConfiguration, error) {\n\tif err := loadEnvironment(filename); err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig := new(GlobalConfiguration)\n\tif err := envconfig.Process(\"gotrue\", config); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif _, err := nconf.ConfigureLogging(&config.Logging); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif config.SMTP.MaxFrequency == 0 {\n\t\tconfig.SMTP.MaxFrequency = 15 * time.Minute\n\t}\n\treturn config, nil\n}\n\n\/\/ LoadConfig loads per-instance configuration.\nfunc LoadConfig(filename string) (*Configuration, error) {\n\tif err := loadEnvironment(filename); err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig := new(Configuration)\n\tif err := envconfig.Process(\"gotrue\", config); err != nil {\n\t\treturn nil, err\n\t}\n\tconfig.ApplyDefaults()\n\treturn config, nil\n}\n\n\/\/ ApplyDefaults sets defaults for a Configuration\nfunc (config *Configuration) ApplyDefaults() {\n\tif config.JWT.AdminGroupName == \"\" {\n\t\tconfig.JWT.AdminGroupName = \"admin\"\n\t}\n\n\tif config.JWT.Exp == 0 {\n\t\tconfig.JWT.Exp = 3600\n\t}\n\n\tif config.Mailer.URLPaths.Invite == \"\" {\n\t\tconfig.Mailer.URLPaths.Invite = \"\/\"\n\t}\n\tif config.Mailer.URLPaths.Confirmation == \"\" {\n\t\tconfig.Mailer.URLPaths.Confirmation = \"\/\"\n\t}\n\tif config.Mailer.URLPaths.Recovery == \"\" {\n\t\tconfig.Mailer.URLPaths.Recovery = \"\/\"\n\t}\n\tif config.Mailer.URLPaths.EmailChange == \"\" {\n\t\tconfig.Mailer.URLPaths.EmailChange = \"\/\"\n\t}\n\n\tif config.SMTP.MaxFrequency == 0 {\n\t\tconfig.SMTP.MaxFrequency = 15 * time.Minute\n\t}\n\n\tif config.Cookie.Key == \"\" {\n\t\tconfig.Cookie.Key = \"nf_jwt\"\n\t}\n\n\tif config.Cookie.Duration == 0 {\n\t\tconfig.Cookie.Duration = 86400\n\t}\n}\n\nfunc (o *OAuthProviderConfiguration) Validate() error {\n\tif !o.Enabled {\n\t\treturn errors.New(\"Provider is not enabled\")\n\t}\n\tif o.ClientID == \"\" {\n\t\treturn errors.New(\"Missing Oauth client ID\")\n\t}\n\tif o.Secret == \"\" {\n\t\treturn errors.New(\"Missing Oauth secret\")\n\t}\n\tif o.RedirectURI == \"\" {\n\t\treturn errors.New(\"Missing redirect URI\")\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package conf\n\nimport 
(\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/netlify\/netlify-commons\/nconf\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ ExternalConfiguration holds all config related to external account providers.\ntype ExternalConfiguration struct {\n\tClientID string `json:\"client_id\"`\n\tSecret string `json:\"secret\"`\n\tRedirectURI string `json:\"redirect_uri\"`\n\tURL string `json:\"url\"`\n}\n\n\/\/ DBConfiguration holds all the database related configuration.\ntype DBConfiguration struct {\n\tDriver string `json:\"driver\"`\n\tConnURL string `json:\"url\"`\n\tNamespace string `json:\"namespace\"`\n\tAutomigrate bool `json:\"automigrate\"`\n}\n\n\/\/ JWTConfiguration holds all the JWT related configuration.\ntype JWTConfiguration struct {\n\tSecret string `json:\"secret\"`\n\tExp int `json:\"exp\"`\n\tAud string `json:\"aud\"`\n\tAdminGroupName string `json:\"admin_group_name\"`\n\tAdminGroupDisabled bool `json:\"admin_group_disabled\"`\n\tDefaultGroupName string `json:\"default_group_name\"`\n}\n\n\/\/ GlobalConfiguration holds all the configuration that applies to all instances.\ntype GlobalConfiguration struct {\n\tAPI struct {\n\t\tHost string `json:\"host\"`\n\t\tPort int `json:\"port\"`\n\t\tEndpoint string `json:\"endpoint\"`\n\t} `json:\"api\"`\n\tDB DBConfiguration `json:\"db\"`\n\tLogging nconf.LoggingConfig `json:\"log_conf\"`\n\tNetlifySecret string `json:\"netlify_secret\"`\n\tMultiInstanceMode bool `json:\"-\"`\n}\n\n\/\/ Configuration holds all the per-instance configuration.\ntype Configuration struct {\n\tSiteURL string `json:\"site_url\"`\n\tJWT JWTConfiguration `json:\"jwt\"`\n\tMailer struct {\n\t\tMaxFrequency time.Duration `json:\"max_frequency\"`\n\t\tAutoconfirm bool `json:\"autoconfirm\"`\n\t\tHost string `json:\"host\"`\n\t\tPort int `json:\"port\"`\n\t\tUser string `json:\"user\"`\n\t\tPass string `json:\"pass\"`\n\t\tMemberFolder string `json:\"member_folder\"`\n\t\tAdminEmail string `json:\"admin_email\"`\n\t\tSubjects struct {\n\t\t\tInvite string `json:\"invite\"`\n\t\t\tConfirmation string `json:\"confirmation\"`\n\t\t\tRecovery string `json:\"recovery\"`\n\t\t\tEmailChange string `json:\"email_change\"`\n\t\t} `json:\"subjects\"`\n\t\tTemplates struct {\n\t\t\tInvite string `json:\"invite\"`\n\t\t\tConfirmation string `json:\"confirmation\"`\n\t\t\tRecovery string `json:\"recovery\"`\n\t\t\tEmailChange string `json:\"email_change\"`\n\t\t} `json:\"templates\"`\n\t} `json:\"mailer\"`\n\tExternal struct {\n\t\tGithub ExternalConfiguration `json:\"github\"`\n\t\tBitbucket ExternalConfiguration `json:\"bitbucket\"`\n\t\tGitlab ExternalConfiguration `json:\"gitlab\"`\n\t} `json:\"external\"`\n}\n\n\/\/ LoadGlobalFromFile loads global configuration from the provided filename.\nfunc LoadGlobalFromFile(name string) (*GlobalConfiguration, error) {\n\tcmd := &cobra.Command{}\n\tconfig := \"\"\n\tcmd.Flags().StringVar(&config, \"config\", \"config.test.json\", \"Config file\")\n\tcmd.Flags().Set(\"config\", name)\n\treturn LoadGlobal(cmd)\n}\n\n\/\/ LoadGlobal loads configuration from file and environment variables.\nfunc LoadGlobal(cmd *cobra.Command) (*GlobalConfiguration, error) {\n\tconfig := new(GlobalConfiguration)\n\n\tif err := nconf.LoadConfig(cmd, \"gotrue\", config); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif config.DB.ConnURL == \"\" && os.Getenv(\"DATABASE_URL\") != \"\" {\n\t\tconfig.DB.ConnURL = os.Getenv(\"DATABASE_URL\")\n\t}\n\n\tif config.API.Port == 0 && os.Getenv(\"PORT\") != \"\" {\n\t\tport, err := 
strconv.Atoi(os.Getenv(\"PORT\"))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"formatting PORT into int\")\n\t\t}\n\n\t\tconfig.API.Port = port\n\t}\n\n\tif _, err := nconf.ConfigureLogging(&config.Logging); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn config, nil\n}\n\n\/\/ LoadConfigFromFile loads per-instance configuration from the provided filename.\nfunc LoadConfigFromFile(name string) (*Configuration, error) {\n\tcmd := &cobra.Command{}\n\tconfig := \"\"\n\tcmd.Flags().StringVar(&config, \"config\", \"config.test.json\", \"Config file\")\n\tcmd.Flags().Set(\"config\", name)\n\treturn LoadConfig(cmd)\n}\n\n\/\/ LoadConfig loads per-instance configuration.\nfunc LoadConfig(cmd *cobra.Command) (*Configuration, error) {\n\tconfig := new(Configuration)\n\tif err := nconf.LoadConfig(cmd, \"gotrue\", config); err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig.ApplyDefaults()\n\treturn config, nil\n}\n\nfunc (config *Configuration) ApplyDefaults() {\n\tif config.JWT.AdminGroupName == \"\" {\n\t\tconfig.JWT.AdminGroupName = \"admin\"\n\t}\n\n\tif config.JWT.Exp == 0 {\n\t\tconfig.JWT.Exp = 3600\n\t}\n\n\tif config.Mailer.MaxFrequency == 0 {\n\t\tconfig.Mailer.MaxFrequency = 15 * time.Minute\n\t}\n\n\tif config.Mailer.MemberFolder == \"\" {\n\t\tconfig.Mailer.MemberFolder = \"\/member\"\n\t}\n\n\tif config.Mailer.Templates.Invite == \"\" {\n\t\tconfig.Mailer.Templates.Invite = \"\/.netlify\/gotrue\/templates\/invite.html\"\n\t}\n\tif config.Mailer.Templates.Confirmation == \"\" {\n\t\tconfig.Mailer.Templates.Confirmation = \"\/.netlify\/gotrue\/templates\/confirm.html\"\n\t}\n\tif config.Mailer.Templates.Recovery == \"\" {\n\t\tconfig.Mailer.Templates.Recovery = \"\/.netlify\/gotrue\/templates\/recover.html\"\n\t}\n\tif config.Mailer.Templates.EmailChange == \"\" {\n\t\tconfig.Mailer.Templates.EmailChange = \"\/.netlify\/gotrue\/templates\/email-change.html\"\n\t}\n}\n<commit_msg>Add default port of 8081<commit_after>package conf\n\nimport (\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/netlify\/netlify-commons\/nconf\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ ExternalConfiguration holds all config related to external account providers.\ntype ExternalConfiguration struct {\n\tClientID string `json:\"client_id\"`\n\tSecret string `json:\"secret\"`\n\tRedirectURI string `json:\"redirect_uri\"`\n\tURL string `json:\"url\"`\n}\n\n\/\/ DBConfiguration holds all the database related configuration.\ntype DBConfiguration struct {\n\tDriver string `json:\"driver\"`\n\tConnURL string `json:\"url\"`\n\tNamespace string `json:\"namespace\"`\n\tAutomigrate bool `json:\"automigrate\"`\n}\n\n\/\/ JWTConfiguration holds all the JWT related configuration.\ntype JWTConfiguration struct {\n\tSecret string `json:\"secret\"`\n\tExp int `json:\"exp\"`\n\tAud string `json:\"aud\"`\n\tAdminGroupName string `json:\"admin_group_name\"`\n\tAdminGroupDisabled bool `json:\"admin_group_disabled\"`\n\tDefaultGroupName string `json:\"default_group_name\"`\n}\n\n\/\/ GlobalConfiguration holds all the configuration that applies to all instances.\ntype GlobalConfiguration struct {\n\tAPI struct {\n\t\tHost string `json:\"host\"`\n\t\tPort int `json:\"port\"`\n\t\tEndpoint string `json:\"endpoint\"`\n\t} `json:\"api\"`\n\tDB DBConfiguration `json:\"db\"`\n\tLogging nconf.LoggingConfig `json:\"log_conf\"`\n\tNetlifySecret string `json:\"netlify_secret\"`\n\tMultiInstanceMode bool `json:\"-\"`\n}\n\n\/\/ Configuration holds all the per-instance configuration.\ntype 
Configuration struct {\n\tSiteURL string `json:\"site_url\"`\n\tJWT JWTConfiguration `json:\"jwt\"`\n\tMailer struct {\n\t\tMaxFrequency time.Duration `json:\"max_frequency\"`\n\t\tAutoconfirm bool `json:\"autoconfirm\"`\n\t\tHost string `json:\"host\"`\n\t\tPort int `json:\"port\"`\n\t\tUser string `json:\"user\"`\n\t\tPass string `json:\"pass\"`\n\t\tMemberFolder string `json:\"member_folder\"`\n\t\tAdminEmail string `json:\"admin_email\"`\n\t\tSubjects struct {\n\t\t\tInvite string `json:\"invite\"`\n\t\t\tConfirmation string `json:\"confirmation\"`\n\t\t\tRecovery string `json:\"recovery\"`\n\t\t\tEmailChange string `json:\"email_change\"`\n\t\t} `json:\"subjects\"`\n\t\tTemplates struct {\n\t\t\tInvite string `json:\"invite\"`\n\t\t\tConfirmation string `json:\"confirmation\"`\n\t\t\tRecovery string `json:\"recovery\"`\n\t\t\tEmailChange string `json:\"email_change\"`\n\t\t} `json:\"templates\"`\n\t} `json:\"mailer\"`\n\tExternal struct {\n\t\tGithub ExternalConfiguration `json:\"github\"`\n\t\tBitbucket ExternalConfiguration `json:\"bitbucket\"`\n\t\tGitlab ExternalConfiguration `json:\"gitlab\"`\n\t} `json:\"external\"`\n}\n\n\/\/ LoadGlobalFromFile loads global configuration from the provided filename.\nfunc LoadGlobalFromFile(name string) (*GlobalConfiguration, error) {\n\tcmd := &cobra.Command{}\n\tconfig := \"\"\n\tcmd.Flags().StringVar(&config, \"config\", \"config.test.json\", \"Config file\")\n\tcmd.Flags().Set(\"config\", name)\n\treturn LoadGlobal(cmd)\n}\n\n\/\/ LoadGlobal loads configuration from file and environment variables.\nfunc LoadGlobal(cmd *cobra.Command) (*GlobalConfiguration, error) {\n\tconfig := new(GlobalConfiguration)\n\n\tif err := nconf.LoadConfig(cmd, \"gotrue\", config); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif config.DB.ConnURL == \"\" && os.Getenv(\"DATABASE_URL\") != \"\" {\n\t\tconfig.DB.ConnURL = os.Getenv(\"DATABASE_URL\")\n\t}\n\n\tif config.API.Port == 0 && os.Getenv(\"PORT\") != \"\" {\n\t\tport, err := strconv.Atoi(os.Getenv(\"PORT\"))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"formatting PORT into int\")\n\t\t}\n\n\t\tconfig.API.Port = port\n\t}\n\n\tif config.API.Port == 0 && config.API.Host == \"\" {\n\t\tconfig.API.Port = 8081\n\t}\n\n\tif _, err := nconf.ConfigureLogging(&config.Logging); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn config, nil\n}\n\n\/\/ LoadConfigFromFile loads per-instance configuration from the provided filename.\nfunc LoadConfigFromFile(name string) (*Configuration, error) {\n\tcmd := &cobra.Command{}\n\tconfig := \"\"\n\tcmd.Flags().StringVar(&config, \"config\", \"config.test.json\", \"Config file\")\n\tcmd.Flags().Set(\"config\", name)\n\treturn LoadConfig(cmd)\n}\n\n\/\/ LoadConfig loads per-instance configuration.\nfunc LoadConfig(cmd *cobra.Command) (*Configuration, error) {\n\tconfig := new(Configuration)\n\tif err := nconf.LoadConfig(cmd, \"gotrue\", config); err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig.ApplyDefaults()\n\treturn config, nil\n}\n\nfunc (config *Configuration) ApplyDefaults() {\n\tif config.JWT.AdminGroupName == \"\" {\n\t\tconfig.JWT.AdminGroupName = \"admin\"\n\t}\n\n\tif config.JWT.Exp == 0 {\n\t\tconfig.JWT.Exp = 3600\n\t}\n\n\tif config.Mailer.MaxFrequency == 0 {\n\t\tconfig.Mailer.MaxFrequency = 15 * time.Minute\n\t}\n\n\tif config.Mailer.MemberFolder == \"\" {\n\t\tconfig.Mailer.MemberFolder = \"\/member\"\n\t}\n\n\tif config.Mailer.Templates.Invite == \"\" {\n\t\tconfig.Mailer.Templates.Invite = \"\/.netlify\/gotrue\/templates\/invite.html\"\n\t}\n\tif 
config.Mailer.Templates.Confirmation == \"\" {\n\t\tconfig.Mailer.Templates.Confirmation = \"\/.netlify\/gotrue\/templates\/confirm.html\"\n\t}\n\tif config.Mailer.Templates.Recovery == \"\" {\n\t\tconfig.Mailer.Templates.Recovery = \"\/.netlify\/gotrue\/templates\/recover.html\"\n\t}\n\tif config.Mailer.Templates.EmailChange == \"\" {\n\t\tconfig.Mailer.Templates.EmailChange = \"\/.netlify\/gotrue\/templates\/email-change.html\"\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 The OpenPitrix Authors. All rights reserved.\n\/\/ Use of this source code is governed by an Apache license\n\/\/ that can be found in the LICENSE file.\n\npackage pilot\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"openpitrix.io\/openpitrix\/pkg\/constants\"\n\t\"openpitrix.io\/openpitrix\/pkg\/logger\"\n\t\"openpitrix.io\/openpitrix\/pkg\/manager\"\n\t\"openpitrix.io\/openpitrix\/pkg\/pb\"\n\t\"openpitrix.io\/openpitrix\/pkg\/utils\"\n)\n\nfunc NewPilotManagerClient(ctx context.Context) (pb.PilotManagerClient, error) {\n\tconn, err := manager.NewClient(ctx, constants.PilotManagerHost, constants.PilotManagerPort)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pb.NewPilotManagerClient(conn), err\n}\n\nfunc HandleSubtask(subtaskRequest *pb.HandleSubtaskRequest) error {\n\tctx := context.Background()\n\tclient, err := NewPilotManagerClient(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = client.HandleSubtask(ctx, subtaskRequest)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc GetSubtaskStatus(subtaskStatusRequest *pb.GetSubtaskStatusRequest) (*pb.GetSubtaskStatusResponse, error) {\n\tctx := context.Background()\n\tclient, err := NewPilotManagerClient(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsubtaskStatusResponse, err := client.GetSubtaskStatus(ctx, subtaskStatusRequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn subtaskStatusResponse, err\n}\n\nfunc WaitSubtask(taskId string, timeout time.Duration, waitInterval time.Duration) error {\n\tlogger.Debug(\"Waiting for task [%s] to finish\", taskId)\n\treturn utils.WaitForSpecificOrError(func() (bool, error) {\n\t\ttaskStatusRequest := &pb.GetSubtaskStatusRequest{\n\t\t\tSubtaskId: []string{taskId},\n\t\t}\n\t\ttaskStatusResponse, err := GetSubtaskStatus(taskStatusRequest)\n\t\tif err != nil {\n\t\t\t\/\/ network or API error, not considered a task failure.\n\t\t\treturn false, nil\n\t\t}\n\t\tif len(taskStatusResponse.SubtaskStatusSet) == 0 {\n\t\t\treturn false, fmt.Errorf(\"Can not find task [%s]. \", taskId)\n\t\t}\n\t\tt := taskStatusResponse.SubtaskStatusSet[0]\n\t\tif t.Status == nil {\n\t\t\tlogger.Errorf(\"Task [%s] status is nil\", taskId)\n\t\t\treturn false, nil\n\t\t}\n\t\tif t.Status.GetValue() == constants.StatusWorking || t.Status.GetValue() == constants.StatusPending {\n\t\t\treturn false, nil\n\t\t}\n\t\tif t.Status.GetValue() == constants.StatusSuccessful {\n\t\t\treturn true, nil\n\t\t}\n\t\tif t.Status.GetValue() == constants.StatusFailed {\n\t\t\treturn false, fmt.Errorf(\"Task [%s] failed. \", taskId)\n\t\t}\n\t\tlogger.Errorf(\"Unknown status [%s] for task [%s]. \", t.Status.GetValue(), taskId)\n\t\treturn false, nil\n\t}, timeout, waitInterval)\n}\n<commit_msg>pkg\/client\/pilot: fix build<commit_after>\/\/ Copyright 2018 The OpenPitrix Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by an Apache license\n\/\/ that can be found in the LICENSE file.\n\npackage pilot\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"openpitrix.io\/openpitrix\/pkg\/constants\"\n\t\"openpitrix.io\/openpitrix\/pkg\/logger\"\n\t\"openpitrix.io\/openpitrix\/pkg\/manager\"\n\t\"openpitrix.io\/openpitrix\/pkg\/pb\"\n\t\"openpitrix.io\/openpitrix\/pkg\/utils\"\n)\n\nfunc NewPilotManagerClient(ctx context.Context) (pb.PilotServiceClient, error) {\n\tconn, err := manager.NewClient(ctx, constants.PilotManagerHost, constants.PilotManagerPort)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pb.NewPilotServiceClient(conn), err\n}\n\nfunc HandleSubtask(subtaskRequest *pb.HandleSubtaskRequest) error {\n\tctx := context.Background()\n\tclient, err := NewPilotManagerClient(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = client.HandleSubtask(ctx, subtaskRequest)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc GetSubtaskStatus(subtaskStatusRequest *pb.GetSubtaskStatusRequest) (*pb.GetSubtaskStatusResponse, error) {\n\tctx := context.Background()\n\tclient, err := NewPilotManagerClient(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsubtaskStatusResponse, err := client.GetSubtaskStatus(ctx, subtaskStatusRequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn subtaskStatusResponse, err\n}\n\nfunc WaitSubtask(taskId string, timeout time.Duration, waitInterval time.Duration) error {\n\tlogger.Debug(\"Waiting for task [%s] to finish\", taskId)\n\treturn utils.WaitForSpecificOrError(func() (bool, error) {\n\t\ttaskStatusRequest := &pb.GetSubtaskStatusRequest{\n\t\t\tSubtaskId: []string{taskId},\n\t\t}\n\t\ttaskStatusResponse, err := GetSubtaskStatus(taskStatusRequest)\n\t\tif err != nil {\n\t\t\t\/\/ network or API error, not considered a task failure.\n\t\t\treturn false, nil\n\t\t}\n\t\tif len(taskStatusResponse.SubtaskStatusSet) == 0 {\n\t\t\treturn false, fmt.Errorf(\"Can not find task [%s]. \", taskId)\n\t\t}\n\t\tt := taskStatusResponse.SubtaskStatusSet[0]\n\t\tif t.Status == nil {\n\t\t\tlogger.Errorf(\"Task [%s] status is nil\", taskId)\n\t\t\treturn false, nil\n\t\t}\n\t\tif t.Status.GetValue() == constants.StatusWorking || t.Status.GetValue() == constants.StatusPending {\n\t\t\treturn false, nil\n\t\t}\n\t\tif t.Status.GetValue() == constants.StatusSuccessful {\n\t\t\treturn true, nil\n\t\t}\n\t\tif t.Status.GetValue() == constants.StatusFailed {\n\t\t\treturn false, fmt.Errorf(\"Task [%s] failed. \", taskId)\n\t\t}\n\t\tlogger.Errorf(\"Unknown status [%s] for task [%s]. \", t.Status.GetValue(), taskId)\n\t\treturn false, nil\n\t}, timeout, waitInterval)\n}\n<|endoftext|>"} {"text":"<commit_before>package discovery\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"net\/http\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\tetcd \"github.com\/coreos\/etcd\/client\"\n\t\"github.com\/matt-deboer\/etcdcd\/pkg\/platform\"\n)\n\n\/\/ Discovery provides correct startup details for etcd with respect to\n\/\/ known vs. 
expected cluster membership\ntype Discovery struct {\n\tConfigFile string\n\tPlatform string\n\tClientPort int\n\tServerPort int\n\tClientScheme string\n\tServerScheme string\n\tMaxTries int\n\tProxyMode bool\n\tMasterFilter string\n\tDryRun bool\n\tIgnoreNamingMismatch bool\n\tMinimumUptimeToJoin time.Duration\n}\n\nfunc findMemberByName(members []etcd.Member, name string) *etcd.Member {\n\tfor _, member := range members {\n\t\tif name == member.Name {\n\t\t\treturn &member\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc containsMember(members []etcd.Member, member etcd.Member) bool {\n\tfor _, m := range members {\n\t\tfor _, peerURL := range m.PeerURLs {\n\t\t\tfor _, memberPeerURL := range member.PeerURLs {\n\t\t\t\tif peerURL == memberPeerURL {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ DiscoverEnvironment produces an environment hash\nfunc (d *Discovery) DiscoverEnvironment() (map[string]string, error) {\n\n\tp, err := platform.Get(d.Platform, d.ConfigFile)\n\tif p == nil {\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errors.New(\"No such platform: \" + d.Platform)\n\t}\n\n\tvar expectedMembers []etcd.Member\n\tfor tries := 0; tries < d.MaxTries && len(expectedMembers) == 0; tries++ {\n\t\tif members, err := p.ExpectedMembers(d.MasterFilter, d.ClientScheme,\n\t\t\td.ClientPort, d.ServerScheme, d.ServerPort); err == nil {\n\t\t\tfor _, m := range members {\n\t\t\t\t\/\/ have to cast here because of golang type-system--ugh!\n\t\t\t\texpectedMembers = append(expectedMembers, etcd.Member(m))\n\t\t\t\tif len(m.PeerURLs) == 0 {\n\t\t\t\t\tlog.Fatalf(\"Platform %s returned an invalid member: %#v\", d.Platform, m)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(expectedMembers) == 0 {\n\t\t\tsleepTime := (2 * time.Second)\n\t\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\t\tlog.Debugf(\"Failed to resolve expected members; sleeping for %s\", sleepTime)\n\t\t\t}\n\t\t\ttime.Sleep(sleepTime)\n\t\t}\n\t}\n\n\tif len(expectedMembers) == 0 {\n\t\tlog.Fatal(\"Failed to determine expected members\")\n\t} else if log.GetLevel() >= log.DebugLevel {\n\t\tlog.Debugf(\"Expected cluster members: %#v\", expectedMembers)\n\t}\n\tsort.Slice(expectedMembers, func(i, j int) bool { return expectedMembers[i].Name < expectedMembers[j].Name })\n\n\tlocalMaster := findMemberByName(expectedMembers, p.LocalInstanceName())\n\tmembersAPI, currentMembers, uptime, err := d.resolveMembersAndAPI(expectedMembers, localMaster)\n\n\tenvironment := map[string]string{}\n\tenvironment[\"ETCD_NAME\"] = p.LocalInstanceName()\n\tenvironment[\"ETCD_INITIAL_CLUSTER\"] = initialClusterString(expectedMembers)\n\n\tif localMaster != nil {\n\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\tlog.Debugf(\"Local master: %#v\", *localMaster)\n\t\t}\n\t\t\/\/ this instance is an expected master\n\t\tif len(currentMembers) > 0 && !containsMember(currentMembers, *localMaster) && uptime >= d.MinimumUptimeToJoin {\n\t\t\t\/\/ there is an existing cluster\n\t\t\tif err = d.assertSaneClusterState(expectedMembers, currentMembers); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\td.evictBadPeers(membersAPI, expectedMembers, currentMembers)\n\t\t\tlog.Infof(\"Joining existing cluster as a master\")\n\t\t\t\/\/ TODO: what if we encounter a state where none of the expected masters are\n\t\t\t\/\/ members of the current cluster?\n\t\t\tif err := d.joinExistingCluster(membersAPI, expectedMembers, localMaster); err != nil 
{\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tenvironment[\"ETCD_INITIAL_CLUSTER_STATE\"] = \"existing\"\n\t\t} else {\n\t\t\tlog.Infof(\"Creating a new cluster\")\n\t\t\tenvironment[\"ETCD_INITIAL_CLUSTER_STATE\"] = \"new\"\n\t\t}\n\t} else if d.ProxyMode {\n\t\tlog.Infof(\"Proxying existing cluster\")\n\t\tenvironment[\"ETCD_INITIAL_CLUSTER_STATE\"] = \"existing\"\n\t\tenvironment[\"ETCD_PROXY\"] = \"on\"\n\t} else {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"Invalid cluster configuration: localhost (%s) is not an expected master, and not in proxy mode\",\n\t\t\tp.LocalInstanceName())\n\t}\n\treturn environment, nil\n}\n\nfunc initialClusterString(members []etcd.Member) string {\n\tif len(members) > 0 {\n\t\tinitialCluster := make([]string, 0, len(members))\n\t\tfor _, m := range members {\n\t\t\tmember := fmt.Sprintf(\"%s=%s\", m.Name, m.PeerURLs[0])\n\t\t\tinitialCluster = append(initialCluster, member)\n\t\t}\n\t\treturn strings.Join(initialCluster, \",\")\n\t}\n\treturn \"\"\n}\n\n\/\/ Check for mismatched names between expected and current members with\n\/\/ matching peer URLs; also check for lack of intersection between\n\/\/ expected and current members--indicating an invalid current (or expected)\n\/\/ cluster state\nfunc (d *Discovery) assertSaneClusterState(expectedMembers []etcd.Member, currentMembers []etcd.Member) error {\n\tpartialMatchCount := 0\n\tfor _, current := range currentMembers {\n\t\tfor _, expected := range expectedMembers {\n\t\t\tmatchingPeerURL := \"\"\n\t\t\tfor _, expectedPeerURL := range expected.PeerURLs {\n\t\t\t\tfor _, currentPeerURL := range current.PeerURLs {\n\t\t\t\t\tif expectedPeerURL == currentPeerURL {\n\t\t\t\t\t\tmatchingPeerURL = expectedPeerURL\n\t\t\t\t\t\tpartialMatchCount++\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(matchingPeerURL) > 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(matchingPeerURL) > 0 && len(current.Name) > 0 && current.Name != expected.Name {\n\t\t\t\tif !d.IgnoreNamingMismatch {\n\t\t\t\t\treturn fmt.Errorf(\"Expected peer %s with peer URL %s already exists with a different name: %s\",\n\t\t\t\t\t\texpected.Name, matchingPeerURL, current.Name)\n\t\t\t\t} else if log.GetLevel() >= log.DebugLevel {\n\t\t\t\t\tlog.Debugf(\"Ignoring expected peer %s with peer URL %s already exists with a different name: %s\",\n\t\t\t\t\t\texpected.Name, matchingPeerURL, current.Name)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif partialMatchCount == 0 && len(expectedMembers) > 0 && len(currentMembers) > 0 {\n\t\texpectedJSON, _ := json.Marshal(expectedMembers)\n\t\tcurrentJSON, _ := json.Marshal(currentMembers)\n\t\treturn fmt.Errorf(\"Invalid cluster state: found no intersection between peer URLs of expected members %s and current members %s\",\n\t\t\texpectedJSON, currentJSON)\n\t}\n\n\treturn nil\n}\n\nfunc (d *Discovery) evictBadPeers(membersAPI etcd.MembersAPI, expectedMembers []etcd.Member, currentMembers []etcd.Member) {\n\n\tfor _, peer := range currentMembers {\n\t\tif !containsMember(expectedMembers, peer) {\n\t\t\tmsg := fmt.Sprintf(\"Ejecting bad peer %s %v from the cluster:\", peer.Name, peer.PeerURLs)\n\t\t\tif d.DryRun {\n\t\t\t\tlog.Infof(\"DRY_RUN: would have ejected peer %s %v from the cluster\", peer.Name, peer.PeerURLs)\n\t\t\t} else {\n\t\t\t\tfor tries := 0; tries < d.MaxTries; tries++ {\n\t\t\t\t\terr := membersAPI.Remove(context.Background(), peer.ID)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tlog.Infof(\"%s DONE\", msg)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t} else if (tries + 1) == d.MaxTries 
{\n\t\t\t\t\t\tlog.Errorf(\"%s ERROR: %v\", msg, err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (d *Discovery) joinExistingCluster(membersAPI etcd.MembersAPI,\n\texpectedMembers []etcd.Member, localMember *etcd.Member) error {\n\n\tmsg := \"Joining existing cluster: \"\n\tfor tries := 0; tries < d.MaxTries; tries++ {\n\t\tif d.DryRun {\n\t\t\tlog.Infof(\"DRY_RUN: would have added %s %v to the cluster\", localMember.Name, localMember.PeerURLs)\n\t\t} else {\n\t\t\t_, err := membersAPI.Add(context.Background(), localMember.PeerURLs[0])\n\t\t\tif err == nil {\n\t\t\t\tlog.Infof(\"%s DONE\", msg)\n\t\t\t\tbreak\n\t\t\t} else if (tries + 1) == d.MaxTries {\n\t\t\t\tlog.Errorf(\"%s ERROR: %v\", msg, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\t\tlog.Debugf(\"Retryable error attempting to add local master %#v: %v\", localMember, err)\n\t\t\t}\n\t\t\tmembersAPI, _, _, err = d.resolveMembersAndAPI(expectedMembers, localMember)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"%s ERROR: %v\", msg, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *Discovery) resolveMembersAndAPI(expectedMembers []etcd.Member,\n\tlocalMember *etcd.Member) (membersAPI etcd.MembersAPI, currentMembers []etcd.Member, uptime time.Duration, err error) {\n\n\tctx := context.Background()\n\tvar lastErr error\n\tfor tries := 0; tries <= d.MaxTries; tries++ {\n\t\tfor _, member := range expectedMembers {\n\t\t\t\/\/ don't attempt self connection; after all, this is intended as a precursor\n\t\t\t\/\/ to the actual etcd service on the local host\n\t\t\tif localMember != nil && member.PeerURLs[0] != localMember.PeerURLs[0] {\n\t\t\t\tcfg := etcd.Config{\n\t\t\t\t\tEndpoints: member.ClientURLs,\n\t\t\t\t\tTransport: etcd.DefaultTransport,\n\t\t\t\t\t\/\/ set timeout per request to fail fast when the target endpoint is unavailable\n\t\t\t\t\tHeaderTimeoutPerRequest: time.Second,\n\t\t\t\t}\n\t\t\t\tetcdClient, err := etcd.New(cfg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\t\t\t\tlog.Debugf(\"Error connecting to %s %v, %v\", member.Name, member.ClientURLs, err)\n\t\t\t\t\t}\n\t\t\t\t\tlastErr = err\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tmembersAPI = etcd.NewMembersAPI(etcdClient)\n\t\t\t\tleader, err := membersAPI.Leader(ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\t\t\t\tlog.Debugf(\"Error getting leader %s %v, %v\", member.Name, member.ClientURLs, err)\n\t\t\t\t\t}\n\t\t\t\t\tlastErr = err\n\t\t\t\t\tcontinue\n\t\t\t\t} else if leader == nil {\n\t\t\t\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\t\t\t\tlog.Debugf(\"Error getting leader %s %v, %v\", member.Name, member.ClientURLs, err)\n\t\t\t\t\t}\n\t\t\t\t\tlastErr = errors.New(\"Failed to resolve cluster leader\")\n\t\t\t\t}\n\n\t\t\t\tcurrentMembers, err = membersAPI.List(ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\t\t\t\tlog.Debugf(\"Error listing members %s %v, %v\", member.Name, member.ClientURLs, err)\n\t\t\t\t\t}\n\t\t\t\t\tlastErr = err\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tuptime, err = getUptime(member.ClientURLs[0])\n\t\t\t\tif err != nil {\n\t\t\t\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\t\t\t\tlog.Debugf(\"Error listing leader uptime %s %v, %v\", member.Name, member.ClientURLs, err)\n\t\t\t\t\t}\n\t\t\t\t\tlastErr = err\n\t\t\t\t\tcontinue\n\t\t\t\t} else if log.GetLevel() >= log.DebugLevel {\n\t\t\t\t\tlog.Debugf(\"Member %s reported current leader uptime is 
%s\", member.Name, uptime)\n\t\t\t\t}\n\n\t\t\t\t\/\/ sanity-check the returned members; it may be partial in case of a yet-forming cluster\n\t\t\t\thasInvalidMembers := false\n\t\t\t\tfor _, m := range currentMembers {\n\t\t\t\t\tif len(m.Name) == 0 || len(m.PeerURLs) == 0 {\n\t\t\t\t\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\t\t\t\t\tlog.Debugf(\"Returned actual member list contains invalid member: %#v\", m)\n\t\t\t\t\t\t}\n\t\t\t\t\t\thasInvalidMembers = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !hasInvalidMembers {\n\t\t\t\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\t\t\t\tlog.Debugf(\"Actual cluster members: %#v\", currentMembers)\n\t\t\t\t\t}\n\t\t\t\t\treturn membersAPI, currentMembers, uptime, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(currentMembers) == 0 {\n\t\t\t\/\/ TODO: what's our timeout here?\n\t\t\tsleepTime := (1 * time.Second)\n\t\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\t\tlog.Debugf(\"Failed to resolve members; sleeping for %s\", sleepTime)\n\t\t\t}\n\t\t\ttime.Sleep(sleepTime)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil, nil, time.Millisecond, lastErr\n}\n\nfunc getUptime(endpoint string) (time.Duration, error) {\n\tresp, err := http.DefaultClient.Get(endpoint + \"\/v2\/stats\/self\")\n\tif err != nil {\n\t\treturn time.Millisecond, err\n\t}\n\n\tdefer resp.Body.Close()\n\tcontents, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn time.Millisecond, err\n\t}\n\tstats := make(map[string]interface{})\n\terr = json.Unmarshal(contents, &stats)\n\tif err != nil {\n\t\treturn time.Millisecond, err\n\t}\n\tif leaderInfo, ok := stats[\"leaderInfo\"]; ok {\n\t\tif uptimeString, ok := leaderInfo.(map[string]interface{})[\"uptime\"]; ok {\n\t\t\treturn time.ParseDuration(uptimeString.(string))\n\t\t}\n\t}\n\treturn time.Millisecond, fmt.Errorf(\"Missing leader uptime info for endpoint %s\", endpoint)\n}\n<commit_msg>lack of name does not an invalid member make<commit_after>package discovery\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"net\/http\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\tetcd \"github.com\/coreos\/etcd\/client\"\n\t\"github.com\/matt-deboer\/etcdcd\/pkg\/platform\"\n)\n\n\/\/ Discovery provides correct startup details for etcd with respect to\n\/\/ known vs. 
expected cluster membership\ntype Discovery struct {\n\tConfigFile string\n\tPlatform string\n\tClientPort int\n\tServerPort int\n\tClientScheme string\n\tServerScheme string\n\tMaxTries int\n\tProxyMode bool\n\tMasterFilter string\n\tDryRun bool\n\tIgnoreNamingMismatch bool\n\tMinimumUptimeToJoin time.Duration\n}\n\nfunc findMemberByName(members []etcd.Member, name string) *etcd.Member {\n\tfor _, member := range members {\n\t\tif name == member.Name {\n\t\t\treturn &member\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc containsMember(members []etcd.Member, member etcd.Member) bool {\n\tfor _, m := range members {\n\t\tfor _, peerURL := range m.PeerURLs {\n\t\t\tfor _, memberPeerURL := range member.PeerURLs {\n\t\t\t\tif peerURL == memberPeerURL {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ DiscoverEnvironment produces an environment hash\nfunc (d *Discovery) DiscoverEnvironment() (map[string]string, error) {\n\n\tp, err := platform.Get(d.Platform, d.ConfigFile)\n\tif p == nil {\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errors.New(\"No such platform: \" + d.Platform)\n\t}\n\n\tvar expectedMembers []etcd.Member\n\tfor tries := 0; tries < d.MaxTries && len(expectedMembers) == 0; tries++ {\n\t\tif members, err := p.ExpectedMembers(d.MasterFilter, d.ClientScheme,\n\t\t\td.ClientPort, d.ServerScheme, d.ServerPort); err == nil {\n\t\t\tfor _, m := range members {\n\t\t\t\t\/\/ have to cast here because of golang type-system--ugh!\n\t\t\t\texpectedMembers = append(expectedMembers, etcd.Member(m))\n\t\t\t\tif len(m.PeerURLs) == 0 {\n\t\t\t\t\tlog.Fatalf(\"Platform %s returned an invalid member: %#v\", d.Platform, m)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(expectedMembers) == 0 {\n\t\t\tsleepTime := (2 * time.Second)\n\t\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\t\tlog.Debugf(\"Failed to resolve expected members; sleeping for %s\", sleepTime)\n\t\t\t}\n\t\t\ttime.Sleep(sleepTime)\n\t\t}\n\t}\n\n\tif len(expectedMembers) == 0 {\n\t\tlog.Fatal(\"Failed to determine expected members\")\n\t} else if log.GetLevel() >= log.DebugLevel {\n\t\tlog.Debugf(\"Expected cluster members: %#v\", expectedMembers)\n\t}\n\tsort.Slice(expectedMembers, func(i, j int) bool { return expectedMembers[i].Name < expectedMembers[j].Name })\n\n\tlocalMaster := findMemberByName(expectedMembers, p.LocalInstanceName())\n\tmembersAPI, currentMembers, uptime, err := d.resolveMembersAndAPI(expectedMembers, localMaster)\n\n\tenvironment := map[string]string{}\n\tenvironment[\"ETCD_NAME\"] = p.LocalInstanceName()\n\tenvironment[\"ETCD_INITIAL_CLUSTER\"] = initialClusterString(expectedMembers)\n\n\tif localMaster != nil {\n\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\tlog.Debugf(\"Local master: %#v\", *localMaster)\n\t\t}\n\t\t\/\/ this instance is an expected master\n\t\tif len(currentMembers) > 0 && !containsMember(currentMembers, *localMaster) && uptime >= d.MinimumUptimeToJoin {\n\t\t\t\/\/ there is an existing cluster\n\t\t\tif err = d.assertSaneClusterState(expectedMembers, currentMembers); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\td.evictBadPeers(membersAPI, expectedMembers, currentMembers)\n\t\t\tlog.Infof(\"Joining existing cluster as a master\")\n\t\t\t\/\/ TODO: what if we encounter a state where none of the expected masters are\n\t\t\t\/\/ members of the current cluster?\n\t\t\tif err := d.joinExistingCluster(membersAPI, expectedMembers, localMaster); err != nil 
{\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tenvironment[\"ETCD_INITIAL_CLUSTER_STATE\"] = \"existing\"\n\t\t} else {\n\t\t\tlog.Infof(\"Creating a new cluster\")\n\t\t\tenvironment[\"ETCD_INITIAL_CLUSTER_STATE\"] = \"new\"\n\t\t}\n\t} else if d.ProxyMode {\n\t\tlog.Infof(\"Proxying existing cluster\")\n\t\tenvironment[\"ETCD_INITIAL_CLUSTER_STATE\"] = \"existing\"\n\t\tenvironment[\"ETCD_PROXY\"] = \"on\"\n\t} else {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"Invalid cluster configuration: localhost (%s) is not an expected master, and not in proxy mode\",\n\t\t\tp.LocalInstanceName())\n\t}\n\treturn environment, nil\n}\n\nfunc initialClusterString(members []etcd.Member) string {\n\tif len(members) > 0 {\n\t\tinitialCluster := make([]string, 0, len(members))\n\t\tfor _, m := range members {\n\t\t\tmember := fmt.Sprintf(\"%s=%s\", m.Name, m.PeerURLs[0])\n\t\t\tinitialCluster = append(initialCluster, member)\n\t\t}\n\t\treturn strings.Join(initialCluster, \",\")\n\t}\n\treturn \"\"\n}\n\n\/\/ Check for mismatched names between expected and current members with\n\/\/ matching peer URLs; also check for lack of intersection between\n\/\/ expected and current members--indicating an invalid current (or expected)\n\/\/ cluster state\nfunc (d *Discovery) assertSaneClusterState(expectedMembers []etcd.Member, currentMembers []etcd.Member) error {\n\tpartialMatchCount := 0\n\tfor _, current := range currentMembers {\n\t\tfor _, expected := range expectedMembers {\n\t\t\tmatchingPeerURL := \"\"\n\t\t\tfor _, expectedPeerURL := range expected.PeerURLs {\n\t\t\t\tfor _, currentPeerURL := range current.PeerURLs {\n\t\t\t\t\tif expectedPeerURL == currentPeerURL {\n\t\t\t\t\t\tmatchingPeerURL = expectedPeerURL\n\t\t\t\t\t\tpartialMatchCount++\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(matchingPeerURL) > 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(matchingPeerURL) > 0 && len(current.Name) > 0 && current.Name != expected.Name {\n\t\t\t\tif !d.IgnoreNamingMismatch {\n\t\t\t\t\treturn fmt.Errorf(\"Expected peer %s with peer URL %s already exists with a different name: %s\",\n\t\t\t\t\t\texpected.Name, matchingPeerURL, current.Name)\n\t\t\t\t} else if log.GetLevel() >= log.DebugLevel {\n\t\t\t\t\tlog.Debugf(\"Ignoring expected peer %s with peer URL %s already exists with a different name: %s\",\n\t\t\t\t\t\texpected.Name, matchingPeerURL, current.Name)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif partialMatchCount == 0 && len(expectedMembers) > 0 && len(currentMembers) > 0 {\n\t\texpectedJSON, _ := json.Marshal(expectedMembers)\n\t\tcurrentJSON, _ := json.Marshal(currentMembers)\n\t\treturn fmt.Errorf(\"Invalid cluster state: found no intersection between peer URLs of expected members %s and current members %s\",\n\t\t\texpectedJSON, currentJSON)\n\t}\n\n\treturn nil\n}\n\nfunc (d *Discovery) evictBadPeers(membersAPI etcd.MembersAPI, expectedMembers []etcd.Member, currentMembers []etcd.Member) {\n\n\tfor _, peer := range currentMembers {\n\t\tif !containsMember(expectedMembers, peer) {\n\t\t\tmsg := fmt.Sprintf(\"Ejecting bad peer %s %v from the cluster:\", peer.Name, peer.PeerURLs)\n\t\t\tif d.DryRun {\n\t\t\t\tlog.Infof(\"DRY_RUN: would have ejected peer %s %v from the cluster\", peer.Name, peer.PeerURLs)\n\t\t\t} else {\n\t\t\t\tfor tries := 0; tries < d.MaxTries; tries++ {\n\t\t\t\t\terr := membersAPI.Remove(context.Background(), peer.ID)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tlog.Infof(\"%s DONE\", msg)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t} else if (tries + 1) == d.MaxTries 
{\n\t\t\t\t\t\tlog.Errorf(\"%s ERROR: %v\", msg, err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (d *Discovery) joinExistingCluster(membersAPI etcd.MembersAPI,\n\texpectedMembers []etcd.Member, localMember *etcd.Member) error {\n\n\tmsg := \"Joining existing cluster: \"\n\tfor tries := 0; tries < d.MaxTries; tries++ {\n\t\tif d.DryRun {\n\t\t\tlog.Infof(\"DRY_RUN: would have added %s %v to the cluster\", localMember.Name, localMember.PeerURLs)\n\t\t} else {\n\t\t\t_, err := membersAPI.Add(context.Background(), localMember.PeerURLs[0])\n\t\t\tif err == nil {\n\t\t\t\tlog.Infof(\"%s DONE\", msg)\n\t\t\t\tbreak\n\t\t\t} else if (tries + 1) == d.MaxTries {\n\t\t\t\tlog.Errorf(\"%s ERROR: %v\", msg, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\t\tlog.Debugf(\"Retryable error attempting to add local master %#v: %v\", localMember, err)\n\t\t\t}\n\t\t\tmembersAPI, _, _, err = d.resolveMembersAndAPI(expectedMembers, localMember)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"%s ERROR: %v\", msg, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *Discovery) resolveMembersAndAPI(expectedMembers []etcd.Member,\n\tlocalMember *etcd.Member) (membersAPI etcd.MembersAPI, currentMembers []etcd.Member, uptime time.Duration, err error) {\n\n\tctx := context.Background()\n\tvar lastErr error\n\tfor tries := 0; tries <= d.MaxTries; tries++ {\n\t\tfor _, member := range expectedMembers {\n\t\t\t\/\/ don't attempt self connection; after all, this is intended as a precursor\n\t\t\t\/\/ to the actual etcd service on the local host\n\t\t\tif localMember != nil && member.PeerURLs[0] != localMember.PeerURLs[0] {\n\t\t\t\tcfg := etcd.Config{\n\t\t\t\t\tEndpoints: member.ClientURLs,\n\t\t\t\t\tTransport: etcd.DefaultTransport,\n\t\t\t\t\t\/\/ set timeout per request to fail fast when the target endpoint is unavailable\n\t\t\t\t\tHeaderTimeoutPerRequest: time.Second,\n\t\t\t\t}\n\t\t\t\tetcdClient, err := etcd.New(cfg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\t\t\t\tlog.Debugf(\"Error connecting to %s %v, %v\", member.Name, member.ClientURLs, err)\n\t\t\t\t\t}\n\t\t\t\t\tlastErr = err\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tmembersAPI = etcd.NewMembersAPI(etcdClient)\n\t\t\t\tleader, err := membersAPI.Leader(ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\t\t\t\tlog.Debugf(\"Error getting leader %s %v, %v\", member.Name, member.ClientURLs, err)\n\t\t\t\t\t}\n\t\t\t\t\tlastErr = err\n\t\t\t\t\tcontinue\n\t\t\t\t} else if leader == nil {\n\t\t\t\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\t\t\t\tlog.Debugf(\"Error getting leader %s %v, %v\", member.Name, member.ClientURLs, err)\n\t\t\t\t\t}\n\t\t\t\t\tlastErr = errors.New(\"Failed to resolve cluster leader\")\n\t\t\t\t}\n\n\t\t\t\tcurrentMembers, err = membersAPI.List(ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\t\t\t\tlog.Debugf(\"Error listing members %s %v, %v\", member.Name, member.ClientURLs, err)\n\t\t\t\t\t}\n\t\t\t\t\tlastErr = err\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tuptime, err = getUptime(member.ClientURLs[0])\n\t\t\t\tif err != nil {\n\t\t\t\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\t\t\t\tlog.Debugf(\"Error listing leader uptime %s %v, %v\", member.Name, member.ClientURLs, err)\n\t\t\t\t\t}\n\t\t\t\t\tlastErr = err\n\t\t\t\t\tcontinue\n\t\t\t\t} else if log.GetLevel() >= log.DebugLevel {\n\t\t\t\t\tlog.Debugf(\"Member %s reported current leader uptime is 
%s\", member.Name, uptime)\n\t\t\t\t}\n\n\t\t\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\t\t\tlog.Debugf(\"Actual cluster members: %#v\", currentMembers)\n\t\t\t\t}\n\t\t\t\treturn membersAPI, currentMembers, uptime, nil\n\t\t\t}\n\t\t}\n\t\tif len(currentMembers) == 0 {\n\t\t\t\/\/ TODO: what's our timeout here?\n\t\t\tsleepTime := (1 * time.Second)\n\t\t\tif log.GetLevel() >= log.DebugLevel {\n\t\t\t\tlog.Debugf(\"Failed to resolve members; sleeping for %s\", sleepTime)\n\t\t\t}\n\t\t\ttime.Sleep(sleepTime)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil, nil, time.Millisecond, lastErr\n}\n\nfunc getUptime(endpoint string) (time.Duration, error) {\n\tresp, err := http.DefaultClient.Get(endpoint + \"\/v2\/stats\/self\")\n\tif err != nil {\n\t\treturn time.Millisecond, err\n\t}\n\n\tdefer resp.Body.Close()\n\tcontents, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn time.Millisecond, err\n\t}\n\tstats := make(map[string]interface{})\n\terr = json.Unmarshal(contents, &stats)\n\tif err != nil {\n\t\treturn time.Millisecond, err\n\t}\n\tif leaderInfo, ok := stats[\"leaderInfo\"]; ok {\n\t\tif uptimeString, ok := leaderInfo.(map[string]interface{})[\"uptime\"]; ok {\n\t\t\treturn time.ParseDuration(uptimeString.(string))\n\t\t}\n\t}\n\treturn time.Millisecond, fmt.Errorf(\"Missing leader uptime info for endpoint %s\", endpoint)\n}\n<|endoftext|>"} {"text":"<commit_before>package localpeer\n\nimport (\n\t\"sort\"\n\t\"sync\"\n\n\t\"nimona.io\/internal\/rand\"\n\t\"nimona.io\/pkg\/crypto\"\n\t\"nimona.io\/pkg\/object\"\n\t\"nimona.io\/pkg\/peer\"\n)\n\n\/\/go:generate mockgen -destination=..\/localpeermock\/localpeermock_generated.go -package=localpeermock -source=localpeer.go\n\/\/go:generate genny -in=$GENERATORS\/synclist\/synclist.go -out=cids_generated.go -imp=nimona.io\/pkg\/object -pkg=localpeer gen \"KeyType=object.CID\"\n\/\/go:generate genny -in=$GENERATORS\/synclist\/synclist.go -out=certificates_generated.go -imp=nimona.io\/pkg\/peer -pkg=localpeer gen \"KeyType=*object.Certificate\"\n\/\/go:generate genny -in=$GENERATORS\/synclist\/synclist.go -out=addresses_generated.go -imp=nimona.io\/pkg\/peer -pkg=localpeer gen \"KeyType=string\"\n\ntype (\n\tLocalPeer interface {\n\t\tGetPrimaryPeerKey() crypto.PrivateKey\n\t\tPutPrimaryPeerKey(crypto.PrivateKey)\n\t\tGetPrimaryIdentityKey() crypto.PrivateKey\n\t\tPutPrimaryIdentityKey(crypto.PrivateKey)\n\t\tGetCertificates() []*object.Certificate\n\t\tPutCertificate(*object.Certificate)\n\t\tGetCIDs() []object.CID\n\t\tPutCIDs(...object.CID)\n\t\tGetContentTypes() []string\n\t\tPutContentTypes(...string)\n\t\tGetAddresses() []string\n\t\tPutAddresses(...string)\n\t\tGetRelays() []*peer.ConnectionInfo\n\t\tPutRelays(...*peer.ConnectionInfo)\n\t\tConnectionInfo() *peer.ConnectionInfo\n\t\tListenForUpdates() (<-chan UpdateEvent, func())\n\t}\n\tlocalPeer struct {\n\t\tkeyLock sync.RWMutex\n\t\tprimaryPeerKey crypto.PrivateKey\n\t\tprimaryIdentityKey crypto.PrivateKey\n\t\tcids *ObjectCIDSyncList\n\t\tcontentTypes *StringSyncList\n\t\tcertificates *ObjectCertificateSyncList\n\t\taddresses *StringSyncList\n\t\trelays []*peer.ConnectionInfo\n\t\tlisteners map[string]chan UpdateEvent\n\t\tlistenersLock sync.RWMutex\n\t}\n\tUpdateEvent string\n)\n\nconst (\n\tEventContentTypesUpdated UpdateEvent = \"contentTypeUpdated\"\n\tEventCIDsUpdated UpdateEvent = \"cidsUpdated\"\n\tEventAddressesUpdated UpdateEvent = \"addressesUpdated\"\n\tEventRelaysUpdated UpdateEvent = \"relaysUpdated\"\n\tEventPrimaryIdentityKeyUpdated UpdateEvent 
= \"primaryIdentityKeyUpdated\"\n)\n\nfunc New() LocalPeer {\n\treturn &localPeer{\n\t\tkeyLock: sync.RWMutex{},\n\t\tcids: &ObjectCIDSyncList{},\n\t\tcontentTypes: &StringSyncList{},\n\t\tcertificates: &ObjectCertificateSyncList{},\n\t\taddresses: &StringSyncList{},\n\t\trelays: []*peer.ConnectionInfo{},\n\t\tlisteners: map[string]chan UpdateEvent{},\n\t\tlistenersLock: sync.RWMutex{},\n\t}\n}\n\nfunc (s *localPeer) PutPrimaryPeerKey(k crypto.PrivateKey) {\n\ts.keyLock.Lock()\n\ts.primaryPeerKey = k\n\ts.keyLock.Unlock()\n}\n\nfunc (s *localPeer) PutPrimaryIdentityKey(k crypto.PrivateKey) {\n\ts.keyLock.Lock()\n\ts.primaryIdentityKey = k\n\ts.keyLock.Unlock()\n\ts.publishUpdate(EventPrimaryIdentityKeyUpdated)\n}\n\nfunc (s *localPeer) GetPrimaryPeerKey() crypto.PrivateKey {\n\ts.keyLock.RLock()\n\tdefer s.keyLock.RUnlock() \/\/nolint: gocritic\n\treturn s.primaryPeerKey\n}\n\nfunc (s *localPeer) GetPrimaryIdentityKey() crypto.PrivateKey {\n\ts.keyLock.RLock()\n\tdefer s.keyLock.RUnlock() \/\/nolint: gocritic\n\treturn s.primaryIdentityKey\n}\n\nfunc (s *localPeer) PutCertificate(c *object.Certificate) {\n\ts.certificates.Put(c)\n}\n\nfunc (s *localPeer) GetCertificates() []*object.Certificate {\n\treturn s.certificates.List()\n}\n\nfunc (s *localPeer) GetAddresses() []string {\n\tas := s.addresses.List()\n\tsort.Strings(as)\n\treturn as\n}\n\nfunc (s *localPeer) PutAddresses(addresses ...string) {\n\tfor _, h := range addresses {\n\t\ts.addresses.Put(h)\n\t}\n\ts.publishUpdate(EventAddressesUpdated)\n}\n\nfunc (s *localPeer) GetCIDs() []object.CID {\n\treturn s.cids.List()\n}\n\nfunc (s *localPeer) PutCIDs(cids ...object.CID) {\n\tfor _, h := range cids {\n\t\ts.cids.Put(h)\n\t}\n\ts.publishUpdate(EventCIDsUpdated)\n}\n\nfunc (s *localPeer) GetContentTypes() []string {\n\treturn s.contentTypes.List()\n}\n\nfunc (s *localPeer) PutContentTypes(contentTypes ...string) {\n\tfor _, h := range contentTypes {\n\t\ts.contentTypes.Put(h)\n\t}\n\ts.publishUpdate(EventContentTypesUpdated)\n}\n\nfunc (s *localPeer) GetRelays() []*peer.ConnectionInfo {\n\ts.keyLock.RLock()\n\tdefer s.keyLock.RUnlock()\n\treturn s.relays\n}\n\nfunc (s *localPeer) PutRelays(relays ...*peer.ConnectionInfo) {\n\ts.keyLock.Lock()\n\tdefer s.keyLock.Unlock()\n\ts.relays = append(s.relays, relays...)\n\ts.publishUpdate(EventRelaysUpdated)\n}\n\nfunc (s *localPeer) ConnectionInfo() *peer.ConnectionInfo {\n\treturn &peer.ConnectionInfo{\n\t\tPublicKey: s.GetPrimaryPeerKey().PublicKey(),\n\t\tAddresses: s.GetAddresses(),\n\t\tRelays: s.GetRelays(),\n\t\tObjectFormats: []string{\n\t\t\t\"json\",\n\t\t},\n\t}\n}\n\nfunc (s *localPeer) publishUpdate(e UpdateEvent) {\n\ts.listenersLock.RLock()\n\tdefer s.listenersLock.RUnlock()\n\tfor _, l := range s.listeners {\n\t\tselect {\n\t\tcase l <- e:\n\t\tdefault:\n\t\t}\n\t}\n}\n\nfunc (s *localPeer) ListenForUpdates() (\n\tupdates <-chan UpdateEvent,\n\tcancel func(),\n) {\n\tc := make(chan UpdateEvent)\n\ts.listenersLock.Lock()\n\tdefer s.listenersLock.Unlock()\n\tid := rand.String(8)\n\ts.listeners[id] = c\n\tf := func() {\n\t\ts.listenersLock.Lock()\n\t\tdefer s.listenersLock.Unlock()\n\t\tdelete(s.listeners, id)\n\t}\n\treturn c, f\n}\n<commit_msg>chore(localpeer): add TODO for merging methods<commit_after>package localpeer\n\nimport (\n\t\"sort\"\n\t\"sync\"\n\n\t\"nimona.io\/internal\/rand\"\n\t\"nimona.io\/pkg\/crypto\"\n\t\"nimona.io\/pkg\/object\"\n\t\"nimona.io\/pkg\/peer\"\n)\n\n\/\/go:generate mockgen -destination=..\/localpeermock\/localpeermock_generated.go 
-package=localpeermock -source=localpeer.go\n\/\/go:generate genny -in=$GENERATORS\/synclist\/synclist.go -out=cids_generated.go -imp=nimona.io\/pkg\/object -pkg=localpeer gen \"KeyType=object.CID\"\n\/\/go:generate genny -in=$GENERATORS\/synclist\/synclist.go -out=certificates_generated.go -imp=nimona.io\/pkg\/peer -pkg=localpeer gen \"KeyType=*object.Certificate\"\n\/\/go:generate genny -in=$GENERATORS\/synclist\/synclist.go -out=addresses_generated.go -imp=nimona.io\/pkg\/peer -pkg=localpeer gen \"KeyType=string\"\n\ntype (\n\tLocalPeer interface {\n\t\t\/\/ TODO merge peer\/id methods, use .Usage to distinguish\n\t\tGetPrimaryPeerKey() crypto.PrivateKey\n\t\tPutPrimaryPeerKey(crypto.PrivateKey)\n\t\tGetPrimaryIdentityKey() crypto.PrivateKey\n\t\tPutPrimaryIdentityKey(crypto.PrivateKey)\n\t\tGetCertificates() []*object.Certificate\n\t\tPutCertificate(*object.Certificate)\n\t\tGetCIDs() []object.CID\n\t\tPutCIDs(...object.CID)\n\t\tGetContentTypes() []string\n\t\tPutContentTypes(...string)\n\t\tGetAddresses() []string\n\t\tPutAddresses(...string)\n\t\tGetRelays() []*peer.ConnectionInfo\n\t\tPutRelays(...*peer.ConnectionInfo)\n\t\tConnectionInfo() *peer.ConnectionInfo\n\t\tListenForUpdates() (<-chan UpdateEvent, func())\n\t}\n\tlocalPeer struct {\n\t\tkeyLock sync.RWMutex\n\t\tprimaryPeerKey crypto.PrivateKey\n\t\tprimaryIdentityKey crypto.PrivateKey\n\t\tcids *ObjectCIDSyncList\n\t\tcontentTypes *StringSyncList\n\t\tcertificates *ObjectCertificateSyncList\n\t\taddresses *StringSyncList\n\t\trelays []*peer.ConnectionInfo\n\t\tlisteners map[string]chan UpdateEvent\n\t\tlistenersLock sync.RWMutex\n\t}\n\tUpdateEvent string\n)\n\nconst (\n\tEventContentTypesUpdated UpdateEvent = \"contentTypeUpdated\"\n\tEventCIDsUpdated UpdateEvent = \"cidsUpdated\"\n\tEventAddressesUpdated UpdateEvent = \"addressesUpdated\"\n\tEventRelaysUpdated UpdateEvent = \"relaysUpdated\"\n\tEventPrimaryIdentityKeyUpdated UpdateEvent = \"primaryIdentityKeyUpdated\"\n)\n\nfunc New() LocalPeer {\n\treturn &localPeer{\n\t\tkeyLock: sync.RWMutex{},\n\t\tcids: &ObjectCIDSyncList{},\n\t\tcontentTypes: &StringSyncList{},\n\t\tcertificates: &ObjectCertificateSyncList{},\n\t\taddresses: &StringSyncList{},\n\t\trelays: []*peer.ConnectionInfo{},\n\t\tlisteners: map[string]chan UpdateEvent{},\n\t\tlistenersLock: sync.RWMutex{},\n\t}\n}\n\nfunc (s *localPeer) PutPrimaryPeerKey(k crypto.PrivateKey) {\n\ts.keyLock.Lock()\n\ts.primaryPeerKey = k\n\ts.keyLock.Unlock()\n}\n\nfunc (s *localPeer) PutPrimaryIdentityKey(k crypto.PrivateKey) {\n\ts.keyLock.Lock()\n\ts.primaryIdentityKey = k\n\ts.keyLock.Unlock()\n\ts.publishUpdate(EventPrimaryIdentityKeyUpdated)\n}\n\nfunc (s *localPeer) GetPrimaryPeerKey() crypto.PrivateKey {\n\ts.keyLock.RLock()\n\tdefer s.keyLock.RUnlock() \/\/nolint: gocritic\n\treturn s.primaryPeerKey\n}\n\nfunc (s *localPeer) GetPrimaryIdentityKey() crypto.PrivateKey {\n\ts.keyLock.RLock()\n\tdefer s.keyLock.RUnlock() \/\/nolint: gocritic\n\treturn s.primaryIdentityKey\n}\n\nfunc (s *localPeer) PutCertificate(c *object.Certificate) {\n\ts.certificates.Put(c)\n}\n\nfunc (s *localPeer) GetCertificates() []*object.Certificate {\n\treturn s.certificates.List()\n}\n\nfunc (s *localPeer) GetAddresses() []string {\n\tas := s.addresses.List()\n\tsort.Strings(as)\n\treturn as\n}\n\nfunc (s *localPeer) PutAddresses(addresses ...string) {\n\tfor _, h := range addresses {\n\t\ts.addresses.Put(h)\n\t}\n\ts.publishUpdate(EventAddressesUpdated)\n}\n\nfunc (s *localPeer) GetCIDs() []object.CID {\n\treturn s.cids.List()\n}\n\nfunc (s 
*localPeer) PutCIDs(cids ...object.CID) {\n\tfor _, h := range cids {\n\t\ts.cids.Put(h)\n\t}\n\ts.publishUpdate(EventCIDsUpdated)\n}\n\nfunc (s *localPeer) GetContentTypes() []string {\n\treturn s.contentTypes.List()\n}\n\nfunc (s *localPeer) PutContentTypes(contentTypes ...string) {\n\tfor _, h := range contentTypes {\n\t\ts.contentTypes.Put(h)\n\t}\n\ts.publishUpdate(EventContentTypesUpdated)\n}\n\nfunc (s *localPeer) GetRelays() []*peer.ConnectionInfo {\n\ts.keyLock.RLock()\n\tdefer s.keyLock.RUnlock()\n\treturn s.relays\n}\n\nfunc (s *localPeer) PutRelays(relays ...*peer.ConnectionInfo) {\n\ts.keyLock.Lock()\n\tdefer s.keyLock.Unlock()\n\ts.relays = append(s.relays, relays...)\n\ts.publishUpdate(EventRelaysUpdated)\n}\n\nfunc (s *localPeer) ConnectionInfo() *peer.ConnectionInfo {\n\treturn &peer.ConnectionInfo{\n\t\tPublicKey: s.GetPrimaryPeerKey().PublicKey(),\n\t\tAddresses: s.GetAddresses(),\n\t\tRelays: s.GetRelays(),\n\t\tObjectFormats: []string{\n\t\t\t\"json\",\n\t\t},\n\t}\n}\n\nfunc (s *localPeer) publishUpdate(e UpdateEvent) {\n\ts.listenersLock.RLock()\n\tdefer s.listenersLock.RUnlock()\n\tfor _, l := range s.listeners {\n\t\tselect {\n\t\tcase l <- e:\n\t\tdefault:\n\t\t}\n\t}\n}\n\nfunc (s *localPeer) ListenForUpdates() (\n\tupdates <-chan UpdateEvent,\n\tcancel func(),\n) {\n\tc := make(chan UpdateEvent)\n\ts.listenersLock.Lock()\n\tdefer s.listenersLock.Unlock()\n\tid := rand.String(8)\n\ts.listeners[id] = c\n\tf := func() {\n\t\ts.listenersLock.Lock()\n\t\tdefer s.listenersLock.Unlock()\n\t\tdelete(s.listeners, id)\n\t}\n\treturn c, f\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ Package resources contains various utilities for dealing with Kubernetes resources.\npackage resources\n<commit_msg>Remove a superfluous file (#6999)<commit_after><|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage sidecar\n\nimport(\n\t\"testing\"\n\n\t\"k8s.io\/dns\/pkg\/dnsmasq\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\n\/\/ initMetrics initializes dnsmasq.Metrics with values for testing.\nfunc initMetrics(metricsList []*dnsmasq.Metrics, values []int64) {\n\tdefineDnsmasqMetrics(&Options{PrometheusNamespace:\"dnsmasq\"})\n\tfor i := range metricsList {\n\t\tmetricsList[i] = &dnsmasq.Metrics{}\n\t\tfor j := range dnsmasq.AllMetrics {\n\t\t\tmetric := dnsmasq.AllMetrics[j]\n\t\t\t\/\/ Avoids giving each metric the same 
value.\n\t\t\t(*(metricsList[i]))[metric] = values[j] * int64(i+1)\n\t\t}\n\t}\n}\n\n\/\/ TestExportMetrics tests if our countersCache works as expected.\nfunc TestExportMetrics(t *testing.T) {\n\tvar m1, m2, m3 *dnsmasq.Metrics\n l := []*dnsmasq.Metrics{m1, m2, m3}\n\n\ttestMetricValues := []int64{10, 20, 30, 40, 50}\n\tinitMetrics(l, testMetricValues)\n\n\tfor i := range l {\n\t\texportMetrics(l[i])\n\t\tfor j := range dnsmasq.AllMetrics {\n\t\t\tassert.Equal(t, countersCache[dnsmasq.AllMetrics[j]], float64(testMetricValues[j] * int64(i+1)))\n\t\t}\n\t}\n}\n<commit_msg>Ran gofmt<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage sidecar\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"k8s.io\/dns\/pkg\/dnsmasq\"\n)\n\n\/\/ initMetrics initializes dnsmasq.Metrics with values for testing.\nfunc initMetrics(metricsList []*dnsmasq.Metrics, values []int64) {\n\tdefineDnsmasqMetrics(&Options{PrometheusNamespace: \"dnsmasq\"})\n\tfor i := range metricsList {\n\t\tmetricsList[i] = &dnsmasq.Metrics{}\n\t\tfor j := range dnsmasq.AllMetrics {\n\t\t\tmetric := dnsmasq.AllMetrics[j]\n\t\t\t\/\/ Avoids giving each metric the same value.\n\t\t\t(*(metricsList[i]))[metric] = values[j] * int64(i+1)\n\t\t}\n\t}\n}\n\n\/\/ TestExportMetrics tests if our countersCache works as expected.\nfunc TestExportMetrics(t *testing.T) {\n\tvar m1, m2, m3 *dnsmasq.Metrics\n\tl := []*dnsmasq.Metrics{m1, m2, m3}\n\n\ttestMetricValues := []int64{10, 20, 30, 40, 50}\n\tinitMetrics(l, testMetricValues)\n\n\tfor i := range l {\n\t\texportMetrics(l[i])\n\t\tfor j := range dnsmasq.AllMetrics {\n\t\t\tassert.Equal(t, countersCache[dnsmasq.AllMetrics[j]], float64(testMetricValues[j]*int64(i+1)))\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package tarheader\n\nimport (\n\t\"archive\/tar\"\n\t\"os\"\n\t\"syscall\"\n)\n\nfunc init() {\n\tpopulateHeaderStat = append(populateHeaderStat, populateHeaderUnix)\n}\n\nfunc populateHeaderUnix(h *tar.Header, fi os.FileInfo, seen map[uint64]string) {\n\tst, ok := fi.Sys().(*syscall.Stat_t)\n\tif !ok {\n\t\treturn\n\t}\n\th.Uid = int(st.Uid)\n\th.Gid = int(st.Gid)\n\t\/\/ If we have already seen this inode, generate a hardlink\n\tp, ok := seen[st.Ino]\n\tif ok {\n\t\th.Linkname = p\n\t\th.Typeflag = tar.TypeLink\n\t} else {\n\t\tseen[st.Ino] = h.Name\n\t}\n}\n<commit_msg>pkg\/tarheader: cast inode up to uint64<commit_after>package tarheader\n\nimport (\n\t\"archive\/tar\"\n\t\"os\"\n\t\"syscall\"\n)\n\nfunc init() {\n\tpopulateHeaderStat = append(populateHeaderStat, populateHeaderUnix)\n}\n\nfunc populateHeaderUnix(h *tar.Header, fi os.FileInfo, seen map[uint64]string) {\n\tst, ok := fi.Sys().(*syscall.Stat_t)\n\tif !ok {\n\t\treturn\n\t}\n\th.Uid = int(st.Uid)\n\th.Gid = int(st.Gid)\n\t\/\/ If we have already seen this inode, generate a hardlink\n\tp, ok := seen[uint64(st.Ino)]\n\tif ok {\n\t\th.Linkname = p\n\t\th.Typeflag = tar.TypeLink\n\t} else {\n\t\tseen[uint64(st.Ino)] = 
h.Name\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The etcd-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage k8sutil\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"path\"\n\t\"time\"\n\n\tbackupenv \"github.com\/coreos\/etcd-operator\/pkg\/backup\/env\"\n\t\"github.com\/coreos\/etcd-operator\/pkg\/spec\"\n\t\"github.com\/coreos\/etcd-operator\/pkg\/util\/constants\"\n\n\t\"github.com\/coreos\/etcd-operator\/pkg\/backup\/s3\/s3config\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/resource\"\n\tunversionedAPI \"k8s.io\/kubernetes\/pkg\/api\/unversioned\"\n\t\"k8s.io\/kubernetes\/pkg\/apis\/extensions\"\n\t\"k8s.io\/kubernetes\/pkg\/apis\/storage\"\n\t\"k8s.io\/kubernetes\/pkg\/client\/unversioned\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/intstr\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/wait\"\n\t\"k8s.io\/kubernetes\/pkg\/watch\"\n)\n\nconst (\n\tstorageClassPrefix = \"etcd-operator-backup\"\n\tBackupPodSelectorAppField = \"etcd_backup_tool\"\n\tbackupPVVolName = \"etcd-backup-storage\"\n\tawsCredentialDir = \"\/root\/.aws\/\"\n\tawsConfigDir = \"\/root\/.aws\/config\/\"\n\tawsSecretVolName = \"secret-aws\"\n\tawsConfigVolName = \"config-aws\"\n\tfromDirMountDir = \"\/mnt\/backup\/from\"\n)\n\nfunc CreateStorageClass(kubecli *unversioned.Client, pvProvisioner string) error {\n\t\/\/ We need to get rid of prefix because naming doesn't support \"\/\".\n\tname := storageClassPrefix + \"-\" + path.Base(pvProvisioner)\n\tclass := &storage.StorageClass{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tProvisioner: pvProvisioner,\n\t}\n\t_, err := kubecli.StorageClasses().Create(class)\n\treturn err\n}\n\nfunc CreateAndWaitPVC(kubecli *unversioned.Client, clusterName, ns, pvProvisioner string, volumeSizeInMB int) error {\n\tname := makePVCName(clusterName)\n\tstorageClassName := storageClassPrefix + \"-\" + path.Base(pvProvisioner)\n\tclaim := &api.PersistentVolumeClaim{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: name,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"etcd_cluster\": clusterName,\n\t\t\t},\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"volume.beta.kubernetes.io\/storage-class\": storageClassName,\n\t\t\t},\n\t\t},\n\t\tSpec: api.PersistentVolumeClaimSpec{\n\t\t\tAccessModes: []api.PersistentVolumeAccessMode{\n\t\t\t\tapi.ReadWriteOnce,\n\t\t\t},\n\t\t\tResources: api.ResourceRequirements{\n\t\t\t\tRequests: api.ResourceList{\n\t\t\t\t\tapi.ResourceStorage: resource.MustParse(fmt.Sprintf(\"%dMi\", volumeSizeInMB)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\t_, err := kubecli.PersistentVolumeClaims(ns).Create(claim)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = wait.Poll(4*time.Second, 20*time.Second, func() (bool, error) {\n\t\tvar err error\n\t\tclaim, err = kubecli.PersistentVolumeClaims(ns).Get(name)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif claim.Status.Phase != api.ClaimBound {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, 
nil\n\t})\n\tif err != nil {\n\t\twErr := fmt.Errorf(\"fail to wait PVC (%s) '(%v)\/Bound': %v\", name, claim.Status.Phase, err)\n\t\treturn wErr\n\t}\n\n\treturn nil\n}\n\nvar BackupImage = \"quay.io\/coreos\/etcd-operator:latest\"\n\nfunc PodSpecWithPV(ps *api.PodSpec, clusterName string) *api.PodSpec {\n\tps.Containers[0].VolumeMounts = []api.VolumeMount{{\n\t\tName: backupPVVolName,\n\t\tMountPath: constants.BackupDir,\n\t}}\n\tps.Volumes = []api.Volume{{\n\t\tName: backupPVVolName,\n\t\tVolumeSource: api.VolumeSource{\n\t\t\tPersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{\n\t\t\t\tClaimName: makePVCName(clusterName),\n\t\t\t},\n\t\t},\n\t}}\n\treturn ps\n}\n\nfunc PodSpecWithS3(ps *api.PodSpec, s3Ctx s3config.S3Context) *api.PodSpec {\n\tps.Containers[0].VolumeMounts = []api.VolumeMount{{\n\t\tName: awsSecretVolName,\n\t\tMountPath: awsCredentialDir,\n\t}, {\n\t\tName: awsConfigVolName,\n\t\tMountPath: awsConfigDir,\n\t}}\n\tps.Volumes = []api.Volume{{\n\t\tName: awsSecretVolName,\n\t\tVolumeSource: api.VolumeSource{\n\t\t\tSecret: &api.SecretVolumeSource{\n\t\t\t\tSecretName: s3Ctx.AWSSecret,\n\t\t\t},\n\t\t},\n\t}, {\n\t\tName: awsConfigVolName,\n\t\tVolumeSource: api.VolumeSource{\n\t\t\tConfigMap: &api.ConfigMapVolumeSource{\n\t\t\t\tLocalObjectReference: api.LocalObjectReference{\n\t\t\t\t\tName: s3Ctx.AWSConfig,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}}\n\tps.Containers[0].Env = append(ps.Containers[0].Env, api.EnvVar{\n\t\tName: backupenv.AWSConfig,\n\t\tValue: awsConfigDir,\n\t}, api.EnvVar{\n\t\tName: backupenv.AWSS3Bucket,\n\t\tValue: s3Ctx.S3Bucket,\n\t})\n\treturn ps\n}\n\nfunc MakeBackupPodSpec(clusterName string, policy *spec.BackupPolicy) (*api.PodSpec, error) {\n\tbp, err := json.Marshal(policy)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tps := &api.PodSpec{\n\t\tContainers: []api.Container{\n\t\t\t{\n\t\t\t\tName: \"backup\",\n\t\t\t\tImage: BackupImage,\n\t\t\t\tCommand: []string{\n\t\t\t\t\t\"\/bin\/sh\",\n\t\t\t\t\t\"-c\",\n\t\t\t\t\t\"\/usr\/local\/bin\/etcd-backup --etcd-cluster=\" + clusterName,\n\t\t\t\t},\n\t\t\t\tEnv: []api.EnvVar{{\n\t\t\t\t\tName: \"MY_POD_NAMESPACE\",\n\t\t\t\t\tValueFrom: &api.EnvVarSource{FieldRef: &api.ObjectFieldSelector{FieldPath: \"metadata.namespace\"}},\n\t\t\t\t}, {\n\t\t\t\t\tName: backupenv.BackupPolicy,\n\t\t\t\t\tValue: string(bp),\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t}\n\treturn ps, nil\n}\n\nfunc CreateBackupReplicaSetAndService(kubecli *unversioned.Client, clusterName, ns string, ps api.PodSpec) error {\n\tlabels := map[string]string{\n\t\t\"app\": BackupPodSelectorAppField,\n\t\t\"etcd_cluster\": clusterName,\n\t}\n\tname := MakeBackupName(clusterName)\n\t_, err := kubecli.ReplicaSets(ns).Create(&extensions.ReplicaSet{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tSpec: extensions.ReplicaSetSpec{\n\t\t\tReplicas: 1,\n\t\t\tSelector: &unversionedAPI.LabelSelector{MatchLabels: labels},\n\t\t\tTemplate: api.PodTemplateSpec{\n\t\t\t\tObjectMeta: api.ObjectMeta{\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: ps,\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tif !IsKubernetesResourceAlreadyExistError(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tsvc := &api.Service{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: name,\n\t\t\tLabels: labels,\n\t\t},\n\t\tSpec: api.ServiceSpec{\n\t\t\tPorts: []api.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tName: \"backup-service\",\n\t\t\t\t\tPort: constants.DefaultBackupPodHTTPPort,\n\t\t\t\t\tTargetPort: 
intstr.FromInt(constants.DefaultBackupPodHTTPPort),\n\t\t\t\t\tProtocol: api.ProtocolTCP,\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: labels,\n\t\t},\n\t}\n\tif _, err := kubecli.Services(ns).Create(svc); err != nil {\n\t\tif !IsKubernetesResourceAlreadyExistError(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc DeleteBackupReplicaSetAndService(kubecli *unversioned.Client, clusterName, ns string, cleanup bool) error {\n\tname := MakeBackupName(clusterName)\n\terr := kubecli.Services(ns).Delete(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\torphanOption := false\n\tgracePeriod := int64(0)\n\terr = kubecli.ReplicaSets(ns).Delete(name, &api.DeleteOptions{\n\t\tOrphanDependents: &orphanOption,\n\t\tGracePeriodSeconds: &gracePeriod,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif cleanup {\n\t\tkubecli.PersistentVolumeClaims(ns).Delete(makePVCName(clusterName))\n\t}\n\treturn nil\n}\n\nfunc CopyVolume(kubecli *unversioned.Client, fromClusterName, toClusterName, ns string) error {\n\tpod := &api.Pod{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: copyVolumePodName(toClusterName),\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"etcd_cluster\": toClusterName,\n\t\t\t},\n\t\t},\n\t\tSpec: api.PodSpec{\n\t\t\tContainers: []api.Container{\n\t\t\t\t{\n\t\t\t\t\tName: \"copy-backup\",\n\t\t\t\t\tImage: \"alpine\",\n\t\t\t\t\tCommand: []string{\n\t\t\t\t\t\t\"\/bin\/sh\",\n\t\t\t\t\t\t\"-c\",\n\t\t\t\t\t\tfmt.Sprintf(\"cp -r %s\/* %s\/\", fromDirMountDir, constants.BackupDir),\n\t\t\t\t\t},\n\t\t\t\t\tVolumeMounts: []api.VolumeMount{{\n\t\t\t\t\t\tName: \"from-dir\",\n\t\t\t\t\t\tMountPath: fromDirMountDir,\n\t\t\t\t\t}, {\n\t\t\t\t\t\tName: \"to-dir\",\n\t\t\t\t\t\tMountPath: constants.BackupDir,\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRestartPolicy: api.RestartPolicyNever,\n\t\t\tVolumes: []api.Volume{{\n\t\t\t\tName: \"from-dir\",\n\t\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\t\tPersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\tClaimName: makePVCName(fromClusterName),\n\t\t\t\t\t\tReadOnly: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}, {\n\t\t\t\tName: \"to-dir\",\n\t\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\t\tPersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\tClaimName: makePVCName(toClusterName),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t}\n\tif _, err := kubecli.Pods(ns).Create(pod); err != nil {\n\t\treturn err\n\t}\n\n\tw, err := kubecli.Pods(ns).Watch(api.SingleObject(api.ObjectMeta{Name: pod.Name}))\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ It could take long due to delay of k8s controller detaching the volume\n\t_, err = watch.Until(120*time.Second, w, unversioned.PodCompleted)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"fail to wait data copy job completed: %v\", err)\n\t}\n\t\/\/ Delete the pod to detach the volume from the node\n\treturn kubecli.Pods(ns).Delete(pod.Name, api.NewDeleteOptions(0))\n}\n\nfunc copyVolumePodName(clusterName string) string {\n\treturn clusterName + \"-copyvolume\"\n}\n\nfunc makePVCName(clusterName string) string {\n\treturn fmt.Sprintf(\"%s-pvc\", clusterName)\n}\n<commit_msg>s3: fix config env<commit_after>\/\/ Copyright 2016 The etcd-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, 
software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage k8sutil\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"path\"\n\t\"time\"\n\n\tbackupenv \"github.com\/coreos\/etcd-operator\/pkg\/backup\/env\"\n\t\"github.com\/coreos\/etcd-operator\/pkg\/spec\"\n\t\"github.com\/coreos\/etcd-operator\/pkg\/util\/constants\"\n\n\t\"github.com\/coreos\/etcd-operator\/pkg\/backup\/s3\/s3config\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/resource\"\n\tunversionedAPI \"k8s.io\/kubernetes\/pkg\/api\/unversioned\"\n\t\"k8s.io\/kubernetes\/pkg\/apis\/extensions\"\n\t\"k8s.io\/kubernetes\/pkg\/apis\/storage\"\n\t\"k8s.io\/kubernetes\/pkg\/client\/unversioned\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/intstr\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/wait\"\n\t\"k8s.io\/kubernetes\/pkg\/watch\"\n)\n\nconst (\n\tstorageClassPrefix = \"etcd-operator-backup\"\n\tBackupPodSelectorAppField = \"etcd_backup_tool\"\n\tbackupPVVolName = \"etcd-backup-storage\"\n\tawsCredentialDir = \"\/root\/.aws\/\"\n\tawsConfigDir = \"\/root\/.aws\/config\/\"\n\tawsSecretVolName = \"secret-aws\"\n\tawsConfigVolName = \"config-aws\"\n\tfromDirMountDir = \"\/mnt\/backup\/from\"\n)\n\nfunc CreateStorageClass(kubecli *unversioned.Client, pvProvisioner string) error {\n\t\/\/ We need to get rid of prefix because naming doesn't support \"\/\".\n\tname := storageClassPrefix + \"-\" + path.Base(pvProvisioner)\n\tclass := &storage.StorageClass{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tProvisioner: pvProvisioner,\n\t}\n\t_, err := kubecli.StorageClasses().Create(class)\n\treturn err\n}\n\nfunc CreateAndWaitPVC(kubecli *unversioned.Client, clusterName, ns, pvProvisioner string, volumeSizeInMB int) error {\n\tname := makePVCName(clusterName)\n\tstorageClassName := storageClassPrefix + \"-\" + path.Base(pvProvisioner)\n\tclaim := &api.PersistentVolumeClaim{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: name,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"etcd_cluster\": clusterName,\n\t\t\t},\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"volume.beta.kubernetes.io\/storage-class\": storageClassName,\n\t\t\t},\n\t\t},\n\t\tSpec: api.PersistentVolumeClaimSpec{\n\t\t\tAccessModes: []api.PersistentVolumeAccessMode{\n\t\t\t\tapi.ReadWriteOnce,\n\t\t\t},\n\t\t\tResources: api.ResourceRequirements{\n\t\t\t\tRequests: api.ResourceList{\n\t\t\t\t\tapi.ResourceStorage: resource.MustParse(fmt.Sprintf(\"%dMi\", volumeSizeInMB)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\t_, err := kubecli.PersistentVolumeClaims(ns).Create(claim)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = wait.Poll(4*time.Second, 20*time.Second, func() (bool, error) {\n\t\tvar err error\n\t\tclaim, err = kubecli.PersistentVolumeClaims(ns).Get(name)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif claim.Status.Phase != api.ClaimBound {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\twErr := fmt.Errorf(\"fail to wait PVC (%s) '(%v)\/Bound': %v\", name, claim.Status.Phase, err)\n\t\treturn wErr\n\t}\n\n\treturn nil\n}\n\nvar BackupImage = \"quay.io\/coreos\/etcd-operator:latest\"\n\nfunc PodSpecWithPV(ps *api.PodSpec, clusterName string) *api.PodSpec {\n\tps.Containers[0].VolumeMounts = []api.VolumeMount{{\n\t\tName: backupPVVolName,\n\t\tMountPath: 
constants.BackupDir,\n\t}}\n\tps.Volumes = []api.Volume{{\n\t\tName: backupPVVolName,\n\t\tVolumeSource: api.VolumeSource{\n\t\t\tPersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{\n\t\t\t\tClaimName: makePVCName(clusterName),\n\t\t\t},\n\t\t},\n\t}}\n\treturn ps\n}\n\nfunc PodSpecWithS3(ps *api.PodSpec, s3Ctx s3config.S3Context) *api.PodSpec {\n\tps.Containers[0].VolumeMounts = []api.VolumeMount{{\n\t\tName: awsSecretVolName,\n\t\tMountPath: awsCredentialDir,\n\t}, {\n\t\tName: awsConfigVolName,\n\t\tMountPath: awsConfigDir,\n\t}}\n\tps.Volumes = []api.Volume{{\n\t\tName: awsSecretVolName,\n\t\tVolumeSource: api.VolumeSource{\n\t\t\tSecret: &api.SecretVolumeSource{\n\t\t\t\tSecretName: s3Ctx.AWSSecret,\n\t\t\t},\n\t\t},\n\t}, {\n\t\tName: awsConfigVolName,\n\t\tVolumeSource: api.VolumeSource{\n\t\t\tConfigMap: &api.ConfigMapVolumeSource{\n\t\t\t\tLocalObjectReference: api.LocalObjectReference{\n\t\t\t\t\tName: s3Ctx.AWSConfig,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}}\n\tps.Containers[0].Env = append(ps.Containers[0].Env, api.EnvVar{\n\t\tName: backupenv.AWSConfig,\n\t\tValue: path.Join(awsConfigDir, \"config\"),\n\t}, api.EnvVar{\n\t\tName: backupenv.AWSS3Bucket,\n\t\tValue: s3Ctx.S3Bucket,\n\t})\n\treturn ps\n}\n\nfunc MakeBackupPodSpec(clusterName string, policy *spec.BackupPolicy) (*api.PodSpec, error) {\n\tbp, err := json.Marshal(policy)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tps := &api.PodSpec{\n\t\tContainers: []api.Container{\n\t\t\t{\n\t\t\t\tName: \"backup\",\n\t\t\t\tImage: BackupImage,\n\t\t\t\tCommand: []string{\n\t\t\t\t\t\"\/bin\/sh\",\n\t\t\t\t\t\"-c\",\n\t\t\t\t\t\"\/usr\/local\/bin\/etcd-backup --etcd-cluster=\" + clusterName,\n\t\t\t\t},\n\t\t\t\tEnv: []api.EnvVar{{\n\t\t\t\t\tName: \"MY_POD_NAMESPACE\",\n\t\t\t\t\tValueFrom: &api.EnvVarSource{FieldRef: &api.ObjectFieldSelector{FieldPath: \"metadata.namespace\"}},\n\t\t\t\t}, {\n\t\t\t\t\tName: backupenv.BackupPolicy,\n\t\t\t\t\tValue: string(bp),\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t}\n\treturn ps, nil\n}\n\nfunc CreateBackupReplicaSetAndService(kubecli *unversioned.Client, clusterName, ns string, ps api.PodSpec) error {\n\tlabels := map[string]string{\n\t\t\"app\": BackupPodSelectorAppField,\n\t\t\"etcd_cluster\": clusterName,\n\t}\n\tname := MakeBackupName(clusterName)\n\t_, err := kubecli.ReplicaSets(ns).Create(&extensions.ReplicaSet{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tSpec: extensions.ReplicaSetSpec{\n\t\t\tReplicas: 1,\n\t\t\tSelector: &unversionedAPI.LabelSelector{MatchLabels: labels},\n\t\t\tTemplate: api.PodTemplateSpec{\n\t\t\t\tObjectMeta: api.ObjectMeta{\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: ps,\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tif !IsKubernetesResourceAlreadyExistError(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tsvc := &api.Service{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: name,\n\t\t\tLabels: labels,\n\t\t},\n\t\tSpec: api.ServiceSpec{\n\t\t\tPorts: []api.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tName: \"backup-service\",\n\t\t\t\t\tPort: constants.DefaultBackupPodHTTPPort,\n\t\t\t\t\tTargetPort: intstr.FromInt(constants.DefaultBackupPodHTTPPort),\n\t\t\t\t\tProtocol: api.ProtocolTCP,\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: labels,\n\t\t},\n\t}\n\tif _, err := kubecli.Services(ns).Create(svc); err != nil {\n\t\tif !IsKubernetesResourceAlreadyExistError(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc DeleteBackupReplicaSetAndService(kubecli *unversioned.Client, clusterName, ns string, cleanup bool) error {\n\tname := 
MakeBackupName(clusterName)\n\terr := kubecli.Services(ns).Delete(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\torphanOption := false\n\tgracePeriod := int64(0)\n\terr = kubecli.ReplicaSets(ns).Delete(name, &api.DeleteOptions{\n\t\tOrphanDependents: &orphanOption,\n\t\tGracePeriodSeconds: &gracePeriod,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif cleanup {\n\t\tkubecli.PersistentVolumeClaims(ns).Delete(makePVCName(clusterName))\n\t}\n\treturn nil\n}\n\nfunc CopyVolume(kubecli *unversioned.Client, fromClusterName, toClusterName, ns string) error {\n\tpod := &api.Pod{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: copyVolumePodName(toClusterName),\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"etcd_cluster\": toClusterName,\n\t\t\t},\n\t\t},\n\t\tSpec: api.PodSpec{\n\t\t\tContainers: []api.Container{\n\t\t\t\t{\n\t\t\t\t\tName: \"copy-backup\",\n\t\t\t\t\tImage: \"alpine\",\n\t\t\t\t\tCommand: []string{\n\t\t\t\t\t\t\"\/bin\/sh\",\n\t\t\t\t\t\t\"-c\",\n\t\t\t\t\t\tfmt.Sprintf(\"cp -r %s\/* %s\/\", fromDirMountDir, constants.BackupDir),\n\t\t\t\t\t},\n\t\t\t\t\tVolumeMounts: []api.VolumeMount{{\n\t\t\t\t\t\tName: \"from-dir\",\n\t\t\t\t\t\tMountPath: fromDirMountDir,\n\t\t\t\t\t}, {\n\t\t\t\t\t\tName: \"to-dir\",\n\t\t\t\t\t\tMountPath: constants.BackupDir,\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRestartPolicy: api.RestartPolicyNever,\n\t\t\tVolumes: []api.Volume{{\n\t\t\t\tName: \"from-dir\",\n\t\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\t\tPersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\tClaimName: makePVCName(fromClusterName),\n\t\t\t\t\t\tReadOnly: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}, {\n\t\t\t\tName: \"to-dir\",\n\t\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\t\tPersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\tClaimName: makePVCName(toClusterName),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t}\n\tif _, err := kubecli.Pods(ns).Create(pod); err != nil {\n\t\treturn err\n\t}\n\n\tw, err := kubecli.Pods(ns).Watch(api.SingleObject(api.ObjectMeta{Name: pod.Name}))\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ It could take long due to delay of k8s controller detaching the volume\n\t_, err = watch.Until(120*time.Second, w, unversioned.PodCompleted)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"fail to wait data copy job completed: %v\", err)\n\t}\n\t\/\/ Delete the pod to detach the volume from the node\n\treturn kubecli.Pods(ns).Delete(pod.Name, api.NewDeleteOptions(0))\n}\n\nfunc copyVolumePodName(clusterName string) string {\n\treturn clusterName + \"-copyvolume\"\n}\n\nfunc makePVCName(clusterName string) string {\n\treturn fmt.Sprintf(\"%s-pvc\", clusterName)\n}\n<|endoftext|>"} {"text":"<commit_before>package utils\n\nimport (\n\t\"fmt\"\n\n\tbuildv1alpha1 \"github.com\/knative\/build\/pkg\/apis\/build\/v1alpha1\"\n\tservingv1alpha1 \"github.com\/knative\/serving\/pkg\/apis\/serving\/v1alpha1\"\n\truntimev1alpha1 \"github.com\/kyma-incubator\/runtime\/pkg\/apis\/runtime\/v1alpha1\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n)\n\n\/\/ GetServiceSpec gets ServiceSpec for a function\nfunc GetServiceSpec(imageName string, fn runtimev1alpha1.Function, rnInfo *RuntimeInfo) servingv1alpha1.ServiceSpec {\n\tdefaultMode := int32(420)\n\tbuildContainer := getBuildContainer(imageName, fn, rnInfo)\n\tvolumes := []corev1.Volume{\n\t\t{\n\t\t\tName: \"dockerfile-vol\",\n\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\tDefaultMode: &defaultMode,\n\t\t\t\t\tLocalObjectReference: 
corev1.LocalObjectReference{\n\t\t\t\t\t\tName: rnInfo.DockerFileConfigMapName(fn.Spec.Runtime),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"func-vol\",\n\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\tDefaultMode: &defaultMode,\n\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\tName: fn.Name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t\/\/ TODO: Make it constant for nodejs8\/nodejs6\n\tenvVarsForRevision := []corev1.EnvVar{\n\t\t{\n\t\t\tName: \"FUNC_HANDLER\",\n\t\t\tValue: \"main\",\n\t\t},\n\t\t{\n\t\t\tName: \"MOD_NAME\",\n\t\t\tValue: \"handler\",\n\t\t},\n\t\t{\n\t\t\tName: \"FUNC_TIMEOUT\",\n\t\t\tValue: \"180\",\n\t\t},\n\t\t{\n\t\t\tName: \"FUNC_RUNTIME\",\n\t\t\tValue: \"nodejs8\",\n\t\t},\n\t\t{\n\t\t\tName: \"FUNC_MEMORY_LIMIT\",\n\t\t\tValue: \"128Mi\",\n\t\t},\n\t\t{\n\t\t\tName: \"FUNC_PORT\",\n\t\t\tValue: \"8080\",\n\t\t},\n\t\t{\n\t\t\tName: \"NODE_PATH\",\n\t\t\tValue: \"$(KUBELESS_INSTALL_VOLUME)\/node_modules\",\n\t\t},\n\t}\n\n\treturn servingv1alpha1.ServiceSpec{\n\t\tRunLatest: &servingv1alpha1.RunLatestType{\n\t\t\tConfiguration: servingv1alpha1.ConfigurationSpec{\n\t\t\t\tBuild: &servingv1alpha1.RawExtension{\n\t\t\t\t\tBuildSpec: &buildv1alpha1.BuildSpec{\n\t\t\t\t\t\tServiceAccountName: rnInfo.ServiceAccount,\n\t\t\t\t\t\tSteps: []corev1.Container{\n\t\t\t\t\t\t\t*buildContainer,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tVolumes: volumes,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tRevisionTemplate: servingv1alpha1.RevisionTemplateSpec{\n\t\t\t\t\tSpec: servingv1alpha1.RevisionSpec{\n\t\t\t\t\t\tContainer: corev1.Container{\n\t\t\t\t\t\t\tImage: imageName,\n\t\t\t\t\t\t\tEnv: envVarsForRevision,\n\t\t\t\t\t\t\tName: fn.Name,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc getBuildContainer(imageName string, fn runtimev1alpha1.Function, riUtil *RuntimeInfo) *corev1.Container {\n\tdestination := fmt.Sprintf(\"--destination=%s\", imageName)\n\tbuildContainer := corev1.Container{\n\t\tName: \"build-and-push\",\n\t\tImage: \"gcr.io\/kaniko-project\/executor\",\n\t\tArgs: []string{\"--dockerfile=\/workspace\/Dockerfile\", destination},\n\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t{\n\t\t\t\tName: \"dockerfile-vol\", \/\/TODO: make it configurable\n\t\t\t\tMountPath: \"\/workspace\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"func-vol\",\n\t\t\t\tMountPath: \"\/src\",\n\t\t\t},\n\t\t},\n\t}\n\n\treturn &buildContainer\n}\n<commit_msg>Revert \"add functionName as container name in service\"<commit_after>package utils\n\nimport (\n\t\"fmt\"\n\n\tbuildv1alpha1 \"github.com\/knative\/build\/pkg\/apis\/build\/v1alpha1\"\n\tservingv1alpha1 \"github.com\/knative\/serving\/pkg\/apis\/serving\/v1alpha1\"\n\truntimev1alpha1 \"github.com\/kyma-incubator\/runtime\/pkg\/apis\/runtime\/v1alpha1\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n)\n\n\/\/ GetServiceSpec gets ServiceSpec for a function\nfunc GetServiceSpec(imageName string, fn runtimev1alpha1.Function, rnInfo *RuntimeInfo) servingv1alpha1.ServiceSpec {\n\tdefaultMode := int32(420)\n\tbuildContainer := getBuildContainer(imageName, fn, rnInfo)\n\tvolumes := []corev1.Volume{\n\t\t{\n\t\t\tName: \"dockerfile-vol\",\n\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\tDefaultMode: &defaultMode,\n\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\tName: 
rnInfo.DockerFileConfigMapName(fn.Spec.Runtime),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"func-vol\",\n\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\tDefaultMode: &defaultMode,\n\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\tName: fn.Name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t\/\/ TODO: Make it constant for nodejs8\/nodejs6\n\tenvVarsForRevision := []corev1.EnvVar{\n\t\t{\n\t\t\tName: \"FUNC_HANDLER\",\n\t\t\tValue: \"main\",\n\t\t},\n\t\t{\n\t\t\tName: \"MOD_NAME\",\n\t\t\tValue: \"handler\",\n\t\t},\n\t\t{\n\t\t\tName: \"FUNC_TIMEOUT\",\n\t\t\tValue: \"180\",\n\t\t},\n\t\t{\n\t\t\tName: \"FUNC_RUNTIME\",\n\t\t\tValue: \"nodejs8\",\n\t\t},\n\t\t{\n\t\t\tName: \"FUNC_MEMORY_LIMIT\",\n\t\t\tValue: \"128Mi\",\n\t\t},\n\t\t{\n\t\t\tName: \"FUNC_PORT\",\n\t\t\tValue: \"8080\",\n\t\t},\n\t\t{\n\t\t\tName: \"NODE_PATH\",\n\t\t\tValue: \"$(KUBELESS_INSTALL_VOLUME)\/node_modules\",\n\t\t},\n\t}\n\n\treturn servingv1alpha1.ServiceSpec{\n\t\tRunLatest: &servingv1alpha1.RunLatestType{\n\t\t\tConfiguration: servingv1alpha1.ConfigurationSpec{\n\t\t\t\tBuild: &servingv1alpha1.RawExtension{\n\t\t\t\t\tBuildSpec: &buildv1alpha1.BuildSpec{\n\t\t\t\t\t\tServiceAccountName: rnInfo.ServiceAccount,\n\t\t\t\t\t\tSteps: []corev1.Container{\n\t\t\t\t\t\t\t*buildContainer,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tVolumes: volumes,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tRevisionTemplate: servingv1alpha1.RevisionTemplateSpec{\n\t\t\t\t\tSpec: servingv1alpha1.RevisionSpec{\n\t\t\t\t\t\tContainer: corev1.Container{\n\t\t\t\t\t\t\tImage: imageName,\n\t\t\t\t\t\t\tEnv: envVarsForRevision,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc getBuildContainer(imageName string, fn runtimev1alpha1.Function, riUtil *RuntimeInfo) *corev1.Container {\n\tdestination := fmt.Sprintf(\"--destination=%s\", imageName)\n\tbuildContainer := corev1.Container{\n\t\tName: \"build-and-push\",\n\t\tImage: \"gcr.io\/kaniko-project\/executor\",\n\t\tArgs: []string{\"--dockerfile=\/workspace\/Dockerfile\", destination},\n\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t{\n\t\t\t\tName: \"dockerfile-vol\", \/\/TODO: make it configurable\n\t\t\t\tMountPath: \"\/workspace\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"func-vol\",\n\t\t\t\tMountPath: \"\/src\",\n\t\t\t},\n\t\t},\n\t}\n\n\treturn &buildContainer\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"player\"\n)\n\nconst PORT = 4711\n\nfunc main() {\n\thttp.HandleFunc(\"\/\", handleRequest)\n\tif err := http.ListenAndServe(fmt.Sprintf(\":%d\", PORT), nil); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc handleRequest(w http.ResponseWriter, request *http.Request) {\n\tif err := request.ParseForm(); err != nil {\n\t\tlog.Printf(\"Error parsing form data: %s\", err)\n\t\thttp.Error(w, \"Internal Server Error\", 500)\n\t\treturn\n\t}\n\taction := request.FormValue(\"action\")\n\tlog.Printf(\"Request method=%s url=%s action=%s from client=%s\\n\", request.Method, request.URL, action, request.RemoteAddr)\n\tswitch action {\n\tcase \"bet_request\":\n\t\tgameState := parseGameState(request.FormValue(\"game_state\"))\n\t\tif gameState == nil {\n\t\t\thttp.Error(w, \"Internal Server Error\", 500)\n\t\t\treturn\n\t\t}\n\t\tresult := player.BetRequest(gameState)\n\t\tfmt.Fprintf(w, \"%d\", result)\n\t\treturn\n\tcase \"showdown\":\n\t\tgameState := 
parseGameState(request.FormValue(\"game_state\"))\n\t\tif gameState == nil {\n\t\t\thttp.Error(w, \"Internal Server Error\", 500)\n\t\t\treturn\n\t\t}\n\t\tplayer.Showdown(gameState)\n\t\tfmt.Fprint(w, \"\")\n\t\treturn\n\tcase \"version\":\n\t\tfmt.Fprint(w, player.Version())\n\t\treturn\n\tdefault:\n\t\thttp.Error(w, \"Invalid action\", 400)\n\t}\n}\n\nfunc parseGameState(stateStr string) *player.GameState {\n\tstateBytes := []byte(stateStr)\n\tgameState := new(player.GameState)\n\tif err := json.Unmarshal(stateBytes, &gameState); err != nil {\n\t\tlog.Printf(\"Error parsing game state: %s\", err)\n\t\treturn nil\n\t}\n\treturn gameState\n}\n<commit_msg>Add check action<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"player\"\n)\n\nconst PORT = 4711\n\nfunc main() {\n\thttp.HandleFunc(\"\/\", handleRequest)\n\tif err := http.ListenAndServe(fmt.Sprintf(\":%d\", PORT), nil); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc handleRequest(w http.ResponseWriter, request *http.Request) {\n\tif err := request.ParseForm(); err != nil {\n\t\tlog.Printf(\"Error parsing form data: %s\", err)\n\t\thttp.Error(w, \"Internal Server Error\", 500)\n\t\treturn\n\t}\n\taction := request.FormValue(\"action\")\n\tlog.Printf(\"Request method=%s url=%s action=%s from client=%s\\n\", request.Method, request.URL, action, request.RemoteAddr)\n\tswitch action {\n\tcase \"check\":\n\t\tfmt.Fprint(w, \"\")\n\t\treturn\n\tcase \"bet_request\":\n\t\tgameState := parseGameState(request.FormValue(\"game_state\"))\n\t\tif gameState == nil {\n\t\t\thttp.Error(w, \"Internal Server Error\", 500)\n\t\t\treturn\n\t\t}\n\t\tresult := player.BetRequest(gameState)\n\t\tfmt.Fprintf(w, \"%d\", result)\n\t\treturn\n\tcase \"showdown\":\n\t\tgameState := parseGameState(request.FormValue(\"game_state\"))\n\t\tif gameState == nil {\n\t\t\thttp.Error(w, \"Internal Server Error\", 500)\n\t\t\treturn\n\t\t}\n\t\tplayer.Showdown(gameState)\n\t\tfmt.Fprint(w, \"\")\n\t\treturn\n\tcase \"version\":\n\t\tfmt.Fprint(w, player.Version())\n\t\treturn\n\tdefault:\n\t\thttp.Error(w, \"Invalid action\", 400)\n\t}\n}\n\nfunc parseGameState(stateStr string) *player.GameState {\n\tstateBytes := []byte(stateStr)\n\tgameState := new(player.GameState)\n\tif err := json.Unmarshal(stateBytes, &gameState); err != nil {\n\t\tlog.Printf(\"Error parsing game state: %s\", err)\n\t\treturn nil\n\t}\n\treturn gameState\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux\n\n\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage volume\n\nimport (\n\t\"path\/filepath\"\n\t\"syscall\"\n\n\t\"os\"\n\t\"time\"\n\n\tv1 \"k8s.io\/api\/core\/v1\"\n\tutilfeature \"k8s.io\/apiserver\/pkg\/util\/feature\"\n\t\"k8s.io\/klog\/v2\"\n\t\"k8s.io\/kubernetes\/pkg\/features\"\n\t\"k8s.io\/kubernetes\/pkg\/volume\/util\/types\"\n)\n\nconst (\n\trwMask = os.FileMode(0660)\n\troMask = os.FileMode(0440)\n\texecMask = os.FileMode(0110)\n)\n\n\/\/ SetVolumeOwnership modifies the given 
volume to be owned by\n\/\/ fsGroup, and sets SetGid so that newly created files are owned by\n\/\/ fsGroup. If fsGroup is nil nothing is done.\nfunc SetVolumeOwnership(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) error {\n\tif fsGroup == nil {\n\t\treturn nil\n\t}\n\n\tfsGroupPolicyEnabled := utilfeature.DefaultFeatureGate.Enabled(features.ConfigurableFSGroupPolicy)\n\n\ttimer := time.AfterFunc(30*time.Second, func() {\n\t\tklog.Warningf(\"Setting volume ownership for %s and fsGroup set. If the volume has a lot of files then setting volume ownership could be slow, see https:\/\/github.com\/kubernetes\/kubernetes\/issues\/69699\", mounter.GetPath())\n\t})\n\tdefer timer.Stop()\n\n\t\/\/ This code exists for legacy purposes, so as old behaviour is entirely preserved when feature gate is disabled\n\t\/\/ TODO: remove this when ConfigurableFSGroupPolicy turns GA.\n\tif !fsGroupPolicyEnabled {\n\t\terr := legacyOwnershipChange(mounter, fsGroup)\n\t\tif completeFunc != nil {\n\t\t\tcompleteFunc(types.CompleteFuncParam{\n\t\t\t\tErr: &err,\n\t\t\t})\n\t\t}\n\t\treturn err\n\t}\n\n\tif skipPermissionChange(mounter, fsGroup, fsGroupChangePolicy) {\n\t\tklog.V(3).Infof(\"skipping permission and ownership change for volume %s\", mounter.GetPath())\n\t\treturn nil\n\t}\n\n\terr := walkDeep(mounter.GetPath(), func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn changeFilePermission(path, fsGroup, mounter.GetAttributes().ReadOnly, info)\n\t})\n\tif completeFunc != nil {\n\t\tcompleteFunc(types.CompleteFuncParam{\n\t\t\tErr: &err,\n\t\t})\n\t}\n\treturn err\n}\n\nfunc legacyOwnershipChange(mounter Mounter, fsGroup *int64) error {\n\treturn filepath.Walk(mounter.GetPath(), func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn changeFilePermission(path, fsGroup, mounter.GetAttributes().ReadOnly, info)\n\t})\n}\n\nfunc changeFilePermission(filename string, fsGroup *int64, readonly bool, info os.FileInfo) error {\n\terr := os.Lchown(filename, -1, int(*fsGroup))\n\tif err != nil {\n\t\tklog.Errorf(\"Lchown failed on %v: %v\", filename, err)\n\t}\n\n\t\/\/ chmod passes through to the underlying file for symlinks.\n\t\/\/ Symlinks have a mode of 777 but this really doesn't mean anything.\n\t\/\/ The permissions of the underlying file are what matter.\n\t\/\/ However, if one reads the mode of a symlink then chmods the symlink\n\t\/\/ with that mode, it changes the mode of the underlying file, overridden\n\t\/\/ the defaultMode and permissions initialized by the volume plugin, which\n\t\/\/ is not what we want; thus, we skip chmod for symlinks.\n\tif info.Mode()&os.ModeSymlink != 0 {\n\t\treturn nil\n\t}\n\n\tmask := rwMask\n\tif readonly {\n\t\tmask = roMask\n\t}\n\n\tif info.IsDir() {\n\t\tmask |= os.ModeSetgid\n\t\tmask |= execMask\n\t}\n\n\terr = os.Chmod(filename, info.Mode()|mask)\n\tif err != nil {\n\t\tklog.Errorf(\"Chmod failed on %v: %v\", filename, err)\n\t}\n\n\treturn nil\n}\n\nfunc skipPermissionChange(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy) bool {\n\tdir := mounter.GetPath()\n\n\tif fsGroupChangePolicy == nil || *fsGroupChangePolicy != v1.FSGroupChangeOnRootMismatch {\n\t\tklog.V(4).Infof(\"perform recursive ownership change for %s\", dir)\n\t\treturn false\n\t}\n\treturn !requiresPermissionChange(mounter.GetPath(), fsGroup, 
mounter.GetAttributes().ReadOnly)\n}\n\nfunc requiresPermissionChange(rootDir string, fsGroup *int64, readonly bool) bool {\n\tfsInfo, err := os.Stat(rootDir)\n\tif err != nil {\n\t\tklog.Errorf(\"performing recursive ownership change on %s because reading permissions of root volume failed: %v\", rootDir, err)\n\t\treturn true\n\t}\n\tstat, ok := fsInfo.Sys().(*syscall.Stat_t)\n\tif !ok || stat == nil {\n\t\tklog.Errorf(\"performing recursive ownership change on %s because reading permissions of root volume failed\", rootDir)\n\t\treturn true\n\t}\n\n\tif int(stat.Gid) != int(*fsGroup) {\n\t\tklog.V(4).Infof(\"expected group ownership of volume %s did not match with: %d\", rootDir, stat.Gid)\n\t\treturn true\n\t}\n\tunixPerms := rwMask\n\n\tif readonly {\n\t\tunixPerms = roMask\n\t}\n\n\t\/\/ if rootDir is not a directory then we should apply permission change anyways\n\tif !fsInfo.IsDir() {\n\t\treturn true\n\t}\n\tunixPerms |= execMask\n\tfilePerm := fsInfo.Mode().Perm()\n\n\t\/\/ We need to check if actual permissions of root directory is a superset of permissions required by unixPerms.\n\t\/\/ This is done by checking if permission bits expected in unixPerms is set in actual permissions of the directory.\n\t\/\/ We use bitwise AND operation to check set bits. For example:\n\t\/\/ unixPerms: 770, filePerms: 775 : 770&775 = 770 (perms on directory is a superset)\n\t\/\/ unixPerms: 770, filePerms: 770 : 770&770 = 770 (perms on directory is a superset)\n\t\/\/ unixPerms: 770, filePerms: 750 : 770&750 = 750 (perms on directory is NOT a superset)\n\t\/\/ We also need to check if setgid bits are set in permissions of the directory.\n\tif (unixPerms&filePerm != unixPerms) || (fsInfo.Mode()&os.ModeSetgid == 0) {\n\t\tklog.V(4).Infof(\"performing recursive ownership change on %s because of mismatching mode\", rootDir)\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ readDirNames reads the directory named by dirname and returns\n\/\/ a list of directory entries.\n\/\/ We are not using filepath.readDirNames because we do not want to sort files found in a directory before changing\n\/\/ permissions for performance reasons.\nfunc readDirNames(dirname string) ([]string, error) {\n\tf, err := os.Open(dirname)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnames, err := f.Readdirnames(-1)\n\tf.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn names, nil\n}\n\n\/\/ walkDeep can be used to traverse directories and has two minor differences\n\/\/ from filepath.Walk:\n\/\/ - List of files\/dirs is not sorted for performance reasons\n\/\/ - callback walkFunc is invoked on root directory after visiting children dirs and files\nfunc walkDeep(root string, walkFunc filepath.WalkFunc) error {\n\tinfo, err := os.Lstat(root)\n\tif err != nil {\n\t\treturn walkFunc(root, nil, err)\n\t}\n\treturn walk(root, info, walkFunc)\n}\n\nfunc walk(path string, info os.FileInfo, walkFunc filepath.WalkFunc) error {\n\tif !info.IsDir() {\n\t\treturn walkFunc(path, info, nil)\n\t}\n\tnames, err := readDirNames(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, name := range names {\n\t\tfilename := filepath.Join(path, name)\n\t\tfileInfo, err := os.Lstat(filename)\n\t\tif err != nil {\n\t\t\tif err := walkFunc(filename, fileInfo, err); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\terr = walk(filename, fileInfo, walkFunc)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn walkFunc(path, info, nil)\n}\n<commit_msg>migrate log in 
pkg\/volume\/volume_linux.go<commit_after>\/\/ +build linux\n\n\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage volume\n\nimport (\n\t\"path\/filepath\"\n\t\"syscall\"\n\n\t\"os\"\n\t\"time\"\n\n\tv1 \"k8s.io\/api\/core\/v1\"\n\tutilfeature \"k8s.io\/apiserver\/pkg\/util\/feature\"\n\t\"k8s.io\/klog\/v2\"\n\t\"k8s.io\/kubernetes\/pkg\/features\"\n\t\"k8s.io\/kubernetes\/pkg\/volume\/util\/types\"\n)\n\nconst (\n\trwMask = os.FileMode(0660)\n\troMask = os.FileMode(0440)\n\texecMask = os.FileMode(0110)\n)\n\n\/\/ SetVolumeOwnership modifies the given volume to be owned by\n\/\/ fsGroup, and sets SetGid so that newly created files are owned by\n\/\/ fsGroup. If fsGroup is nil nothing is done.\nfunc SetVolumeOwnership(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) error {\n\tif fsGroup == nil {\n\t\treturn nil\n\t}\n\n\tfsGroupPolicyEnabled := utilfeature.DefaultFeatureGate.Enabled(features.ConfigurableFSGroupPolicy)\n\n\ttimer := time.AfterFunc(30*time.Second, func() {\n\t\tklog.Warningf(\"Setting volume ownership for %s and fsGroup set. If the volume has a lot of files then setting volume ownership could be slow, see https:\/\/github.com\/kubernetes\/kubernetes\/issues\/69699\", mounter.GetPath())\n\t})\n\tdefer timer.Stop()\n\n\t\/\/ This code exists for legacy purposes, so as old behaviour is entirely preserved when feature gate is disabled\n\t\/\/ TODO: remove this when ConfigurableFSGroupPolicy turns GA.\n\tif !fsGroupPolicyEnabled {\n\t\terr := legacyOwnershipChange(mounter, fsGroup)\n\t\tif completeFunc != nil {\n\t\t\tcompleteFunc(types.CompleteFuncParam{\n\t\t\t\tErr: &err,\n\t\t\t})\n\t\t}\n\t\treturn err\n\t}\n\n\tif skipPermissionChange(mounter, fsGroup, fsGroupChangePolicy) {\n\t\tklog.V(3).InfoS(\"Skipping permission and ownership change for volume\", \"path\", mounter.GetPath())\n\t\treturn nil\n\t}\n\n\terr := walkDeep(mounter.GetPath(), func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn changeFilePermission(path, fsGroup, mounter.GetAttributes().ReadOnly, info)\n\t})\n\tif completeFunc != nil {\n\t\tcompleteFunc(types.CompleteFuncParam{\n\t\t\tErr: &err,\n\t\t})\n\t}\n\treturn err\n}\n\nfunc legacyOwnershipChange(mounter Mounter, fsGroup *int64) error {\n\treturn filepath.Walk(mounter.GetPath(), func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn changeFilePermission(path, fsGroup, mounter.GetAttributes().ReadOnly, info)\n\t})\n}\n\nfunc changeFilePermission(filename string, fsGroup *int64, readonly bool, info os.FileInfo) error {\n\terr := os.Lchown(filename, -1, int(*fsGroup))\n\tif err != nil {\n\t\tklog.ErrorS(err, \"Lchown failed\", \"path\", filename)\n\t}\n\n\t\/\/ chmod passes through to the underlying file for symlinks.\n\t\/\/ Symlinks have a mode of 777 but this really doesn't mean anything.\n\t\/\/ The permissions of the 
underlying file are what matter.\n\t\/\/ However, if one reads the mode of a symlink then chmods the symlink\n\t\/\/ with that mode, it changes the mode of the underlying file, overridden\n\t\/\/ the defaultMode and permissions initialized by the volume plugin, which\n\t\/\/ is not what we want; thus, we skip chmod for symlinks.\n\tif info.Mode()&os.ModeSymlink != 0 {\n\t\treturn nil\n\t}\n\n\tmask := rwMask\n\tif readonly {\n\t\tmask = roMask\n\t}\n\n\tif info.IsDir() {\n\t\tmask |= os.ModeSetgid\n\t\tmask |= execMask\n\t}\n\n\terr = os.Chmod(filename, info.Mode()|mask)\n\tif err != nil {\n\t\tklog.ErrorS(err, \"Chmod failed\", \"path\", filename)\n\t}\n\n\treturn nil\n}\n\nfunc skipPermissionChange(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy) bool {\n\tdir := mounter.GetPath()\n\n\tif fsGroupChangePolicy == nil || *fsGroupChangePolicy != v1.FSGroupChangeOnRootMismatch {\n\t\tklog.V(4).InfoS(\"Perform recursive ownership change for directory\", \"path\", dir)\n\t\treturn false\n\t}\n\treturn !requiresPermissionChange(mounter.GetPath(), fsGroup, mounter.GetAttributes().ReadOnly)\n}\n\nfunc requiresPermissionChange(rootDir string, fsGroup *int64, readonly bool) bool {\n\tfsInfo, err := os.Stat(rootDir)\n\tif err != nil {\n\t\tklog.ErrorS(err, \"Performing recursive ownership change on rootDir because reading permissions of root volume failed\", \"path\", rootDir)\n\t\treturn true\n\t}\n\tstat, ok := fsInfo.Sys().(*syscall.Stat_t)\n\tif !ok || stat == nil {\n\t\tklog.ErrorS(nil, \"Performing recursive ownership change on rootDir because reading permissions of root volume failed\", \"path\", rootDir)\n\t\treturn true\n\t}\n\n\tif int(stat.Gid) != int(*fsGroup) {\n\t\tklog.V(4).InfoS(\"Expected group ownership of volume did not match with Gid\", \"path\", rootDir, \"GID\", stat.Gid)\n\t\treturn true\n\t}\n\tunixPerms := rwMask\n\n\tif readonly {\n\t\tunixPerms = roMask\n\t}\n\n\t\/\/ if rootDir is not a directory then we should apply permission change anyways\n\tif !fsInfo.IsDir() {\n\t\treturn true\n\t}\n\tunixPerms |= execMask\n\tfilePerm := fsInfo.Mode().Perm()\n\n\t\/\/ We need to check if actual permissions of root directory is a superset of permissions required by unixPerms.\n\t\/\/ This is done by checking if permission bits expected in unixPerms is set in actual permissions of the directory.\n\t\/\/ We use bitwise AND operation to check set bits. 
For example:\n\t\/\/ unixPerms: 770, filePerms: 775 : 770&775 = 770 (perms on directory is a superset)\n\t\/\/ unixPerms: 770, filePerms: 770 : 770&770 = 770 (perms on directory is a superset)\n\t\/\/ unixPerms: 770, filePerms: 750 : 770&750 = 750 (perms on directory is NOT a superset)\n\t\/\/ We also need to check if setgid bits are set in permissions of the directory.\n\tif (unixPerms&filePerm != unixPerms) || (fsInfo.Mode()&os.ModeSetgid == 0) {\n\t\tklog.V(4).InfoS(\"Performing recursive ownership change on rootDir because of mismatching mode\", \"path\", rootDir)\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ readDirNames reads the directory named by dirname and returns\n\/\/ a list of directory entries.\n\/\/ We are not using filepath.readDirNames because we do not want to sort files found in a directory before changing\n\/\/ permissions for performance reasons.\nfunc readDirNames(dirname string) ([]string, error) {\n\tf, err := os.Open(dirname)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnames, err := f.Readdirnames(-1)\n\tf.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn names, nil\n}\n\n\/\/ walkDeep can be used to traverse directories and has two minor differences\n\/\/ from filepath.Walk:\n\/\/ - List of files\/dirs is not sorted for performance reasons\n\/\/ - callback walkFunc is invoked on root directory after visiting children dirs and files\nfunc walkDeep(root string, walkFunc filepath.WalkFunc) error {\n\tinfo, err := os.Lstat(root)\n\tif err != nil {\n\t\treturn walkFunc(root, nil, err)\n\t}\n\treturn walk(root, info, walkFunc)\n}\n\nfunc walk(path string, info os.FileInfo, walkFunc filepath.WalkFunc) error {\n\tif !info.IsDir() {\n\t\treturn walkFunc(path, info, nil)\n\t}\n\tnames, err := readDirNames(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, name := range names {\n\t\tfilename := filepath.Join(path, name)\n\t\tfileInfo, err := os.Lstat(filename)\n\t\tif err != nil {\n\t\t\tif err := walkFunc(filename, fileInfo, err); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\terr = walk(filename, fileInfo, walkFunc)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn walkFunc(path, info, nil)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage fileutil\n\nimport (\n\t\"os\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\ntype MmapFile struct {\n\tf *os.File\n\tb []byte\n}\n\nfunc OpenMmapFile(path string) (*MmapFile, error) {\n\treturn OpenMmapFileWithSize(path, 0)\n}\n\nfunc OpenMmapFileWithSize(path string, size int) (mf *MmapFile, retErr error) {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"try lock file\")\n\t}\n\tdefer func() {\n\t\tif retErr != nil {\n\t\t\tf.Close()\n\t\t}\n\t}()\n\tif size <= 0 {\n\t\tinfo, err := f.Stat()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"stat\")\n\t\t}\n\t\tsize = int(info.Size())\n\t}\n\n\tb, err := 
mmap(f, size)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"mmap\")\n\t}\n\n\treturn &MmapFile{f: f, b: b}, nil\n}\n\nfunc (f *MmapFile) Close() error {\n\terr0 := munmap(f.b)\n\terr1 := f.f.Close()\n\n\tif err0 != nil {\n\t\treturn err0\n\t}\n\treturn err1\n}\n\nfunc (f *MmapFile) File() *os.File {\n\treturn f.f\n}\n\nfunc (f *MmapFile) Bytes() []byte {\n\treturn f.b\n}\n<commit_msg>More info in mmap error message (#8058)<commit_after>\/\/ Copyright 2018 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage fileutil\n\nimport (\n\t\"os\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\ntype MmapFile struct {\n\tf *os.File\n\tb []byte\n}\n\nfunc OpenMmapFile(path string) (*MmapFile, error) {\n\treturn OpenMmapFileWithSize(path, 0)\n}\n\nfunc OpenMmapFileWithSize(path string, size int) (mf *MmapFile, retErr error) {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"try lock file\")\n\t}\n\tdefer func() {\n\t\tif retErr != nil {\n\t\t\tf.Close()\n\t\t}\n\t}()\n\tif size <= 0 {\n\t\tinfo, err := f.Stat()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"stat\")\n\t\t}\n\t\tsize = int(info.Size())\n\t}\n\n\tb, err := mmap(f, size)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"mmap, size %d\", size)\n\t}\n\n\treturn &MmapFile{f: f, b: b}, nil\n}\n\nfunc (f *MmapFile) Close() error {\n\terr0 := munmap(f.b)\n\terr1 := f.f.Close()\n\n\tif err0 != nil {\n\t\treturn err0\n\t}\n\treturn err1\n}\n\nfunc (f *MmapFile) File() *os.File {\n\treturn f.f\n}\n\nfunc (f *MmapFile) Bytes() []byte {\n\treturn f.b\n}\n<|endoftext|>"} {"text":"<commit_before>package finalize\n\nconst (\n\tinitScript = `\n# ------------------------------------------------------------------------------------------------\n# Copyright 2013 Jordon Bedwell.\n# Apache License.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file\n# except in compliance with the License. You may obtain a copy of the License at:\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the\n# License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,\n# either express or implied. See the License for the specific language governing permissions\n# and limitations under the License.\n# ------------------------------------------------------------------------------------------------\n\nexport APP_ROOT=$HOME\nexport LD_LIBRARY_PATH=$APP_ROOT\/nginx\/lib:$LD_LIBRARY_PATH\n\nmv $APP_ROOT\/nginx\/conf\/nginx.conf $APP_ROOT\/nginx\/conf\/nginx.conf.erb\nerb $APP_ROOT\/nginx\/conf\/nginx.conf.erb > $APP_ROOT\/nginx\/conf\/nginx.conf\n\nif [[ ! -f $APP_ROOT\/nginx\/logs\/access.log ]]; then\n mkfifo $APP_ROOT\/nginx\/logs\/access.log\nfi\n\nif [[ ! 
-f $APP_ROOT\/nginx\/logs\/error.log ]]; then\n mkfifo $APP_ROOT\/nginx\/logs\/error.log\nfi\n`\n\n\tstartLoggingScript = `\ncat < $APP_ROOT\/nginx\/logs\/access.log &\n(>&2 cat) < $APP_ROOT\/nginx\/logs\/error.log &\n`\n\n\tstartCommand = `#!\/bin\/sh\nset -ex\n$APP_ROOT\/start_logging.sh\nnginx -p $APP_ROOT\/nginx -c $APP_ROOT\/nginx\/conf\/nginx.conf\n`\n\n\tnginxConfTemplate = `\nworker_processes 1;\ndaemon off;\n\nerror_log <%= ENV[\"APP_ROOT\"] %>\/nginx\/logs\/error.log;\nevents { worker_connections 1024; }\n\nhttp {\n charset utf-8;\n log_format cloudfoundry '$http_x_forwarded_for - $http_referer - [$time_local] \"$request\" $status $body_bytes_sent';\n access_log <%= ENV[\"APP_ROOT\"] %>\/nginx\/logs\/access.log cloudfoundry;\n default_type application\/octet-stream;\n include mime.types;\n sendfile on;\n\n gzip on;\n gzip_disable \"msie6\";\n gzip_comp_level 6;\n gzip_min_length 1100;\n gzip_buffers 16 8k;\n gzip_proxied any;\n gunzip on;\n gzip_static always;\n gzip_types text\/plain text\/css text\/js text\/xml text\/javascript application\/javascript application\/x-javascript application\/json application\/xml application\/xml+rss;\n gzip_vary on;\n\n tcp_nopush on;\n keepalive_timeout 30;\n port_in_redirect off; # Ensure that redirects don't include the internal container PORT - <%= ENV[\"PORT\"] %>\n server_tokens off;\n\n server {\n listen <%= ENV[\"PORT\"] %>;\n server_name localhost;\n\n root <%= ENV[\"APP_ROOT\"] %>\/public;\n\n {{if .ForceHTTPS}}\n if ($http_x_forwarded_proto != \"https\") {\n return 301 https:\/\/$host$request_uri;\n }\n {{else}}\n <% if ENV[\"FORCE_HTTPS\"] %>\n if ($http_x_forwarded_proto != \"https\") {\n return 301 https:\/\/$host$request_uri;\n }\n <% end %>\n {{end}}\n\n\n location \/ {\n {{if .PushState}}\n if (!-e $request_filename) {\n rewrite ^(.*)$ \/ break;\n }\n {{end}}\n\n index index.html index.htm Default.htm;\n\n {{if .DirectoryIndex}}\n autoindex on;\n {{end}}\n\n {{if .BasicAuth}}\n auth_basic \"Restricted\"; #For Basic Auth\n auth_basic_user_file <%= ENV[\"APP_ROOT\"] %>\/nginx\/conf\/.htpasswd;\n {{end}}\n\n {{if .SSI}}\n ssi on;\n {{end}}\n\n {{if .HSTS}}\n add_header Strict-Transport-Security \"max-age=31536000{{if .HSTSIncludeSubDomains}}; includeSubDomains{{end}}{{if .HSTSPreload}}; preload{{end}}\";\n {{end}}\n\n {{if ne .LocationInclude \"\"}}\n include {{.LocationInclude}};\n {{end}}\n\n\t\t\t{{ range $code, $value := .StatusCodes }}\n\t\t\t error_page {{ $code }} {{ $value }};\n\t\t {{ end }}\n }\n\n {{if not .HostDotFiles}}\n location ~ \/\\. 
{\n deny all;\n return 404;\n }\n {{end}}\n }\n}\n`\n\tMimeTypes = `\ntypes {\n text\/html html htm shtml;\n text\/css css;\n text\/xml xml;\n image\/gif gif;\n image\/jpeg jpeg jpg;\n application\/x-javascript js;\n application\/atom+xml atom;\n application\/rss+xml rss;\n font\/ttf ttf;\n font\/woff woff;\n font\/woff2 woff2;\n text\/mathml mml;\n text\/plain txt;\n text\/vnd.sun.j2me.app-descriptor jad;\n text\/vnd.wap.wml wml;\n text\/x-component htc;\n text\/cache-manifest manifest;\n image\/png png;\n image\/tiff tif tiff;\n image\/vnd.wap.wbmp wbmp;\n image\/x-icon ico;\n image\/x-jng jng;\n image\/x-ms-bmp bmp;\n image\/svg+xml svg svgz;\n image\/webp webp;\n application\/java-archive jar war ear;\n application\/mac-binhex40 hqx;\n application\/msword doc;\n application\/pdf pdf;\n application\/postscript ps eps ai;\n application\/rtf rtf;\n application\/vnd.ms-excel xls;\n application\/vnd.ms-powerpoint ppt;\n application\/vnd.wap.wmlc wmlc;\n application\/vnd.google-earth.kml+xml kml;\n application\/vnd.google-earth.kmz kmz;\n application\/x-7z-compressed 7z;\n application\/x-cocoa cco;\n application\/x-java-archive-diff jardiff;\n application\/x-java-jnlp-file jnlp;\n application\/x-makeself run;\n application\/x-perl pl pm;\n application\/x-pilot prc pdb;\n application\/x-rar-compressed rar;\n application\/x-redhat-package-manager rpm;\n application\/x-sea sea;\n application\/x-shockwave-flash swf;\n application\/x-stuffit sit;\n application\/x-tcl tcl tk;\n application\/x-x509-ca-cert der pem crt;\n application\/x-xpinstall xpi;\n application\/xhtml+xml xhtml;\n application\/zip zip;\n application\/octet-stream bin exe dll;\n application\/octet-stream deb;\n application\/octet-stream dmg;\n application\/octet-stream eot;\n application\/octet-stream iso img;\n application\/octet-stream msi msp msm;\n application\/json json;\n audio\/midi mid midi kar;\n audio\/mpeg mp3;\n audio\/ogg ogg;\n audio\/x-m4a m4a;\n audio\/x-realaudio ra;\n video\/3gpp 3gpp 3gp;\n video\/mp4 mp4;\n video\/mpeg mpeg mpg;\n video\/quicktime mov;\n video\/webm webm;\n video\/x-flv flv;\n video\/x-m4v m4v;\n video\/x-mng mng;\n video\/x-ms-asf asx asf;\n video\/x-ms-wmv wmv;\n video\/x-msvideo avi;\n}\n`\n)\n<commit_msg>Preserves x_forwarded_host on https<commit_after>package finalize\n\nconst (\n\tinitScript = `\n# ------------------------------------------------------------------------------------------------\n# Copyright 2013 Jordon Bedwell.\n# Apache License.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file\n# except in compliance with the License. You may obtain a copy of the License at:\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the\n# License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,\n# either express or implied. See the License for the specific language governing permissions\n# and limitations under the License.\n# ------------------------------------------------------------------------------------------------\n\nexport APP_ROOT=$HOME\nexport LD_LIBRARY_PATH=$APP_ROOT\/nginx\/lib:$LD_LIBRARY_PATH\n\nmv $APP_ROOT\/nginx\/conf\/nginx.conf $APP_ROOT\/nginx\/conf\/nginx.conf.erb\nerb $APP_ROOT\/nginx\/conf\/nginx.conf.erb > $APP_ROOT\/nginx\/conf\/nginx.conf\n\nif [[ ! -f $APP_ROOT\/nginx\/logs\/access.log ]]; then\n mkfifo $APP_ROOT\/nginx\/logs\/access.log\nfi\n\nif [[ ! 
-f $APP_ROOT\/nginx\/logs\/error.log ]]; then\n mkfifo $APP_ROOT\/nginx\/logs\/error.log\nfi\n`\n\n\tstartLoggingScript = `\ncat < $APP_ROOT\/nginx\/logs\/access.log &\n(>&2 cat) < $APP_ROOT\/nginx\/logs\/error.log &\n`\n\n\tstartCommand = `#!\/bin\/sh\nset -ex\n$APP_ROOT\/start_logging.sh\nnginx -p $APP_ROOT\/nginx -c $APP_ROOT\/nginx\/conf\/nginx.conf\n`\n\n\tnginxConfTemplate = `\nworker_processes 1;\ndaemon off;\n\nerror_log <%= ENV[\"APP_ROOT\"] %>\/nginx\/logs\/error.log;\nevents { worker_connections 1024; }\n\nhttp {\n charset utf-8;\n log_format cloudfoundry '$http_x_forwarded_for - $http_referer - [$time_local] \"$request\" $status $body_bytes_sent';\n access_log <%= ENV[\"APP_ROOT\"] %>\/nginx\/logs\/access.log cloudfoundry;\n default_type application\/octet-stream;\n include mime.types;\n sendfile on;\n\n gzip on;\n gzip_disable \"msie6\";\n gzip_comp_level 6;\n gzip_min_length 1100;\n gzip_buffers 16 8k;\n gzip_proxied any;\n gunzip on;\n gzip_static always;\n gzip_types text\/plain text\/css text\/js text\/xml text\/javascript application\/javascript application\/x-javascript application\/json application\/xml application\/xml+rss;\n gzip_vary on;\n\n tcp_nopush on;\n keepalive_timeout 30;\n port_in_redirect off; # Ensure that redirects don't include the internal container PORT - <%= ENV[\"PORT\"] %>\n server_tokens off;\n\n server {\n listen <%= ENV[\"PORT\"] %>;\n server_name localhost;\n\n root <%= ENV[\"APP_ROOT\"] %>\/public;\n\n {{if .ForceHTTPS}}\n\t set $updated_host $host;\n\t if ($http_x_forwarded_host != \"\") {\n set $updated_host $http_x_forwarded_host;\n } \n\n if ($http_x_forwarded_proto != \"https\") {\n\t return 301 https:\/\/$updated_host$request_uri;\n\t }\n {{else}}\n\t<% if ENV[\"FORCE_HTTPS\"] %>\n\t set $updated_host $host;\n\t if ($http_x_forwarded_host != \"\") {\n set $updated_host $http_x_forwarded_host;\n } \n\n if ($http_x_forwarded_proto != \"https\") {\n\t return 301 https:\/\/$updated_host$request_uri;\n\t }\n\t<% end %>\n {{end}}\n\n\n location \/ {\n {{if .PushState}}\n if (!-e $request_filename) {\n rewrite ^(.*)$ \/ break;\n }\n {{end}}\n\n index index.html index.htm Default.htm;\n\n {{if .DirectoryIndex}}\n autoindex on;\n {{end}}\n\n {{if .BasicAuth}}\n auth_basic \"Restricted\"; #For Basic Auth\n auth_basic_user_file <%= ENV[\"APP_ROOT\"] %>\/nginx\/conf\/.htpasswd;\n {{end}}\n\n {{if .SSI}}\n ssi on;\n {{end}}\n\n {{if .HSTS}}\n add_header Strict-Transport-Security \"max-age=31536000{{if .HSTSIncludeSubDomains}}; includeSubDomains{{end}}{{if .HSTSPreload}}; preload{{end}}\";\n {{end}}\n\n {{if ne .LocationInclude \"\"}}\n include {{.LocationInclude}};\n {{end}}\n\n\t\t\t{{ range $code, $value := .StatusCodes }}\n\t\t\t error_page {{ $code }} {{ $value }};\n\t\t {{ end }}\n }\n\n {{if not .HostDotFiles}}\n location ~ \/\\. 
{\n deny all;\n return 404;\n }\n {{end}}\n }\n}\n`\n\tMimeTypes = `\ntypes {\n text\/html html htm shtml;\n text\/css css;\n text\/xml xml;\n image\/gif gif;\n image\/jpeg jpeg jpg;\n application\/x-javascript js;\n application\/atom+xml atom;\n application\/rss+xml rss;\n font\/ttf ttf;\n font\/woff woff;\n font\/woff2 woff2;\n text\/mathml mml;\n text\/plain txt;\n text\/vnd.sun.j2me.app-descriptor jad;\n text\/vnd.wap.wml wml;\n text\/x-component htc;\n text\/cache-manifest manifest;\n image\/png png;\n image\/tiff tif tiff;\n image\/vnd.wap.wbmp wbmp;\n image\/x-icon ico;\n image\/x-jng jng;\n image\/x-ms-bmp bmp;\n image\/svg+xml svg svgz;\n image\/webp webp;\n application\/java-archive jar war ear;\n application\/mac-binhex40 hqx;\n application\/msword doc;\n application\/pdf pdf;\n application\/postscript ps eps ai;\n application\/rtf rtf;\n application\/vnd.ms-excel xls;\n application\/vnd.ms-powerpoint ppt;\n application\/vnd.wap.wmlc wmlc;\n application\/vnd.google-earth.kml+xml kml;\n application\/vnd.google-earth.kmz kmz;\n application\/x-7z-compressed 7z;\n application\/x-cocoa cco;\n application\/x-java-archive-diff jardiff;\n application\/x-java-jnlp-file jnlp;\n application\/x-makeself run;\n application\/x-perl pl pm;\n application\/x-pilot prc pdb;\n application\/x-rar-compressed rar;\n application\/x-redhat-package-manager rpm;\n application\/x-sea sea;\n application\/x-shockwave-flash swf;\n application\/x-stuffit sit;\n application\/x-tcl tcl tk;\n application\/x-x509-ca-cert der pem crt;\n application\/x-xpinstall xpi;\n application\/xhtml+xml xhtml;\n application\/zip zip;\n application\/octet-stream bin exe dll;\n application\/octet-stream deb;\n application\/octet-stream dmg;\n application\/octet-stream eot;\n application\/octet-stream iso img;\n application\/octet-stream msi msp msm;\n application\/json json;\n audio\/midi mid midi kar;\n audio\/mpeg mp3;\n audio\/ogg ogg;\n audio\/x-m4a m4a;\n audio\/x-realaudio ra;\n video\/3gpp 3gpp 3gp;\n video\/mp4 mp4;\n video\/mpeg mpeg mpg;\n video\/quicktime mov;\n video\/webm webm;\n video\/x-flv flv;\n video\/x-m4v m4v;\n video\/x-mng mng;\n video\/x-ms-asf asx asf;\n video\/x-ms-wmv wmv;\n video\/x-msvideo avi;\n}\n`\n)\n<|endoftext|>"}
{"text":"<commit_before>package qdisksync\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\n\/\/read from snapshot and copy\nfunc SyncVolumeData(srcVolume string, destVolume string, bufferSize int64, workerCount int32) {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tcacheFile := \"qdisksync.cache\"\n\tcacheFileH, err := os.Open(cacheFile)\n\tif err != nil {\n\t\tL.Error(\"Load cache file `%s' failed\", cacheFile)\n\t\treturn\n\t}\n\tdefer cacheFileH.Close()\n\n\tbReader := bufio.NewScanner(cacheFileH)\n\tbReader.Split(bufio.ScanLines)\n\t\/\/init channel\n\tvar allWorkers int32 = 0\n\tsyncStart := time.Now()\n\tL.Informational(\"Sync `%s' -> `%s' start from `%s'\", srcVolume, destVolume, syncStart.String())\n\tsyncDone := make(chan bool)\n\t\/\/receive the sync result\n\tgo func() {\n\t\t<-syncDone\n\t}()\n\tfor bReader.Scan() {\n\t\tline := bReader.Text()\n\t\t\/\/split to name and size\n\t\titems := strings.Split(line, \"\\t\")\n\t\tif len(items) != 3 {\n\t\t\tL.Error(\"Line data `%s' error of cache file `%s'\", line, cacheFile)\n\t\t\tcontinue\n\t\t}\n\t\tfname := items[0]\n\t\tfsize, err := strconv.ParseInt(items[1], 10, 64)\n\t\tif err != nil {\n\t\t\tL.Error(\"File length error `%s' 
for line `%s'\", items[1], line)\n\t\t\tcontinue\n\t\t}\n\t\tfperm, err := strconv.ParseInt(items[2], 10, 64)\n\t\tif err != nil {\n\t\t\tL.Error(\"File perm error `%s' for line `%s'\", items[2], line)\n\t\t\tcontinue\n\t\t}\n\t\t\/\/join the path\n\t\tsrcFullPath := filepath.Join(srcVolume, fname)\n\t\tdestFullPath := filepath.Join(destVolume, fname)\n\t\t\/\/check src and dest file\n\t\tsrcFileH, srcErr := os.Open(srcFullPath)\n\t\tif srcErr != nil {\n\t\t\tL.Error(\"Open src file `%s' error `%s'\", srcFullPath, srcErr.Error())\n\t\t\tcontinue\n\t\t}\n\t\t\/\/create path if necessary\n\t\tlastSlashIndex := strings.LastIndex(destFullPath, \"\/\")\n\t\tdestFullPathBase := destFullPath[:lastSlashIndex]\n\t\tif err := os.MkdirAll(destFullPathBase, 0775); err != nil {\n\t\t\tL.Error(\"Failed to create dir `%s' due to error `%s'\", destFullPathBase, err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tdestFileH, destErr := os.OpenFile(destFullPath, os.O_CREATE|os.O_RDWR, os.FileMode(fperm))\n\t\tif destErr != nil {\n\t\t\tL.Error(\"Open dest file `%s' error `%s'\", destFullPath, destErr.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/check whether it's time to run copy\n\t\tfor {\n\t\t\tcurWorkers := atomic.LoadInt32(&allWorkers)\n\t\t\tL.Debug(\"Current Workers: `%d'\", curWorkers)\n\t\t\tif curWorkers < workerCount {\n\t\t\t\tatomic.AddInt32(&allWorkers, 1)\n\t\t\t\tgo copy(srcFileH, destFileH, fsize, bufferSize, srcFullPath, destFullPath, &allWorkers)\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\t\/\/wait some time to avoid infinite cycle\n\t\t\t\t<-time.After(time.Microsecond * 1)\n\t\t\t}\n\t\t}\n\t}\n\tfor {\n\t\tL.Debug(\"Remaing workers: `%s'\", atomic.LoadInt32(&allWorkers))\n\t\tif atomic.LoadInt32(&allWorkers) == 0 {\n\t\t\tsyncDone <- true\n\t\t\tsyncEnd := time.Now()\n\t\t\tL.Informational(\"Sync `%s' -> `%s' end at `%s'\", srcVolume, destVolume, syncEnd.String())\n\t\t\tL.Informational(\"Sync `%s' -> `%s' last for `%s'\", srcVolume, destVolume, time.Since(syncStart))\n\t\t\tbreak\n\t\t} else {\n\t\t\t<-time.After(time.Second * 5)\n\t\t}\n\t}\n}\n\nfunc copy(srcFileH, destFileH *os.File, fsize int64, bufferSize int64, srcFullPath, destFullPath string, allWorkers *int32) {\n\tdefer func() {\n\t\tatomic.AddInt32(allWorkers, -1)\n\t\truntime.Gosched()\n\t}()\n\tL.Debug(\"Copying from `%s' to `%s'\", srcFullPath, destFullPath)\n\tbuffer := make([]byte, bufferSize)\n\tvar cpErr error\n\tvar cpNum int64\n\tfor {\n\t\tnumRead, errRead := srcFileH.Read(buffer)\n\t\tif errRead == io.EOF {\n\t\t\tbreak\n\t\t} else {\n\t\t\tif errRead != nil {\n\t\t\t\tL.Error(\"Read from `%s' error: `%s'\", srcFullPath, errRead.Error())\n\t\t\t\tcpErr = errRead\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tnumWrite, errWrite := destFileH.Write(buffer[:numRead])\n\t\t\t\tif errWrite != nil {\n\t\t\t\t\tL.Error(\"Write to `%s' error: `%s'\", destFullPath, errWrite.Error())\n\t\t\t\t\tcpErr = errWrite\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tcpNum += int64(numWrite)\n\t\t\t}\n\t\t}\n\t}\n\tdefer srcFileH.Close()\n\tdefer destFileH.Close()\n\tif cpErr != nil || cpNum != fsize {\n\t\tL.Error(\"Copy from `%s' to `%s' failed, error: `%s'\", srcFullPath, destFullPath, cpErr.Error())\n\t} else {\n\t\tL.Debug(\"Copy from `%s' to `%s' success\", srcFullPath, destFullPath)\n\t}\n\n}\n<commit_msg>Fix debug msg errorwq<commit_after>package qdisksync\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\n\/\/read from snapshot and copy\nfunc 
SyncVolumeData(srcVolume string, destVolume string, bufferSize int64, workerCount int32) {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tcacheFile := \"qdisksync.cache\"\n\tcacheFileH, err := os.Open(cacheFile)\n\tif err != nil {\n\t\tL.Error(\"Load cache file `%s' failed\", cacheFile)\n\t\treturn\n\t}\n\tdefer cacheFileH.Close()\n\n\tbReader := bufio.NewScanner(cacheFileH)\n\tbReader.Split(bufio.ScanLines)\n\t\/\/init channel\n\tvar allWorkers int32 = 0\n\tsyncStart := time.Now()\n\tL.Informational(\"Sync `%s' -> `%s' start from `%s'\", srcVolume, destVolume, syncStart.String())\n\tsyncDone := make(chan bool)\n\t\/\/receive the sync result\n\tgo func() {\n\t\t<-syncDone\n\t}()\n\tfor bReader.Scan() {\n\t\tline := bReader.Text()\n\t\t\/\/split to name and size\n\t\titems := strings.Split(line, \"\\t\")\n\t\tif len(items) != 3 {\n\t\t\tL.Error(\"Line data `%s' error of cache file `%s'\", line, cacheFile)\n\t\t\tcontinue\n\t\t}\n\t\tfname := items[0]\n\t\tfsize, err := strconv.ParseInt(items[1], 10, 64)\n\t\tif err != nil {\n\t\t\tL.Error(\"File length error `%s' for line `%s'\", items[1], line)\n\t\t\tcontinue\n\t\t}\n\t\tfperm, err := strconv.ParseInt(items[2], 10, 64)\n\t\tif err != nil {\n\t\t\tL.Error(\"File perm error `%s' for line `%s'\", items[2], line)\n\t\t\tcontinue\n\t\t}\n\t\t\/\/join the path\n\t\tsrcFullPath := filepath.Join(srcVolume, fname)\n\t\tdestFullPath := filepath.Join(destVolume, fname)\n\t\t\/\/check src and dest file\n\t\tsrcFileH, srcErr := os.Open(srcFullPath)\n\t\tif srcErr != nil {\n\t\t\tL.Error(\"Open src file `%s' error `%s'\", srcFullPath, srcErr.Error())\n\t\t\tcontinue\n\t\t}\n\t\t\/\/create path if necessary\n\t\tlastSlashIndex := strings.LastIndex(destFullPath, \"\/\")\n\t\tdestFullPathBase := destFullPath[:lastSlashIndex]\n\t\tif err := os.MkdirAll(destFullPathBase, 0775); err != nil {\n\t\t\tL.Error(\"Failed to create dir `%s' due to error `%s'\", destFullPathBase, err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tdestFileH, destErr := os.OpenFile(destFullPath, os.O_CREATE|os.O_RDWR, os.FileMode(fperm))\n\t\tif destErr != nil {\n\t\t\tL.Error(\"Open dest file `%s' error `%s'\", destFullPath, destErr.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/check whether it's time to run copy\n\t\tfor {\n\t\t\tcurWorkers := atomic.LoadInt32(&allWorkers)\n\t\t\tL.Debug(\"Current Workers: `%d'\", curWorkers)\n\t\t\tif curWorkers < workerCount {\n\t\t\t\tatomic.AddInt32(&allWorkers, 1)\n\t\t\t\tgo copy(srcFileH, destFileH, fsize, bufferSize, srcFullPath, destFullPath, &allWorkers)\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\t\/\/wait some time to avoid infinite cycle\n\t\t\t\t<-time.After(time.Microsecond * 1)\n\t\t\t}\n\t\t}\n\t}\n\tfor {\n\t\tL.Debug(\"Remained workers: `%d'\", atomic.LoadInt32(&allWorkers))\n\t\tif atomic.LoadInt32(&allWorkers) == 0 {\n\t\t\tsyncDone <- true\n\t\t\tsyncEnd := time.Now()\n\t\t\tL.Informational(\"Sync `%s' -> `%s' end at `%s'\", srcVolume, destVolume, syncEnd.String())\n\t\t\tL.Informational(\"Sync `%s' -> `%s' last for `%s'\", srcVolume, destVolume, time.Since(syncStart))\n\t\t\tbreak\n\t\t} else {\n\t\t\t<-time.After(time.Second * 5)\n\t\t}\n\t}\n}\n\nfunc copy(srcFileH, destFileH *os.File, fsize int64, bufferSize int64, srcFullPath, destFullPath string, allWorkers *int32) {\n\tdefer func() {\n\t\tatomic.AddInt32(allWorkers, -1)\n\t\truntime.Gosched()\n\t}()\n\tL.Debug(\"Copying from `%s' to `%s'\", srcFullPath, destFullPath)\n\tbuffer := make([]byte, bufferSize)\n\tvar cpErr error\n\tvar cpNum int64\n\tfor {\n\t\tnumRead, errRead := 
srcFileH.Read(buffer)\n\t\tif errRead == io.EOF {\n\t\t\tbreak\n\t\t} else {\n\t\t\tif errRead != nil {\n\t\t\t\tL.Error(\"Read from `%s' error: `%s'\", srcFullPath, errRead.Error())\n\t\t\t\tcpErr = errRead\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tnumWrite, errWrite := destFileH.Write(buffer[:numRead])\n\t\t\t\tif errWrite != nil {\n\t\t\t\t\tL.Error(\"Write to `%s' error: `%s'\", destFullPath, errWrite.Error())\n\t\t\t\t\tcpErr = errWrite\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tcpNum += int64(numWrite)\n\t\t\t}\n\t\t}\n\t}\n\tdefer srcFileH.Close()\n\tdefer destFileH.Close()\n\tif cpErr != nil || cpNum != fsize {\n\t\tL.Error(\"Copy from `%s' to `%s' failed, error: `%s'\", srcFullPath, destFullPath, cpErr.Error())\n\t} else {\n\t\tL.Debug(\"Copy from `%s' to `%s' success\", srcFullPath, destFullPath)\n\t}\n\n}\n<|endoftext|>"}
{"text":"<commit_before>\/*\n * Copyright (C) 2016 Red Hat, Inc.\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\n *\/\n\npackage probes\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com\/skydive-project\/skydive\/analyzer\"\n\t\"github.com\/skydive-project\/skydive\/api\"\n\t\"github.com\/skydive-project\/skydive\/config\"\n\t\"github.com\/skydive-project\/skydive\/flow\"\n\t\"github.com\/skydive-project\/skydive\/flow\/mappings\"\n\t\"github.com\/skydive-project\/skydive\/logging\"\n\t\"github.com\/skydive-project\/skydive\/probe\"\n\t\"github.com\/skydive-project\/skydive\/topology\/graph\"\n)\n\ntype FlowProbeBundle struct {\n\tprobe.ProbeBundle\n\tGraph *graph.Graph\n\tFlowTableAllocator *flow.TableAllocator\n}\n\ntype FlowProbeInterface interface {\n\tprobe.Probe\n\tRegisterProbe(n *graph.Node, capture *api.Capture, ft *flow.Table) error\n\tUnregisterProbe(n *graph.Node) error\n}\n\ntype FlowProbe struct {\n\tsync.RWMutex\n\tfpi FlowProbeInterface\n\tpipeline *mappings.FlowMappingPipeline\n\tflowClientPool *analyzer.FlowClientPool\n}\n\nfunc (fp FlowProbe) Start() {\n\tfp.fpi.Start()\n}\n\nfunc (fp FlowProbe) Stop() {\n\tfp.fpi.Stop()\n}\n\nfunc (fp *FlowProbe) RegisterProbe(n *graph.Node, capture *api.Capture, ft *flow.Table) error {\n\treturn fp.fpi.RegisterProbe(n, capture, ft)\n}\n\nfunc (fp *FlowProbe) UnregisterProbe(n *graph.Node) error {\n\treturn fp.fpi.UnregisterProbe(n)\n}\n\nfunc (fp *FlowProbe) AsyncFlowPipeline(flows []*flow.Flow) {\n\tfp.pipeline.Enhance(flows)\n\n\tfp.RLock()\n\tdefer fp.RUnlock()\n\n\tfp.flowClientPool.SendFlows(flows)\n}\n\nfunc (fpb *FlowProbeBundle) UnregisterAllProbes() {\n\tfpb.Graph.Lock()\n\tdefer fpb.Graph.Unlock()\n\n\tfor _, n := range fpb.Graph.GetNodes(graph.Metadata{}) {\n\t\tfor _, p := range fpb.ProbeBundle.Probes {\n\t\t\tfprobe := p.(*FlowProbe)\n\t\t\tfprobe.UnregisterProbe(n)\n\t\t}\n\t}\n}\n\nfunc NewFlowProbeBundleFromConfig(tb 
*probe.ProbeBundle, g *graph.Graph, fta *flow.TableAllocator, fcpool *analyzer.FlowClientPool) *FlowProbeBundle {\n\tlist := config.GetConfig().GetStringSlice(\"agent.flow.probes\")\n\tlogging.GetLogger().Infof(\"Flow probes: %v\", list)\n\n\tpipeline := mappings.NewFlowMappingPipeline(mappings.NewGraphFlowEnhancer(g))\n\n\t\/\/ check whether the neutron probe is loaded; if so, add the neutron flow enhancer\n\tif tb.GetProbe(\"neutron\") != nil {\n\t\tpipeline.AddEnhancer(mappings.NewNeutronFlowEnhancer(g))\n\t}\n\n\tvar captureTypes []string\n\tvar fpi FlowProbeInterface\n\tvar err error\n\n\tprobes := make(map[string]probe.Probe)\n\tfor _, t := range list {\n\t\tif _, ok := probes[t]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch t {\n\t\tcase \"pcapsocket\":\n\t\t\tfpi, err = NewPcapSocketProbeHandler(g)\n\t\t\tcaptureTypes = []string{\"pcapsocket\"}\n\t\tcase \"ovssflow\":\n\t\t\tfpi, err = NewOvsSFlowProbesHandler(tb, g)\n\t\t\tcaptureTypes = []string{\"ovssflow\"}\n\t\tcase \"gopacket\":\n\t\t\tfpi, err = NewGoPacketProbesHandler(g)\n\t\t\tcaptureTypes = []string{\"afpacket\", \"pcap\"}\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"unknown probe type %s\", t)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlogging.GetLogger().Errorf(\"failed to create %s probe: %s\", t, err.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\tflowProbe := &FlowProbe{fpi: fpi, pipeline: pipeline, flowClientPool: fcpool}\n\t\tfor _, captureType := range captureTypes {\n\t\t\tprobes[captureType] = flowProbe\n\t\t}\n\t}\n\n\tp := probe.NewProbeBundle(probes)\n\n\treturn &FlowProbeBundle{\n\t\tProbeBundle: *p,\n\t\tGraph: g,\n\t\tFlowTableAllocator: fta,\n\t}\n}\n<commit_msg>flow : FlowProbe remove useless lock<commit_after>\/*\n * Copyright (C) 2016 Red Hat, Inc.\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. 
See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\n *\/\n\npackage probes\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/skydive-project\/skydive\/analyzer\"\n\t\"github.com\/skydive-project\/skydive\/api\"\n\t\"github.com\/skydive-project\/skydive\/config\"\n\t\"github.com\/skydive-project\/skydive\/flow\"\n\t\"github.com\/skydive-project\/skydive\/flow\/mappings\"\n\t\"github.com\/skydive-project\/skydive\/logging\"\n\t\"github.com\/skydive-project\/skydive\/probe\"\n\t\"github.com\/skydive-project\/skydive\/topology\/graph\"\n)\n\ntype FlowProbeBundle struct {\n\tprobe.ProbeBundle\n\tGraph *graph.Graph\n\tFlowTableAllocator *flow.TableAllocator\n}\n\ntype FlowProbeInterface interface {\n\tprobe.Probe\n\tRegisterProbe(n *graph.Node, capture *api.Capture, ft *flow.Table) error\n\tUnregisterProbe(n *graph.Node) error\n}\n\ntype FlowProbe struct {\n\tfpi FlowProbeInterface\n\tpipeline *mappings.FlowMappingPipeline\n\tflowClientPool *analyzer.FlowClientPool\n}\n\nfunc (fp *FlowProbe) Start() {\n\tfp.fpi.Start()\n}\n\nfunc (fp *FlowProbe) Stop() {\n\tfp.fpi.Stop()\n}\n\nfunc (fp *FlowProbe) RegisterProbe(n *graph.Node, capture *api.Capture, ft *flow.Table) error {\n\treturn fp.fpi.RegisterProbe(n, capture, ft)\n}\n\nfunc (fp *FlowProbe) UnregisterProbe(n *graph.Node) error {\n\treturn fp.fpi.UnregisterProbe(n)\n}\n\nfunc (fp *FlowProbe) AsyncFlowPipeline(flows []*flow.Flow) {\n\tfp.pipeline.Enhance(flows)\n\tfp.flowClientPool.SendFlows(flows)\n}\n\nfunc (fpb *FlowProbeBundle) UnregisterAllProbes() {\n\tfpb.Graph.Lock()\n\tdefer fpb.Graph.Unlock()\n\n\tfor _, n := range fpb.Graph.GetNodes(graph.Metadata{}) {\n\t\tfor _, p := range fpb.ProbeBundle.Probes {\n\t\t\tfprobe := p.(*FlowProbe)\n\t\t\tfprobe.UnregisterProbe(n)\n\t\t}\n\t}\n}\n\nfunc NewFlowProbeBundleFromConfig(tb *probe.ProbeBundle, g *graph.Graph, fta *flow.TableAllocator, fcpool *analyzer.FlowClientPool) *FlowProbeBundle {\n\tlist := config.GetConfig().GetStringSlice(\"agent.flow.probes\")\n\tlogging.GetLogger().Infof(\"Flow probes: %v\", list)\n\n\tpipeline := mappings.NewFlowMappingPipeline(mappings.NewGraphFlowEnhancer(g))\n\n\t\/\/ check whether the neutron probe is loaded; if so, add the neutron flow enhancer\n\tif tb.GetProbe(\"neutron\") != nil {\n\t\tpipeline.AddEnhancer(mappings.NewNeutronFlowEnhancer(g))\n\t}\n\n\tvar captureTypes []string\n\tvar fpi FlowProbeInterface\n\tvar err error\n\n\tprobes := make(map[string]probe.Probe)\n\tfor _, t := range list {\n\t\tif _, ok := probes[t]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch t {\n\t\tcase \"pcapsocket\":\n\t\t\tfpi, err = NewPcapSocketProbeHandler(g)\n\t\t\tcaptureTypes = []string{\"pcapsocket\"}\n\t\tcase \"ovssflow\":\n\t\t\tfpi, err = NewOvsSFlowProbesHandler(tb, g)\n\t\t\tcaptureTypes = []string{\"ovssflow\"}\n\t\tcase \"gopacket\":\n\t\t\tfpi, err = NewGoPacketProbesHandler(g)\n\t\t\tcaptureTypes = []string{\"afpacket\", \"pcap\"}\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"unknown probe type %s\", t)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlogging.GetLogger().Errorf(\"failed to create %s probe: %s\", t, err.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\tflowProbe := &FlowProbe{fpi: fpi, pipeline: pipeline, flowClientPool: fcpool}\n\t\tfor _, captureType := range captureTypes {\n\t\t\tprobes[captureType] = flowProbe\n\t\t}\n\t}\n\n\tp := probe.NewProbeBundle(probes)\n\n\treturn &FlowProbeBundle{\n\t\tProbeBundle: *p,\n\t\tGraph: g,\n\t\tFlowTableAllocator: fta,\n\t}\n}\n<|endoftext|>"}
{"text":"<commit_before>package 
flying_test\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"syscall\"\n\n\t\"github.com\/concourse\/testflight\/helpers\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n)\n\nvar _ = Describe(\"Flying\", func() {\n\tvar tmpdir string\n\tvar fixture string\n\n\tBeforeEach(func() {\n\t\tvar err error\n\n\t\ttmpdir, err = ioutil.TempDir(\"\", \"fly-test\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tfixture = filepath.Join(tmpdir, \"fixture\")\n\n\t\terr = os.MkdirAll(fixture, 0755)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\terr = ioutil.WriteFile(\n\t\t\tfilepath.Join(fixture, \"run\"),\n\t\t\t[]byte(`#!\/bin\/sh\necho some output\necho FOO is $FOO\necho ARGS are \"$@\"\nexit 0\n`),\n\t\t\t0755,\n\t\t)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\terr = ioutil.WriteFile(\n\t\t\tfilepath.Join(fixture, \"task.yml\"),\n\t\t\t[]byte(`---\nplatform: linux\n\nimage_resource:\n type: docker-image\n source: {repository: busybox}\n\ninputs:\n- name: fixture\n\noutputs:\n- name: output-1\n- name: output-2\n\nparams:\n FOO: 1\n\nrun:\n path: fixture\/run\n`),\n\t\t\t0644,\n\t\t)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t})\n\n\tAfterEach(func() {\n\t\tos.RemoveAll(tmpdir)\n\t})\n\n\tIt(\"works\", func() {\n\t\tfly := exec.Command(flyBin, \"-t\", targetedConcourse, \"execute\", \"-c\", \"task.yml\", \"--\", \"SOME\", \"ARGS\")\n\t\tfly.Dir = fixture\n\n\t\tsession := helpers.StartFly(fly)\n\n\t\tEventually(session).Should(gexec.Exit(0))\n\n\t\tExpect(session).To(gbytes.Say(\"some output\"))\n\t\tExpect(session).To(gbytes.Say(\"FOO is 1\"))\n\t\tExpect(session).To(gbytes.Say(\"ARGS are SOME ARGS\"))\n\t})\n\n\tDescribe(\"hijacking\", func() {\n\t\tIt(\"executes an interactive command in a running task's container\", func() {\n\t\t\terr := ioutil.WriteFile(\n\t\t\t\tfilepath.Join(fixture, \"run\"),\n\t\t\t\t[]byte(`#!\/bin\/sh\nmkfifo \/tmp\/fifo\necho waiting\ncat < \/tmp\/fifo\n`),\n\t\t\t\t0755,\n\t\t\t)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tfly := exec.Command(flyBin, \"-t\", targetedConcourse, \"execute\", \"-c\", \"task.yml\")\n\t\t\tfly.Dir = fixture\n\n\t\t\tflyS := helpers.StartFly(fly)\n\n\t\t\tEventually(flyS).Should(gbytes.Say(\"executing build\"))\n\n\t\t\tbuildRegex := regexp.MustCompile(`executing build (\\d+)`)\n\t\t\tmatches := buildRegex.FindSubmatch(flyS.Out.Contents())\n\t\t\tbuildID := string(matches[1])\n\n\t\t\tEventually(flyS).Should(gbytes.Say(\"waiting\"))\n\n\t\t\tenv := exec.Command(flyBin, \"-t\", targetedConcourse, \"hijack\", \"-b\", buildID, \"-s\", \"one-off\", \"--\", \"env\")\n\t\t\tenvS := helpers.StartFly(env)\n\t\t\t<-envS.Exited\n\t\t\tExpect(envS.ExitCode()).To(Equal(0))\n\t\t\tExpect(envS.Out).To(gbytes.Say(\"FOO=1\"))\n\n\t\t\thijack := exec.Command(flyBin, \"-t\", targetedConcourse, \"hijack\", \"-b\", buildID, \"-s\", \"one-off\", \"--\", \"sh\", \"-c\", \"echo marco > \/tmp\/fifo\")\n\t\t\thijackS := helpers.StartFly(hijack)\n\t\t\tEventually(flyS).Should(gbytes.Say(\"marco\"))\n\t\t\tEventually(hijackS).Should(gexec.Exit())\n\t\t\tEventually(flyS).Should(gexec.Exit(0))\n\t\t})\n\t})\n\n\tDescribe(\"pulling down outputs\", func() {\n\t\tIt(\"works\", func() {\n\t\t\terr := ioutil.WriteFile(\n\t\t\t\tfilepath.Join(fixture, \"run\"),\n\t\t\t\t[]byte(`#!\/bin\/sh\necho hello > output-1\/file-1\necho world > 
output-2\/file-2\n`),\n\t\t\t\t0755,\n\t\t\t)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tfly := exec.Command(flyBin, \"-t\", targetedConcourse, \"execute\", \"-c\", \"task.yml\", \"-o\", \"output-1=.\/output-1\", \"-o\", \"output-2=.\/output-2\")\n\t\t\tfly.Dir = fixture\n\n\t\t\tsession := helpers.StartFly(fly)\n\t\t\t<-session.Exited\n\n\t\t\tExpect(session.ExitCode()).To(Equal(0))\n\n\t\t\tfile1 := filepath.Join(fixture, \"output-1\", \"file-1\")\n\t\t\tfile2 := filepath.Join(fixture, \"output-2\", \"file-2\")\n\n\t\t\tExpect(ioutil.ReadFile(file1)).To(Equal([]byte(\"hello\\n\")))\n\t\t\tExpect(ioutil.ReadFile(file2)).To(Equal([]byte(\"world\\n\")))\n\t\t})\n\t})\n\n\tDescribe(\"aborting\", func() {\n\t\tIt(\"terminates the running task\", func() {\n\t\t\terr := ioutil.WriteFile(\n\t\t\t\tfilepath.Join(fixture, \"run\"),\n\t\t\t\t[]byte(`#!\/bin\/sh\ntrap \"echo task got sigterm; exit 1\" SIGTERM\nsleep 1000 &\necho waiting\nwait\n`),\n\t\t\t\t0755,\n\t\t\t)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tfly := exec.Command(flyBin, \"-t\", targetedConcourse, \"execute\", \"-c\", \"task.yml\")\n\t\t\tfly.Dir = fixture\n\n\t\t\tflyS := helpers.StartFly(fly)\n\n\t\t\tEventually(flyS).Should(gbytes.Say(\"waiting\"))\n\n\t\t\tflyS.Signal(syscall.SIGTERM)\n\n\t\t\tEventually(flyS).Should(gbytes.Say(\"task got sigterm\"))\n\n\t\t\t\/\/ build should have been aborted\n\t\t\tEventually(flyS).Should(gexec.Exit(3))\n\t\t})\n\t})\n})\n<commit_msg>Add test to verify files being uploaded during fly execute<commit_after>package flying_test\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"syscall\"\n\n\t\"github.com\/concourse\/testflight\/helpers\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n)\n\nvar _ = Describe(\"Flying\", func() {\n\tvar tmpdir string\n\tvar fixture, input1, input2 string\n\n\tBeforeEach(func() {\n\t\tvar err error\n\n\t\ttmpdir, err = ioutil.TempDir(\"\", \"fly-test\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tfixture = filepath.Join(tmpdir, \"fixture\")\n\t\tinput1 = filepath.Join(tmpdir, \"input-1\")\n\t\tinput2 = filepath.Join(tmpdir, \"input-2\")\n\n\t\terr = os.MkdirAll(fixture, 0755)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\terr = os.MkdirAll(input1, 0755)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\terr = os.MkdirAll(input2, 0755)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\terr = ioutil.WriteFile(\n\t\t\tfilepath.Join(fixture, \"run\"),\n\t\t\t[]byte(`#!\/bin\/sh\necho some output\necho FOO is $FOO\necho ARGS are \"$@\"\nexit 0\n`),\n\t\t\t0755,\n\t\t)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\terr = ioutil.WriteFile(\n\t\t\tfilepath.Join(tmpdir, \"task.yml\"),\n\t\t\t[]byte(`---\nplatform: linux\n\nimage_resource:\n type: docker-image\n source: {repository: busybox}\n\ninputs:\n- name: fixture\n- name: input-1\n- name: input-2\n\noutputs:\n- name: output-1\n- name: output-2\n\nparams:\n FOO: 1\n\nrun:\n path: fixture\/run\n`),\n\t\t\t0644,\n\t\t)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t})\n\n\tAfterEach(func() {\n\t\tos.RemoveAll(tmpdir)\n\t})\n\n\tIt(\"works\", func() {\n\t\tfly := exec.Command(flyBin, \"-t\", targetedConcourse, \"execute\", \"-c\", \"task.yml\", \"-i\", \"fixture=.\/fixture\", \"-i\", \"input-1=.\/input-1\", \"-i\", \"input-2=.\/input-2\", \"--\", \"SOME\", \"ARGS\")\n\t\tfly.Dir = tmpdir\n\n\t\tsession := 
helpers.StartFly(fly)\n\n\t\tEventually(session).Should(gexec.Exit(0))\n\n\t\tExpect(session).To(gbytes.Say(\"some output\"))\n\t\tExpect(session).To(gbytes.Say(\"FOO is 1\"))\n\t\tExpect(session).To(gbytes.Say(\"ARGS are SOME ARGS\"))\n\t})\n\n\tDescribe(\"hijacking\", func() {\n\t\tIt(\"executes an interactive command in a running task's container\", func() {\n\t\t\terr := ioutil.WriteFile(\n\t\t\t\tfilepath.Join(fixture, \"run\"),\n\t\t\t\t[]byte(`#!\/bin\/sh\nmkfifo \/tmp\/fifo\necho waiting\ncat < \/tmp\/fifo\n`),\n\t\t\t\t0755,\n\t\t\t)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tfly := exec.Command(flyBin, \"-t\", targetedConcourse, \"execute\", \"-c\", \"task.yml\", \"-i\", \"fixture=.\/fixture\", \"-i\", \"input-1=.\/input-1\", \"-i\", \"input-2=.\/input-2\")\n\t\t\tfly.Dir = tmpdir\n\n\t\t\tflyS := helpers.StartFly(fly)\n\n\t\t\tEventually(flyS).Should(gbytes.Say(\"executing build\"))\n\n\t\t\tbuildRegex := regexp.MustCompile(`executing build (\\d+)`)\n\t\t\tmatches := buildRegex.FindSubmatch(flyS.Out.Contents())\n\t\t\tbuildID := string(matches[1])\n\n\t\t\tEventually(flyS).Should(gbytes.Say(\"waiting\"))\n\n\t\t\tenv := exec.Command(flyBin, \"-t\", targetedConcourse, \"hijack\", \"-b\", buildID, \"-s\", \"one-off\", \"--\", \"env\")\n\t\t\tenvS := helpers.StartFly(env)\n\t\t\t<-envS.Exited\n\t\t\tExpect(envS.ExitCode()).To(Equal(0))\n\t\t\tExpect(envS.Out).To(gbytes.Say(\"FOO=1\"))\n\n\t\t\thijack := exec.Command(flyBin, \"-t\", targetedConcourse, \"hijack\", \"-b\", buildID, \"-s\", \"one-off\", \"--\", \"sh\", \"-c\", \"echo marco > \/tmp\/fifo\")\n\t\t\thijackS := helpers.StartFly(hijack)\n\t\t\tEventually(flyS).Should(gbytes.Say(\"marco\"))\n\t\t\tEventually(hijackS).Should(gexec.Exit())\n\t\t\tEventually(flyS).Should(gexec.Exit(0))\n\t\t})\n\t})\n\n\tDescribe(\"uploading inputs with and without -x\", func() {\n\t\tBeforeEach(func() {\n\t\t\tgitIgnorePath := filepath.Join(input1, \".gitignore\")\n\n\t\t\terr := ioutil.WriteFile(gitIgnorePath, []byte(`*.exist`), 0644)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tfileToBeIgnoredPath := filepath.Join(input1, \"expect-not-to.exist\")\n\t\t\terr = ioutil.WriteFile(fileToBeIgnoredPath, []byte(`ignored file content`), 0644)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tfileToBeIncludedPath := filepath.Join(input2, \"expect-to.exist\")\n\t\t\terr = ioutil.WriteFile(fileToBeIncludedPath, []byte(`included file content`), 0644)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tfile1 := filepath.Join(input1, \"file-1\")\n\t\t\terr = ioutil.WriteFile(file1, []byte(`file-1 contents`), 0644)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tfile2 := filepath.Join(input2, \"file-2\")\n\t\t\terr = ioutil.WriteFile(file2, []byte(`file-2 contents`), 0644)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\terr = os.Mkdir(filepath.Join(input1, \".git\"), 0755)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\terr = os.Mkdir(filepath.Join(input1, \".git\/refs\"), 0755)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\terr = os.Mkdir(filepath.Join(input1, \".git\/objects\"), 0755)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tgitHEADPath := filepath.Join(input1, \".git\/HEAD\")\n\t\t\terr = ioutil.WriteFile(gitHEADPath, []byte(`ref: refs\/heads\/master`), 0644)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\terr = ioutil.WriteFile(\n\t\t\t\tfilepath.Join(fixture, \"run\"),\n\t\t\t\t[]byte(`#!\/bin\/sh\ncp -a input-1\/. output-1\/\ncp -a input-2\/. 
output-2\/\n`),\n\t\t\t\t0755,\n\t\t\t)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t})\n\n\t\tIt(\"uploads git repo input and non git repo input, IGNORING things in the .gitignore for git repo inputs\", func() {\n\t\t\tfly := exec.Command(flyBin, \"-t\", targetedConcourse, \"execute\", \"-c\", \"task.yml\", \"-i\", \"fixture=.\/fixture\", \"-i\", \"input-1=.\/input-1\", \"-i\", \"input-2=.\/input-2\", \"-o\", \"output-1=.\/output-1\", \"-o\", \"output-2=.\/output-2\")\n\t\t\tfly.Dir = tmpdir\n\n\t\t\tsession := helpers.StartFly(fly)\n\t\t\t<-session.Exited\n\n\t\t\tExpect(session.ExitCode()).To(Equal(0))\n\n\t\t\tfileToBeIgnoredPath := filepath.Join(tmpdir, \"output-1\", \"expect-not-to.exist\")\n\t\t\tfileToBeIncludedPath := filepath.Join(tmpdir, \"output-2\", \"expect-to.exist\")\n\t\t\tfile1 := filepath.Join(tmpdir, \"output-1\", \"file-1\")\n\t\t\tfile2 := filepath.Join(tmpdir, \"output-2\", \"file-2\")\n\n\t\t\t_, err := ioutil.ReadFile(fileToBeIgnoredPath)\n\t\t\tExpect(err).To(HaveOccurred())\n\n\t\t\tExpect(ioutil.ReadFile(fileToBeIncludedPath)).To(Equal([]byte(\"included file content\")))\n\t\t\tExpect(ioutil.ReadFile(file1)).To(Equal([]byte(\"file-1 contents\")))\n\t\t\tExpect(ioutil.ReadFile(file2)).To(Equal([]byte(\"file-2 contents\")))\n\t\t})\n\n\t\tIt(\"uploads git repo input and non git repo input, INCLUDING things in the .gitignore for git repo inputs\", func() {\n\t\t\tfly := exec.Command(flyBin, \"-t\", targetedConcourse, \"execute\", \"-x\", \"-c\", \"task.yml\", \"-i\", \"fixture=.\/fixture\", \"-i\", \"input-1=.\/input-1\", \"-i\", \"input-2=.\/input-2\", \"-o\", \"output-1=.\/output-1\", \"-o\", \"output-2=.\/output-2\")\n\t\t\tfly.Dir = tmpdir\n\n\t\t\tsession := helpers.StartFly(fly)\n\t\t\t<-session.Exited\n\n\t\t\tExpect(session.ExitCode()).To(Equal(0))\n\n\t\t\tfileToBeIgnoredPath := filepath.Join(tmpdir, \"output-1\", \"expect-not-to.exist\")\n\t\t\tfileToBeIncludedPath := filepath.Join(tmpdir, \"output-2\", \"expect-to.exist\")\n\t\t\tfile1 := filepath.Join(tmpdir, \"output-1\", \"file-1\")\n\t\t\tfile2 := filepath.Join(tmpdir, \"output-2\", \"file-2\")\n\n\t\t\tExpect(ioutil.ReadFile(fileToBeIgnoredPath)).To(Equal([]byte(\"ignored file content\")))\n\t\t\tExpect(ioutil.ReadFile(fileToBeIncludedPath)).To(Equal([]byte(\"included file content\")))\n\t\t\tExpect(ioutil.ReadFile(file1)).To(Equal([]byte(\"file-1 contents\")))\n\t\t\tExpect(ioutil.ReadFile(file2)).To(Equal([]byte(\"file-2 contents\")))\n\t\t})\n\t})\n\n\tDescribe(\"pulling down outputs\", func() {\n\t\tIt(\"works\", func() {\n\t\t\terr := ioutil.WriteFile(\n\t\t\t\tfilepath.Join(fixture, \"run\"),\n\t\t\t\t[]byte(`#!\/bin\/sh\necho hello > output-1\/file-1\necho world > output-2\/file-2\n`),\n\t\t\t\t0755,\n\t\t\t)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tfly := exec.Command(flyBin, \"-t\", targetedConcourse, \"execute\", \"-c\", \"task.yml\", \"-i\", \"fixture=.\/fixture\", \"-i\", \"input-1=.\/input-1\", \"-i\", \"input-2=.\/input-2\", \"-o\", \"output-1=.\/output-1\", \"-o\", \"output-2=.\/output-2\")\n\t\t\tfly.Dir = tmpdir\n\n\t\t\tsession := helpers.StartFly(fly)\n\t\t\t<-session.Exited\n\n\t\t\tExpect(session.ExitCode()).To(Equal(0))\n\n\t\t\tfile1 := filepath.Join(tmpdir, \"output-1\", \"file-1\")\n\t\t\tfile2 := filepath.Join(tmpdir, \"output-2\", \"file-2\")\n\n\t\t\tExpect(ioutil.ReadFile(file1)).To(Equal([]byte(\"hello\\n\")))\n\t\t\tExpect(ioutil.ReadFile(file2)).To(Equal([]byte(\"world\\n\")))\n\t\t})\n\t})\n\n\tDescribe(\"aborting\", func() {\n\t\tIt(\"terminates the 
running task\", func() {\n\t\t\terr := ioutil.WriteFile(\n\t\t\t\tfilepath.Join(fixture, \"run\"),\n\t\t\t\t[]byte(`#!\/bin\/sh\ntrap \"echo task got sigterm; exit 1\" SIGTERM\nsleep 1000 &\necho waiting\nwait\n`),\n\t\t\t\t0755,\n\t\t\t)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tfly := exec.Command(flyBin, \"-t\", targetedConcourse, \"execute\", \"-c\", \"task.yml\", \"-i\", \"fixture=.\/fixture\", \"-i\", \"input-1=.\/input-1\", \"-i\", \"input-2=.\/input-2\")\n\t\t\tfly.Dir = tmpdir\n\n\t\t\tflyS := helpers.StartFly(fly)\n\n\t\t\tEventually(flyS).Should(gbytes.Say(\"waiting\"))\n\n\t\t\tflyS.Signal(syscall.SIGTERM)\n\n\t\t\tEventually(flyS).Should(gbytes.Say(\"task got sigterm\"))\n\n\t\t\t\/\/ build should have been aborted\n\t\t\tEventually(flyS).Should(gexec.Exit(3))\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package qshell\n\nimport (\n\t\"bufio\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/qiniu\/api\/auth\/digest\"\n\tfio \"github.com\/qiniu\/api\/io\"\n\trio \"github.com\/qiniu\/api\/resumable\/io\"\n\t\"github.com\/qiniu\/api\/rs\"\n\t\"github.com\/qiniu\/log\"\n\t\"github.com\/syndtr\/goleveldb\/leveldb\"\n\t\"github.com\/syndtr\/goleveldb\/leveldb\/opt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/user\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/*\nConfig file like:\n\n{\n\t\"src_dir\" \t\t:\t\"\/Users\/jemy\/Photos\",\n\t\"access_key\" \t:\t\"<Your AccessKey>\",\n\t\"secret_key\"\t:\t\"<Your SecretKey>\",\n\t\"bucket\"\t\t:\t\"test-bucket\",\n\t\"ignore_dir\"\t:\tfalse,\n\t\"key_prefix\"\t:\t\"2014\/12\/01\/\",\n\t\"overwrite\"\t\t:\tfalse\n}\n\nor without key_prefix and ignore_dir\n\n{\n\t\"src_dir\" \t\t:\t\"\/Users\/jemy\/Photos\",\n\t\"access_key\" \t:\t\"<Your AccessKey>\",\n\t\"secret_key\"\t:\t\"<Your SecretKey>\",\n\t\"bucket\"\t\t:\t\"test-bucket\",\n}\n*\/\n\nconst (\n\tPUT_THRESHOLD int64 = 100 * 1 << 20\n\tMIN_UPLOAD_THREAD_COUNT int64 = 1\n\tMAX_UPLOAD_THREAD_COUNT int64 = 100\n)\n\ntype UploadConfig struct {\n\tSrcDir string `json:\"src_dir\"`\n\tAccessKey string `json:\"access_key\"`\n\tSecretKey string `json:\"secret_key\"`\n\tBucket string `json:\"bucket\"`\n\tKeyPrefix string `json:\"key_prefix,omitempty\"`\n\tIgnoreDir bool `json:\"ignore_dir,omitempty\"`\n\tOverwrite bool `json:\"overwrite,omitempty\"`\n}\n\nfunc QiniuUpload(threadCount int, uploadConfigFile string) {\n\tfp, err := os.Open(uploadConfigFile)\n\tif err != nil {\n\t\tlog.Error(fmt.Sprintf(\"Open upload config file `%s' error due to `%s'\", uploadConfigFile, err))\n\t\treturn\n\t}\n\tdefer fp.Close()\n\tconfigData, err := ioutil.ReadAll(fp)\n\tif err != nil {\n\t\tlog.Error(fmt.Sprintf(\"Read upload config file `%s' error due to `%s'\", uploadConfigFile, err))\n\t\treturn\n\t}\n\tvar uploadConfig UploadConfig\n\terr = json.Unmarshal(configData, &uploadConfig)\n\tif err != nil {\n\t\tlog.Error(fmt.Sprintf(\"Parse upload config file `%s' errror due to `%s'\", uploadConfigFile, err))\n\t\treturn\n\t}\n\tif _, err := os.Stat(uploadConfig.SrcDir); err != nil {\n\t\tlog.Error(\"Upload config error for parameter `SrcDir`,\", err)\n\t\treturn\n\t}\n\tdirCache := DirCache{}\n\tcurrentUser, err := user.Current()\n\tif err != nil {\n\t\tlog.Error(\"Failed to get current user\", err)\n\t\treturn\n\t}\n\tjobId := base64.URLEncoding.EncodeToString([]byte(uploadConfig.SrcDir + \":\" + uploadConfig.Bucket))\n\tstorePath := fmt.Sprintf(\"%s\/.qshell\/qupload\/%s\", currentUser.HomeDir, jobId)\n\terr = os.MkdirAll(storePath, 0775)\n\tif err 
!= nil {\n\t\tlog.Error(fmt.Sprintf(\"Failed to mkdir `%s' due to `%s'\", storePath, err))\n\t\treturn\n\t}\n\tcacheFileName := fmt.Sprintf(\"%s\/%s.cache\", storePath, jobId)\n\tleveldbFileName := fmt.Sprintf(\"%s\/%s.ldb\", storePath, jobId)\n\ttotalFileCount := dirCache.Cache(uploadConfig.SrcDir, cacheFileName)\n\tldb, err := leveldb.OpenFile(leveldbFileName, nil)\n\tif err != nil {\n\t\tlog.Error(fmt.Sprintf(\"Open leveldb `%s' failed due to `%s'\", leveldbFileName, err))\n\t\treturn\n\t}\n\tdefer ldb.Close()\n\t\/\/sync\n\tufp, err := os.Open(cacheFileName)\n\tif err != nil {\n\t\tlog.Error(fmt.Sprintf(\"Open cache file `%s' failed due to `%s'\", cacheFileName, err))\n\t\treturn\n\t}\n\tdefer ufp.Close()\n\tbScanner := bufio.NewScanner(ufp)\n\tbScanner.Split(bufio.ScanLines)\n\tcurrentFileCount := 0\n\tldbWOpt := opt.WriteOptions{\n\t\tSync: true,\n\t}\n\n\tupWorkGroup := sync.WaitGroup{}\n\tupCounter := 0\n\tthreadThreshold := threadCount + 1\n\n\tmac := digest.Mac{uploadConfig.AccessKey, []byte(uploadConfig.SecretKey)}\n\t\/\/check thread count\n\tfor bScanner.Scan() {\n\t\tline := strings.TrimSpace(bScanner.Text())\n\t\titems := strings.Split(line, \"\\t\")\n\t\tif len(items) > 1 {\n\t\t\tcacheFname := items[0]\n\t\t\tcacheFlmd, _ := strconv.Atoi(items[2])\n\t\t\tuploadFileKey := cacheFname\n\t\t\tif uploadConfig.IgnoreDir {\n\t\t\t\tif i := strings.LastIndex(uploadFileKey, string(os.PathSeparator)); i != -1 {\n\t\t\t\t\tuploadFileKey = uploadFileKey[i+1:]\n\t\t\t\t}\n\t\t\t}\n\t\t\tif uploadConfig.KeyPrefix != \"\" {\n\t\t\t\tuploadFileKey = strings.Join([]string{uploadConfig.KeyPrefix, uploadFileKey}, \"\")\n\t\t\t}\n\t\t\t\/\/convert \\ to \/ under windows\n\t\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\tuploadFileKey = strings.Replace(uploadFileKey, \"\\\\\", \"\/\", -1)\n\t\t\t}\n\t\t\tcacheFilePath := strings.Join([]string{uploadConfig.SrcDir, cacheFname}, string(os.PathSeparator))\n\t\t\tfstat, err := os.Stat(cacheFilePath)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(fmt.Sprintf(\"Error stat local file `%s' due to `%s'\", cacheFilePath, err))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfsize := fstat.Size()\n\n\t\t\t\/\/check leveldb\n\t\t\tcurrentFileCount += 1\n\t\t\tldbKey := fmt.Sprintf(\"%s => %s\", cacheFilePath, uploadFileKey)\n\t\t\tlog.Debug(fmt.Sprintf(\"Checking %s ...\", ldbKey))\n\t\t\t\/\/check last modified\n\t\t\tldbFlmd, err := ldb.Get([]byte(ldbKey), nil)\n\t\t\tflmd, _ := strconv.Atoi(string(ldbFlmd))\n\t\t\t\/\/not exist, return ErrNotFound\n\t\t\tif err == nil && cacheFlmd == flmd {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfmt.Print(\"\\033[2K\\r\")\n\t\t\tfmt.Printf(\"Uploading %s (%d\/%d, %.0f%%) ...\", ldbKey, currentFileCount, totalFileCount,\n\t\t\t\tfloat32(currentFileCount)*100\/float32(totalFileCount))\n\t\t\tos.Stdout.Sync()\n\t\t\t\/\/worker\n\t\t\tupCounter += 1\n\t\t\tif upCounter%threadThreshold == 0 {\n\t\t\t\tupWorkGroup.Wait()\n\t\t\t}\n\t\t\tupWorkGroup.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer upWorkGroup.Done()\n\n\t\t\t\tpolicy := rs.PutPolicy{}\n\t\t\t\tpolicy.Scope = uploadConfig.Bucket\n\t\t\t\tif uploadConfig.Overwrite {\n\t\t\t\t\tpolicy.Scope = uploadConfig.Bucket + \":\" + uploadFileKey\n\t\t\t\t\tpolicy.InsertOnly = 0\n\t\t\t\t}\n\t\t\t\tpolicy.Expires = 24 * 3600\n\t\t\t\tuptoken := policy.Token(&mac)\n\t\t\t\tif fsize > PUT_THRESHOLD {\n\t\t\t\t\tputRet := rio.PutRet{}\n\t\t\t\t\terr := rio.PutFile(nil, &putRet, uptoken, uploadFileKey, cacheFilePath, nil)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Error(fmt.Sprintf(\"Put file `%s' => `%s' 
failed due to `%s'\", cacheFilePath, uploadFileKey, err))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tperr := ldb.Put([]byte(ldbKey), []byte(\"Y\"), &ldbWOpt)\n\t\t\t\t\t\tif perr != nil {\n\t\t\t\t\t\t\tlog.Error(fmt.Sprintf(\"Put key `%s' into leveldb error due to `%s'\", ldbKey, perr))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tputRet := fio.PutRet{}\n\t\t\t\t\terr := fio.PutFile(nil, &putRet, uptoken, uploadFileKey, cacheFilePath, nil)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Error(fmt.Sprintf(\"Put file `%s' => `%s' failed due to `%s'\", cacheFilePath, uploadFileKey, err))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tperr := ldb.Put([]byte(ldbKey), []byte(strconv.Itoa(cacheFlmd)), &ldbWOpt)\n\t\t\t\t\t\tif perr != nil {\n\t\t\t\t\t\t\tlog.Error(fmt.Sprintf(\"Put key `%s' into leveldb error due to `%s'\", ldbKey, perr))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t} else {\n\t\t\tlog.Error(fmt.Sprintf(\"Error cache line `%s'\", line))\n\t\t}\n\t}\n\tupWorkGroup.Wait()\n\tfmt.Println()\n\tfmt.Println(\"Upload done!\")\n}\n<commit_msg>Use os.Separator to be compatible under windows.<commit_after>package qshell\n\nimport (\n\t\"bufio\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/qiniu\/api\/auth\/digest\"\n\tfio \"github.com\/qiniu\/api\/io\"\n\trio \"github.com\/qiniu\/api\/resumable\/io\"\n\t\"github.com\/qiniu\/api\/rs\"\n\t\"github.com\/qiniu\/log\"\n\t\"github.com\/syndtr\/goleveldb\/leveldb\"\n\t\"github.com\/syndtr\/goleveldb\/leveldb\/opt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/user\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/*\nConfig file like:\n\n{\n\t\"src_dir\" \t\t:\t\"\/Users\/jemy\/Photos\",\n\t\"access_key\" \t:\t\"<Your AccessKey>\",\n\t\"secret_key\"\t:\t\"<Your SecretKey>\",\n\t\"bucket\"\t\t:\t\"test-bucket\",\n\t\"ignore_dir\"\t:\tfalse,\n\t\"key_prefix\"\t:\t\"2014\/12\/01\/\",\n\t\"overwrite\"\t\t:\tfalse\n}\n\nor without key_prefix and ignore_dir\n\n{\n\t\"src_dir\" \t\t:\t\"\/Users\/jemy\/Photos\",\n\t\"access_key\" \t:\t\"<Your AccessKey>\",\n\t\"secret_key\"\t:\t\"<Your SecretKey>\",\n\t\"bucket\"\t\t:\t\"test-bucket\"\n}\n*\/\n\nconst (\n\tPUT_THRESHOLD int64 = 100 * 1 << 20\n\tMIN_UPLOAD_THREAD_COUNT int64 = 1\n\tMAX_UPLOAD_THREAD_COUNT int64 = 100\n)\n\ntype UploadConfig struct {\n\tSrcDir string `json:\"src_dir\"`\n\tAccessKey string `json:\"access_key\"`\n\tSecretKey string `json:\"secret_key\"`\n\tBucket string `json:\"bucket\"`\n\tKeyPrefix string `json:\"key_prefix,omitempty\"`\n\tIgnoreDir bool `json:\"ignore_dir,omitempty\"`\n\tOverwrite bool `json:\"overwrite,omitempty\"`\n}\n\nfunc QiniuUpload(threadCount int, uploadConfigFile string) {\n\tfp, err := os.Open(uploadConfigFile)\n\tif err != nil {\n\t\tlog.Error(fmt.Sprintf(\"Open upload config file `%s' error due to `%s'\", uploadConfigFile, err))\n\t\treturn\n\t}\n\tdefer fp.Close()\n\tconfigData, err := ioutil.ReadAll(fp)\n\tif err != nil {\n\t\tlog.Error(fmt.Sprintf(\"Read upload config file `%s' error due to `%s'\", uploadConfigFile, err))\n\t\treturn\n\t}\n\tvar uploadConfig UploadConfig\n\terr = json.Unmarshal(configData, &uploadConfig)\n\tif err != nil {\n\t\tlog.Error(fmt.Sprintf(\"Parse upload config file `%s' error due to `%s'\", uploadConfigFile, err))\n\t\treturn\n\t}\n\tif _, err := os.Stat(uploadConfig.SrcDir); err != nil {\n\t\tlog.Error(\"Upload config error for parameter `SrcDir`,\", err)\n\t\treturn\n\t}\n\tdirCache := DirCache{}\n\tcurrentUser, err := user.Current()\n\tif err != nil {\n\t\tlog.Error(\"Failed to get current 
user\", err)\n\t\treturn\n\t}\n\tpathSep:=string(os.PathSeparator)\n\tjobId := base64.URLEncoding.EncodeToString([]byte(uploadConfig.SrcDir + \":\" + uploadConfig.Bucket))\n\tstorePath := fmt.Sprintf(\"%s%s.qshell%squpload%s%s\", currentUser.HomeDir,pathSep,pathSep, pathSep,jobId)\n\terr = os.MkdirAll(storePath, 0775)\n\tif err != nil {\n\t\tlog.Error(fmt.Sprintf(\"Failed to mkdir `%s' due to `%s'\", storePath, err))\n\t\treturn\n\t}\n\tcacheFileName := fmt.Sprintf(\"%s%s%s.cache\", storePath,pathSep, jobId)\n\tleveldbFileName := fmt.Sprintf(\"%s%s%s.ldb\", storePath,pathSep, jobId)\n\ttotalFileCount := dirCache.Cache(uploadConfig.SrcDir, cacheFileName)\n\tldb, err := leveldb.OpenFile(leveldbFileName, nil)\n\tif err != nil {\n\t\tlog.Error(fmt.Sprintf(\"Open leveldb `%s' failed due to `%s'\", leveldbFileName, err))\n\t\treturn\n\t}\n\tdefer ldb.Close()\n\t\/\/sync\n\tufp, err := os.Open(cacheFileName)\n\tif err != nil {\n\t\tlog.Error(fmt.Sprintf(\"Open cache file `%s' failed due to `%s'\", cacheFileName, err))\n\t\treturn\n\t}\n\tdefer ufp.Close()\n\tbScanner := bufio.NewScanner(ufp)\n\tbScanner.Split(bufio.ScanLines)\n\tcurrentFileCount := 0\n\tldbWOpt := opt.WriteOptions{\n\t\tSync: true,\n\t}\n\n\tupWorkGroup := sync.WaitGroup{}\n\tupCounter := 0\n\tthreadThreshold := threadCount + 1\n\n\tmac := digest.Mac{uploadConfig.AccessKey, []byte(uploadConfig.SecretKey)}\n\t\/\/check thread count\n\tfor bScanner.Scan() {\n\t\tline := strings.TrimSpace(bScanner.Text())\n\t\titems := strings.Split(line, \"\\t\")\n\t\tif len(items) > 1 {\n\t\t\tcacheFname := items[0]\n\t\t\tcacheFlmd, _ := strconv.Atoi(items[2])\n\t\t\tuploadFileKey := cacheFname\n\t\t\tif uploadConfig.IgnoreDir {\n\t\t\t\tif i := strings.LastIndex(uploadFileKey, pathSep); i != -1 {\n\t\t\t\t\tuploadFileKey = uploadFileKey[i+1:]\n\t\t\t\t}\n\t\t\t}\n\t\t\tif uploadConfig.KeyPrefix != \"\" {\n\t\t\t\tuploadFileKey = strings.Join([]string{uploadConfig.KeyPrefix, uploadFileKey}, \"\")\n\t\t\t}\n\t\t\t\/\/convert \\ to \/ under windows\n\t\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\tuploadFileKey = strings.Replace(uploadFileKey, \"\\\\\", \"\/\", -1)\n\t\t\t}\n\t\t\tcacheFilePath := strings.Join([]string{uploadConfig.SrcDir, cacheFname}, pathSep)\n\t\t\tfstat, err := os.Stat(cacheFilePath)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(fmt.Sprintf(\"Error stat local file `%s' due to `%s'\", cacheFilePath, err))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfsize := fstat.Size()\n\n\t\t\t\/\/check leveldb\n\t\t\tcurrentFileCount += 1\n\t\t\tldbKey := fmt.Sprintf(\"%s => %s\", cacheFilePath, uploadFileKey)\n\t\t\tlog.Debug(fmt.Sprintf(\"Checking %s ...\", ldbKey))\n\t\t\t\/\/check last modified\n\t\t\tldbFlmd, err := ldb.Get([]byte(ldbKey), nil)\n\t\t\tflmd, _ := strconv.Atoi(string(ldbFlmd))\n\t\t\t\/\/not exist, return ErrNotFound\n\t\t\tif err == nil && cacheFlmd == flmd {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfmt.Print(\"\\033[2K\\r\")\n\t\t\tfmt.Printf(\"Uploading %s (%d\/%d, %.0f%%) ...\", ldbKey, currentFileCount, totalFileCount,\n\t\t\t\tfloat32(currentFileCount)*100\/float32(totalFileCount))\n\t\t\tos.Stdout.Sync()\n\t\t\t\/\/worker\n\t\t\tupCounter += 1\n\t\t\tif upCounter%threadThreshold == 0 {\n\t\t\t\tupWorkGroup.Wait()\n\t\t\t}\n\t\t\tupWorkGroup.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer upWorkGroup.Done()\n\n\t\t\t\tpolicy := rs.PutPolicy{}\n\t\t\t\tpolicy.Scope = uploadConfig.Bucket\n\t\t\t\tif uploadConfig.Overwrite {\n\t\t\t\t\tpolicy.Scope = uploadConfig.Bucket + \":\" + uploadFileKey\n\t\t\t\t\tpolicy.InsertOnly = 
0\n\t\t\t\t}\n\t\t\t\tpolicy.Expires = 24 * 3600\n\t\t\t\tuptoken := policy.Token(&mac)\n\t\t\t\tif fsize > PUT_THRESHOLD {\n\t\t\t\t\tputRet := rio.PutRet{}\n\t\t\t\t\terr := rio.PutFile(nil, &putRet, uptoken, uploadFileKey, cacheFilePath, nil)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Error(fmt.Sprintf(\"Put file `%s' => `%s' failed due to `%s'\", cacheFilePath, uploadFileKey, err))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tperr := ldb.Put([]byte(ldbKey), []byte(\"Y\"), &ldbWOpt)\n\t\t\t\t\t\tif perr != nil {\n\t\t\t\t\t\t\tlog.Error(fmt.Sprintf(\"Put key `%s' into leveldb error due to `%s'\", ldbKey, perr))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tputRet := fio.PutRet{}\n\t\t\t\t\terr := fio.PutFile(nil, &putRet, uptoken, uploadFileKey, cacheFilePath, nil)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Error(fmt.Sprintf(\"Put file `%s' => `%s' failed due to `%s'\", cacheFilePath, uploadFileKey, err))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tperr := ldb.Put([]byte(ldbKey), []byte(strconv.Itoa(cacheFlmd)), &ldbWOpt)\n\t\t\t\t\t\tif perr != nil {\n\t\t\t\t\t\t\tlog.Error(fmt.Sprintf(\"Put key `%s' into leveldb error due to `%s'\", ldbKey, perr))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t} else {\n\t\t\tlog.Error(fmt.Sprintf(\"Error cache line `%s'\", line))\n\t\t}\n\t}\n\tupWorkGroup.Wait()\n\tfmt.Println()\n\tfmt.Println(\"Upload done!\")\n}\n<|endoftext|>"}
{"text":"<commit_before>package httpclient\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/quan-xie\/tuba\/util\/retry\"\n\t\"github.com\/quan-xie\/tuba\/util\/xtime\"\n)\n\nconst (\n\tminRead = 16 * 1024 \/\/ 16kb\n\tdefaultRetryCount int = 0\n)\n\ntype Config struct {\n\tDial xtime.Duration\n\tTimeout xtime.Duration\n\tKeepAlive xtime.Duration\n\tretryCount int\n}\n\ntype HttpClient struct {\n\tconf *Config\n\tclient *http.Client\n\tdialer *net.Dialer\n\ttransport *http.Transport\n\tretryCount int\n\tretrier retry.Retriable\n}\n\n\/\/ NewHTTPClient returns a new instance of httpClient\nfunc NewHTTPClient(c *Config) *HttpClient {\n\tdialer := &net.Dialer{\n\t\tTimeout: time.Duration(c.Dial),\n\t\tKeepAlive: time.Duration(c.KeepAlive),\n\t}\n\ttransport := &http.Transport{\n\t\tDialContext: dialer.DialContext,\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\treturn &HttpClient{\n\t\tconf: c,\n\t\tclient: &http.Client{\n\t\t\tTransport: transport,\n\t\t},\n\t\tretryCount: defaultRetryCount,\n\t\tretrier: retry.NewNoRetrier(),\n\t}\n}\n\n\/\/ SetRetryCount sets the retry count for the httpClient\nfunc (c *HttpClient) SetRetryCount(count int) {\n\tc.retryCount = count\n}\n\n\/\/ SetRetrier sets the retrier for the httpClient\nfunc (c *HttpClient) SetRetrier(retrier retry.Retriable) {\n\tc.retrier = retrier\n}\n\n\/\/ Get makes a HTTP GET request to provided URL with context passed in\nfunc (c *HttpClient) Get(ctx context.Context, url string, headers http.Header, res interface{}) (err error) {\n\trequest, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"GET - request creation failed\")\n\t}\n\n\trequest.Header = headers\n\n\treturn c.Do(ctx, request, res)\n}\n\n\/\/ Post makes a HTTP POST request to provided URL with context passed in\nfunc (c *HttpClient) Post(ctx context.Context, url string, body io.Reader, headers http.Header, res interface{}) (err error) {\n\trequest, err := http.NewRequest(http.MethodPost, url, 
body)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"POST - request creation failed\")\n\t}\n\n\trequest.Header = headers\n\n\treturn c.Do(ctx, request, res)\n}\n\n\/\/ Put makes a HTTP PUT request to provided URL with context passed in\nfunc (c *HttpClient) Put(ctx context.Context, url string, body io.Reader, headers http.Header, res interface{}) (err error) {\n\trequest, err := http.NewRequest(http.MethodPut, url, body)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"PUT - request creation failed\")\n\t}\n\n\trequest.Header = headers\n\n\treturn c.Do(ctx, request, res)\n}\n\n\/\/ Patch makes a HTTP PATCH request to provided URL with context passed in\nfunc (c *HttpClient) Patch(ctx context.Context, url string, body io.Reader, headers http.Header, res interface{}) (err error) {\n\trequest, err := http.NewRequest(http.MethodPatch, url, body)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"PATCH - request creation failed\")\n\t}\n\n\trequest.Header = headers\n\n\treturn c.Do(ctx, request, res)\n}\n\n\/\/ Delete makes a HTTP DELETE request to provided URL with context passed in\nfunc (c *HttpClient) Delete(ctx context.Context, url string, headers http.Header, res interface{}) (err error) {\n\trequest, err := http.NewRequest(http.MethodDelete, url, nil)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"DELETE - request creation failed\")\n\t}\n\n\trequest.Header = headers\n\n\treturn c.Do(ctx, request, res)\n}\n\n\/\/ Do makes an HTTP request with the native `http.Do` interface and context passed in\nfunc (c *HttpClient) Do(ctx context.Context, req *http.Request, res interface{}) (err error) {\n\tfor i := 0; i <= c.retryCount; i++ {\n\t\tif err = c.request(ctx, req, res); err != nil {\n\t\t\terr = errors.Wrap(err, \"request - request failed\")\n\t\t\tbackoffTime := c.retrier.NextInterval(i)\n\t\t\ttime.Sleep(backoffTime)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\treturn\n}\n\nfunc (c *HttpClient) request(ctx context.Context, req *http.Request, res interface{}) (err error) {\n\tvar (\n\t\tresponse *http.Response\n\t\tbs []byte\n\t\tcancel func()\n\t)\n\tctx, cancel = context.WithTimeout(ctx, time.Duration(c.conf.Timeout))\n\tdefer cancel()\n\tresponse, err = c.client.Do(req.WithContext(ctx))\n\tif err != nil {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\terr = ctx.Err()\n\t\t}\n\t\treturn\n\t}\n\tdefer response.Body.Close()\n\tif response.StatusCode >= http.StatusInternalServerError {\n\t\treturn\n\t}\n\tbs, err = readAll(response.Body, minRead)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"readAll - readAll failed\")\n\t\treturn err\n\t}\n\tif res != nil {\n\t\tif err = json.Unmarshal(bs, res); err != nil {\n\t\t\terr = errors.Wrap(err, \"Unmarshal failed\")\n\t\t\treturn err\n\t\t}\n\t}\n\treturn\n}\n\nfunc readAll(r io.Reader, capacity int64) (b []byte, err error) {\n\tbuf := bytes.NewBuffer(make([]byte, 0, capacity))\n\t\/\/ If the buffer overflows, we will get bytes.ErrTooLarge.\n\t\/\/ Return that as an error. 
Any other panic remains.\n\tdefer func() {\n\t\te := recover()\n\t\tif e == nil {\n\t\t\treturn\n\t\t}\n\t\tif panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {\n\t\t\terr = panicErr\n\t\t} else {\n\t\t\tpanic(e)\n\t\t}\n\t}()\n\t_, err = buf.ReadFrom(r)\n\treturn buf.Bytes(), err\n}\n<commit_msg>format code<commit_after>package httpclient\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/quan-xie\/tuba\/util\/retry\"\n\t\"github.com\/quan-xie\/tuba\/util\/xtime\"\n)\n\nconst (\n\tminRead = 16 * 1024 \/\/ 16kb\n\tdefaultRetryCount int = 0\n)\n\ntype Config struct {\n\tDial xtime.Duration\n\tTimeout xtime.Duration\n\tKeepAlive xtime.Duration\n\tretryCount int\n}\n\ntype HttpClient struct {\n\tconf *Config\n\tclient *http.Client\n\tdialer *net.Dialer\n\ttransport *http.Transport\n\tretryCount int\n\tretrier retry.Retriable\n}\n\n\/\/ NewHTTPClient returns a new instance of httpClient\nfunc NewHTTPClient(c *Config) *HttpClient {\n\tdialer := &net.Dialer{\n\t\tTimeout: time.Duration(c.Dial),\n\t\tKeepAlive: time.Duration(c.KeepAlive),\n\t}\n\ttransport := &http.Transport{\n\t\tDialContext: dialer.DialContext,\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\treturn &HttpClient{\n\t\tconf: c,\n\t\tclient: &http.Client{\n\t\t\tTransport: transport,\n\t\t},\n\t\tretryCount: defaultRetryCount,\n\t\tretrier: retry.NewNoRetrier(),\n\t}\n}\n\n\/\/ SetRetryCount sets the retry count for the httpClient\nfunc (c *HttpClient) SetRetryCount(count int) {\n\tc.retryCount = count\n}\n\n\/\/ SetRetrier sets the retrier for the httpClient\nfunc (c *HttpClient) SetRetrier(retrier retry.Retriable) {\n\tc.retrier = retrier\n}\n\n\/\/ Get makes a HTTP GET request to provided URL with context passed in\nfunc (c *HttpClient) Get(ctx context.Context, url string, headers http.Header, res interface{}) (err error) {\n\trequest, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"GET - request creation failed\")\n\t}\n\n\trequest.Header = headers\n\n\treturn c.Do(ctx, request, res)\n}\n\n\/\/ Post makes a HTTP POST request to provided URL with context passed in\nfunc (c *HttpClient) Post(ctx context.Context, url string, body io.Reader, headers http.Header, res interface{}) (err error) {\n\trequest, err := http.NewRequest(http.MethodPost, url, body)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"POST - request creation failed\")\n\t}\n\n\trequest.Header = headers\n\n\treturn c.Do(ctx, request, res)\n}\n\n\/\/ Put makes a HTTP PUT request to provided URL with context passed in\nfunc (c *HttpClient) Put(ctx context.Context, url string, body io.Reader, headers http.Header, res interface{}) (err error) {\n\trequest, err := http.NewRequest(http.MethodPut, url, body)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"PUT - request creation failed\")\n\t}\n\n\trequest.Header = headers\n\n\treturn c.Do(ctx, request, res)\n}\n\n\/\/ Patch makes a HTTP PATCH request to provided URL with context passed in\nfunc (c *HttpClient) Patch(ctx context.Context, url string, body io.Reader, headers http.Header, res interface{}) (err error) {\n\trequest, err := http.NewRequest(http.MethodPatch, url, body)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"PATCH - request creation failed\")\n\t}\n\n\trequest.Header = headers\n\n\treturn c.Do(ctx, request, res)\n}\n\n\/\/ Delete makes a HTTP DELETE request to provided URL with 
\/\/ Delete makes a HTTP DELETE request to provided URL with context passed in\nfunc (c *HttpClient) Delete(ctx context.Context, url string, headers http.Header, res interface{}) (err error) {\n\trequest, err := http.NewRequest(http.MethodDelete, url, nil)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"DELETE - request creation failed\")\n\t}\n\n\trequest.Header = headers\n\n\treturn c.Do(ctx, request, res)\n}\n\n\/\/ Do makes an HTTP request with the native `http.Do` interface and context passed in\nfunc (c *HttpClient) Do(ctx context.Context, req *http.Request, res interface{}) (err error) {\n\tfor i := 0; i <= c.retryCount; i++ {\n\t\tif err = c.request(ctx, req, res); err != nil {\n\t\t\terr = errors.Wrap(err, \"request - request failed\")\n\t\t\tbackoffTime := c.retrier.NextInterval(i)\n\t\t\ttime.Sleep(backoffTime)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\treturn\n}\n\nfunc (c *HttpClient) request(ctx context.Context, req *http.Request, res interface{}) (err error) {\n\tvar (\n\t\tresponse *http.Response\n\t\tbs []byte\n\t\tcancel func()\n\t)\n\tctx, cancel = context.WithTimeout(ctx, time.Duration(c.conf.Timeout))\n\tdefer cancel()\n\tresponse, err = c.client.Do(req.WithContext(ctx))\n\tif err != nil {\n\t\t\/\/ Non-blocking check: report the context error only when the deadline\n\t\t\/\/ actually fired; the default branch keeps other transport errors from\n\t\t\/\/ waiting on the context.\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\terr = ctx.Err()\n\t\tdefault:\n\t\t}\n\t\treturn\n\t}\n\tdefer response.Body.Close()\n\tif response.StatusCode >= http.StatusInternalServerError {\n\t\t\/\/ return a non-nil error so Do can retry on 5xx responses\n\t\terr = errors.Errorf(\"StatusInternalServerError - status %d\", response.StatusCode)\n\t\treturn\n\t}\n\tif bs, err = readAll(response.Body, minRead); err != nil {\n\t\terr = errors.Wrap(err, \"readAll - readAll failed\")\n\t\treturn\n\t}\n\tif res != nil {\n\t\tif err = json.Unmarshal(bs, res); err != nil {\n\t\t\terr = errors.Wrap(err, \"Unmarshal failed\")\n\t\t}\n\t}\n\treturn\n}\n\nfunc readAll(r io.Reader, capacity int64) (b []byte, err error) {\n\tbuf := bytes.NewBuffer(make([]byte, 0, capacity))\n\t\/\/ If the buffer overflows, we will get bytes.ErrTooLarge.\n\t\/\/ Return that as an error. 
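(bytes.Buffer.ReadFrom panics with bytes.ErrTooLarge when the buffer cannot grow, and the recover below turns that panic back into an ordinary error value.) 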
Any other panic remains.\n\tdefer func() {\n\t\te := recover()\n\t\tif e == nil {\n\t\t\treturn\n\t\t}\n\t\tif panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {\n\t\t\terr = panicErr\n\t\t} else {\n\t\t\tpanic(e)\n\t\t}\n\t}()\n\t_, err = buf.ReadFrom(r)\n\treturn buf.Bytes(), err\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage node\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\n\tv1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/client-go\/kubernetes\/fake\"\n\tk8stesting \"k8s.io\/client-go\/testing\"\n)\n\n\/\/ TestCheckReadyForTests specifically is concerned about the multi-node logic\n\/\/ since single node checks are in TestReadyForTests.\nfunc TestCheckReadyForTests(t *testing.T) {\n\t\/\/ This is a duplicate definition of the constant in pkg\/controller\/service\/controller.go\n\tlabelNodeRoleControlPlane := \"node-role.kubernetes.io\/control-plane\"\n\n\tfromVanillaNode := func(f func(*v1.Node)) v1.Node {\n\t\tvanillaNode := &v1.Node{\n\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"test-node\"},\n\t\t\tStatus: v1.NodeStatus{\n\t\t\t\tConditions: []v1.NodeCondition{\n\t\t\t\t\t{Type: v1.NodeReady, Status: v1.ConditionTrue},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tf(vanillaNode)\n\t\treturn *vanillaNode\n\t}\n\n\ttcs := []struct {\n\t\tdesc string\n\t\tnonblockingTaints string\n\t\tallowedNotReadyNodes int\n\t\tnodes []v1.Node\n\t\tnodeListErr error\n\t\texpected bool\n\t\texpectedErr string\n\t}{\n\t\t{\n\t\t\tdesc: \"Vanilla node should pass\",\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {}),\n\t\t\t},\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Default value for nonblocking taints tolerates control plane taint\",\n\t\t\tnonblockingTaints: `node-role.kubernetes.io\/control-plane`,\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: labelNodeRoleControlPlane, Effect: v1.TaintEffectNoSchedule}}\n\t\t\t\t}),\n\t\t\t},\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Tainted node should fail if effect is TaintEffectNoExecute\",\n\t\t\tnonblockingTaints: \"bar\",\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoExecute}}\n\t\t\t\t})},\n\t\t\texpected: false,\n\t\t}, {\n\t\t\tdesc: \"Tainted node can be allowed via allowedNotReadyNodes\",\n\t\t\tnonblockingTaints: \"bar\",\n\t\t\tallowedNotReadyNodes: 1,\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoExecute}}\n\t\t\t\t})},\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Multi-node, all OK\",\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {}),\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {}),\n\t\t\t},\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Multi-node, 
single blocking node blocks\",\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {}),\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoSchedule}}\n\t\t\t\t}),\n\t\t\t},\n\t\t\texpected: false,\n\t\t}, {\n\t\t\tdesc: \"Multi-node, single blocking node allowed via allowedNotReadyNodes\",\n\t\t\tallowedNotReadyNodes: 1,\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {}),\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoSchedule}}\n\t\t\t\t}),\n\t\t\t},\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Multi-node, single blocking node allowed via nonblocking taint\",\n\t\t\tnonblockingTaints: \"foo\",\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {}),\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoSchedule}}\n\t\t\t\t}),\n\t\t\t},\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Multi-node, both blocking nodes allowed via separate nonblocking taints\",\n\t\t\tnonblockingTaints: \"foo,bar\",\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {}),\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoSchedule}}\n\t\t\t\t}),\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"bar\", Effect: v1.TaintEffectNoSchedule}}\n\t\t\t\t}),\n\t\t\t},\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Multi-node, one blocking node allowed via nonblocking taints still blocked\",\n\t\t\tnonblockingTaints: \"foo,notbar\",\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {}),\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoSchedule}}\n\t\t\t\t}),\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"bar\", Effect: v1.TaintEffectNoSchedule}}\n\t\t\t\t}),\n\t\t\t},\n\t\t\texpected: false,\n\t\t}, {\n\t\t\tdesc: \"Errors from node list are reported\",\n\t\t\tnodeListErr: errors.New(\"Forced error\"),\n\t\t\texpected: false,\n\t\t\texpectedErr: \"Forced error\",\n\t\t},\n\t}\n\n\t\/\/ Only determines some logging functionality; not relevant so set to a large value.\n\ttestLargeClusterThreshold := 1000\n\n\tfor _, tc := range tcs {\n\t\tt.Run(tc.desc, func(t *testing.T) {\n\t\t\tc := fake.NewSimpleClientset()\n\t\t\tc.PrependReactor(\"list\", \"nodes\", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\t\tnodeList := &v1.NodeList{Items: tc.nodes}\n\t\t\t\treturn true, nodeList, tc.nodeListErr\n\t\t\t})\n\t\t\tcheckFunc := CheckReadyForTests(c, tc.nonblockingTaints, tc.allowedNotReadyNodes, testLargeClusterThreshold)\n\t\t\tout, err := checkFunc()\n\t\t\tif out != tc.expected {\n\t\t\t\tt.Errorf(\"Expected %v but got %v\", tc.expected, out)\n\t\t\t}\n\t\t\tswitch {\n\t\t\tcase err == nil && len(tc.expectedErr) > 0:\n\t\t\t\tt.Errorf(\"Expected error %q nil\", tc.expectedErr)\n\t\t\tcase err != nil && err.Error() != tc.expectedErr:\n\t\t\t\tt.Errorf(\"Expected error %q but got %q\", tc.expectedErr, err.Error())\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestReadyForTests(t *testing.T) {\n\tfromVanillaNode := func(f func(*v1.Node)) *v1.Node {\n\t\tvanillaNode := &v1.Node{\n\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"test-node\"},\n\t\t\tStatus: v1.NodeStatus{\n\t\t\t\tConditions: 
[]v1.NodeCondition{\n\t\t\t\t\t{Type: v1.NodeReady, Status: v1.ConditionTrue},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tf(vanillaNode)\n\t\treturn vanillaNode\n\t}\n\t_ = fromVanillaNode\n\ttcs := []struct {\n\t\tdesc string\n\t\tnode *v1.Node\n\t\tnonblockingTaints string\n\t\texpected bool\n\t}{\n\t\t{\n\t\t\tdesc: \"Vanilla node should pass\",\n\t\t\tnode: fromVanillaNode(func(n *v1.Node) {\n\t\t\t}),\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Vanilla node should pass with non-applicable nonblocking taint\",\n\t\t\tnonblockingTaints: \"foo\",\n\t\t\tnode: fromVanillaNode(func(n *v1.Node) {\n\t\t\t}),\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Tainted node should pass if effect is TaintEffectPreferNoSchedule\",\n\t\t\tnode: fromVanillaNode(func(n *v1.Node) {\n\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectPreferNoSchedule}}\n\t\t\t}),\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Tainted node should fail if effect is TaintEffectNoExecute\",\n\t\t\tnode: fromVanillaNode(func(n *v1.Node) {\n\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoExecute}}\n\t\t\t}),\n\t\t\texpected: false,\n\t\t}, {\n\t\t\tdesc: \"Tainted node should fail\",\n\t\t\tnode: fromVanillaNode(func(n *v1.Node) {\n\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoSchedule}}\n\t\t\t}),\n\t\t\texpected: false,\n\t\t}, {\n\t\t\tdesc: \"Tainted node should pass if nonblocking\",\n\t\t\tnonblockingTaints: \"foo\",\n\t\t\tnode: fromVanillaNode(func(n *v1.Node) {\n\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoSchedule}}\n\t\t\t}),\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Node with network not ready fails\",\n\t\t\tnode: fromVanillaNode(func(n *v1.Node) {\n\t\t\t\tn.Status.Conditions = append(n.Status.Conditions,\n\t\t\t\t\tv1.NodeCondition{Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue},\n\t\t\t\t)\n\t\t\t}),\n\t\t\texpected: false,\n\t\t}, {\n\t\t\tdesc: \"Node fails unless NodeReady status\",\n\t\t\tnode: fromVanillaNode(func(n *v1.Node) {\n\t\t\t\tn.Status.Conditions = []v1.NodeCondition{}\n\t\t\t}),\n\t\t\texpected: false,\n\t\t},\n\t}\n\n\tfor _, tc := range tcs {\n\t\tt.Run(tc.desc, func(t *testing.T) {\n\t\t\tout := readyForTests(tc.node, tc.nonblockingTaints)\n\t\t\tif out != tc.expected {\n\t\t\t\tt.Errorf(\"Expected %v but got %v\", tc.expected, out)\n\t\t\t}\n\t\t})\n\t}\n}\n<commit_msg>e2e: fix node wait test<commit_after>\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage node\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\n\tv1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/client-go\/kubernetes\/fake\"\n\tk8stesting \"k8s.io\/client-go\/testing\"\n)\n\n\/\/ TestCheckReadyForTests specifically is concerned about the multi-node logic\n\/\/ since single node checks are in TestReadyForTests.\nfunc TestCheckReadyForTests(t *testing.T) {\n\t\/\/ This is a duplicate 
definition of the constant in pkg\/controller\/service\/controller.go\n\tlabelNodeRoleControlPlane := \"node-role.kubernetes.io\/control-plane\"\n\n\tfromVanillaNode := func(f func(*v1.Node)) v1.Node {\n\t\tvanillaNode := &v1.Node{\n\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"test-node\"},\n\t\t\tStatus: v1.NodeStatus{\n\t\t\t\tConditions: []v1.NodeCondition{\n\t\t\t\t\t{Type: v1.NodeReady, Status: v1.ConditionTrue},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tf(vanillaNode)\n\t\treturn *vanillaNode\n\t}\n\n\ttcs := []struct {\n\t\tdesc string\n\t\tnonblockingTaints string\n\t\tallowedNotReadyNodes int\n\t\tnodes []v1.Node\n\t\tnodeListErr error\n\t\texpected bool\n\t\texpectedErr string\n\t}{\n\t\t{\n\t\t\tdesc: \"Vanilla node should pass\",\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {}),\n\t\t\t},\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Default value for nonblocking taints tolerates control plane taint\",\n\t\t\tnonblockingTaints: `node-role.kubernetes.io\/control-plane`,\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: labelNodeRoleControlPlane, Effect: v1.TaintEffectNoSchedule}}\n\t\t\t\t}),\n\t\t\t},\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Tainted node should fail if effect is TaintEffectNoExecute\",\n\t\t\tnonblockingTaints: \"bar\",\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoExecute}}\n\t\t\t\t})},\n\t\t\texpected: false,\n\t\t}, {\n\t\t\tdesc: \"Tainted node can be allowed via allowedNotReadyNodes\",\n\t\t\tnonblockingTaints: \"bar\",\n\t\t\tallowedNotReadyNodes: 1,\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoExecute}}\n\t\t\t\t})},\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Multi-node, all OK\",\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {}),\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {}),\n\t\t\t},\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Multi-node, single blocking node blocks\",\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {}),\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoSchedule}}\n\t\t\t\t}),\n\t\t\t},\n\t\t\texpected: false,\n\t\t}, {\n\t\t\tdesc: \"Multi-node, single blocking node allowed via allowedNotReadyNodes\",\n\t\t\tallowedNotReadyNodes: 1,\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {}),\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoSchedule}}\n\t\t\t\t}),\n\t\t\t},\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Multi-node, single blocking node allowed via nonblocking taint\",\n\t\t\tnonblockingTaints: \"foo\",\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {}),\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoSchedule}}\n\t\t\t\t}),\n\t\t\t},\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Multi-node, both blocking nodes allowed via separate nonblocking taints\",\n\t\t\tnonblockingTaints: \"foo,bar\",\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {}),\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoSchedule}}\n\t\t\t\t}),\n\t\t\t\tfromVanillaNode(func(n 
*v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"bar\", Effect: v1.TaintEffectNoSchedule}}\n\t\t\t\t}),\n\t\t\t},\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Multi-node, one blocking node allowed via nonblocking taints still blocked\",\n\t\t\tnonblockingTaints: \"foo,notbar\",\n\t\t\tnodes: []v1.Node{\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {}),\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoSchedule}}\n\t\t\t\t}),\n\t\t\t\tfromVanillaNode(func(n *v1.Node) {\n\t\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"bar\", Effect: v1.TaintEffectNoSchedule}}\n\t\t\t\t}),\n\t\t\t},\n\t\t\texpected: false,\n\t\t}, {\n\t\t\tdesc: \"Errors from node list are reported\",\n\t\t\tnodeListErr: errors.New(\"Forced error\"),\n\t\t\texpected: false,\n\t\t\texpectedErr: \"Forced error\",\n\t\t},\n\t}\n\n\t\/\/ Only determines some logging functionality; not relevant so set to a large value.\n\ttestLargeClusterThreshold := 1000\n\n\tfor _, tc := range tcs {\n\t\tt.Run(tc.desc, func(t *testing.T) {\n\t\t\tc := fake.NewSimpleClientset()\n\t\t\tc.PrependReactor(\"list\", \"nodes\", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\t\tnodeList := &v1.NodeList{Items: tc.nodes}\n\t\t\t\treturn true, nodeList, tc.nodeListErr\n\t\t\t})\n\t\t\tcheckFunc := CheckReadyForTests(c, tc.nonblockingTaints, tc.allowedNotReadyNodes, testLargeClusterThreshold)\n\t\t\t\/\/ The check function returns \"false, nil\" during its\n\t\t\t\/\/ first two calls, therefore we have to try several\n\t\t\t\/\/ times until we get the expected error.\n\t\t\tfor attempt := 0; attempt <= 3; attempt++ {\n\t\t\t\tout, err := checkFunc()\n\t\t\t\texpected := tc.expected\n\t\t\t\texpectedErr := tc.expectedErr\n\t\t\t\tif tc.nodeListErr != nil && attempt < 2 {\n\t\t\t\t\texpected = false\n\t\t\t\t\texpectedErr = \"\"\n\t\t\t\t}\n\t\t\t\tif out != expected {\n\t\t\t\t\tt.Errorf(\"Expected %v but got %v\", expected, out)\n\t\t\t\t}\n\t\t\t\tswitch {\n\t\t\t\tcase err == nil && expectedErr != \"\":\n\t\t\t\t\tt.Errorf(\"attempt #%d: expected error %q nil\", attempt, expectedErr)\n\t\t\t\tcase err != nil && err.Error() != expectedErr:\n\t\t\t\t\tt.Errorf(\"attempt #%d: expected error %q but got %q\", attempt, expectedErr, err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestReadyForTests(t *testing.T) {\n\tfromVanillaNode := func(f func(*v1.Node)) *v1.Node {\n\t\tvanillaNode := &v1.Node{\n\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"test-node\"},\n\t\t\tStatus: v1.NodeStatus{\n\t\t\t\tConditions: []v1.NodeCondition{\n\t\t\t\t\t{Type: v1.NodeReady, Status: v1.ConditionTrue},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tf(vanillaNode)\n\t\treturn vanillaNode\n\t}\n\t_ = fromVanillaNode\n\ttcs := []struct {\n\t\tdesc string\n\t\tnode *v1.Node\n\t\tnonblockingTaints string\n\t\texpected bool\n\t}{\n\t\t{\n\t\t\tdesc: \"Vanilla node should pass\",\n\t\t\tnode: fromVanillaNode(func(n *v1.Node) {\n\t\t\t}),\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Vanilla node should pass with non-applicable nonblocking taint\",\n\t\t\tnonblockingTaints: \"foo\",\n\t\t\tnode: fromVanillaNode(func(n *v1.Node) {\n\t\t\t}),\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Tainted node should pass if effect is TaintEffectPreferNoSchedule\",\n\t\t\tnode: fromVanillaNode(func(n *v1.Node) {\n\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectPreferNoSchedule}}\n\t\t\t}),\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Tainted node should fail if 
effect is TaintEffectNoExecute\",\n\t\t\tnode: fromVanillaNode(func(n *v1.Node) {\n\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoExecute}}\n\t\t\t}),\n\t\t\texpected: false,\n\t\t}, {\n\t\t\tdesc: \"Tainted node should fail\",\n\t\t\tnode: fromVanillaNode(func(n *v1.Node) {\n\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoSchedule}}\n\t\t\t}),\n\t\t\texpected: false,\n\t\t}, {\n\t\t\tdesc: \"Tainted node should pass if nonblocking\",\n\t\t\tnonblockingTaints: \"foo\",\n\t\t\tnode: fromVanillaNode(func(n *v1.Node) {\n\t\t\t\tn.Spec.Taints = []v1.Taint{{Key: \"foo\", Effect: v1.TaintEffectNoSchedule}}\n\t\t\t}),\n\t\t\texpected: true,\n\t\t}, {\n\t\t\tdesc: \"Node with network not ready fails\",\n\t\t\tnode: fromVanillaNode(func(n *v1.Node) {\n\t\t\t\tn.Status.Conditions = append(n.Status.Conditions,\n\t\t\t\t\tv1.NodeCondition{Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue},\n\t\t\t\t)\n\t\t\t}),\n\t\t\texpected: false,\n\t\t}, {\n\t\t\tdesc: \"Node fails unless NodeReady status\",\n\t\t\tnode: fromVanillaNode(func(n *v1.Node) {\n\t\t\t\tn.Status.Conditions = []v1.NodeCondition{}\n\t\t\t}),\n\t\t\texpected: false,\n\t\t},\n\t}\n\n\tfor _, tc := range tcs {\n\t\tt.Run(tc.desc, func(t *testing.T) {\n\t\t\tout := readyForTests(tc.node, tc.nonblockingTaints)\n\t\t\tif out != tc.expected {\n\t\t\t\tt.Errorf(\"Expected %v but got %v\", tc.expected, out)\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/go:build integration\n\/\/ +build integration\n\n\/*\nCopyright 2021 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage integration\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst (\n\tmountGID = \"0\"\n\tmountMSize = \"6543\"\n\tmountMode = \"0777\"\n\tmountPort = \"46464\"\n\tmountUID = \"0\"\n)\n\n\/\/ TestMountStart tests using the mount command on start\nfunc TestMountStart(t *testing.T) {\n\tif NoneDriver() {\n\t\tt.Skip(\"skipping: none driver does not support mount\")\n\t}\n\n\ttype validateFunc func(context.Context, *testing.T, string)\n\tprofile1 := UniqueProfileName(\"mount-start-1\")\n\tprofile2 := UniqueProfileName(\"mount-start-2\")\n\tctx, cancel := context.WithTimeout(context.Background(), Minutes(15))\n\tdefer Cleanup(t, profile1, cancel)\n\tdefer Cleanup(t, profile2, cancel)\n\n\t\/\/ Serial tests\n\tt.Run(\"serial\", func(t *testing.T) {\n\t\ttests := []struct {\n\t\t\tname string\n\t\t\tvalidator validateFunc\n\t\t\tprofile string\n\t\t}{\n\t\t\t{\"StartWithMountFirst\", validateStartWithMount, profile1},\n\t\t\t{\"StartWithMountSecond\", validateStartWithMount, profile2},\n\t\t\t{\"VerifyMountFirst\", validateMount, profile1},\n\t\t\t{\"VerifyMountSecond\", validateMount, profile2},\n\t\t\t{\"DeleteFirst\", validateDelete, profile1},\n\t\t\t{\"VerifyMountPostDelete\", validateMount, profile2},\n\t\t\t{\"Stop\", validateMountStop, profile2},\n\t\t\t{\"RestartStopped\", validateRestart, 
profile2},\n\t\t\t{\"VerifyMountPostStop\", validateMount, profile2},\n\t\t}\n\n\t\tfor _, test := range tests {\n\t\t\tif ctx.Err() == context.DeadlineExceeded {\n\t\t\t\tt.Fatalf(\"Unable to run more tests (deadline exceeded)\")\n\t\t\t}\n\n\t\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\t\ttest.validator(ctx, t, test.profile)\n\t\t\t})\n\t\t}\n\t})\n}\n\n\/\/ validateStartWithMount starts a cluster with mount enabled\nfunc validateStartWithMount(ctx context.Context, t *testing.T, profile string) {\n\tdefer PostMortemLogs(t, profile)\n\n\targs := []string{\"start\", \"-p\", profile, \"--memory=2048\", \"--mount\", \"--mount-gid\", mountGID, \"--mount-msize\", mountMSize, \"--mount-mode\", mountMode, \"--mount-port\", mountPort, \"--mount-uid\", mountUID}\n\targs = append(args, StartArgs()...)\n\trr, err := Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to start minikube with args: %q : %v\", rr.Command(), err)\n\t}\n}\n\n\/\/ validateMount checks if the cluster has a folder mounted\nfunc validateMount(ctx context.Context, t *testing.T, profile string) {\n\tdefer PostMortemLogs(t, profile)\n\n\tsshArgs := []string{\"-p\", profile, \"ssh\", \"--\"}\n\n\targs := sshArgs\n\targs = append(args, \"ls\", \"\/minikube-host\")\n\trr, err := Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Fatalf(\"mount failed: %q : %v\", rr.Command(), err)\n\t}\n\n\t\/\/ Docker has it's own mounting method, it doesn't respect the mounting flags\n\tif DockerDriver() {\n\t\treturn\n\t}\n\n\targs = sshArgs\n\targs = append(args, \"stat\", \"--format\", \"'%a'\", \"\/minikube-host\")\n\trr, err = Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get directory mode: %v\", err)\n\t}\n\n\twant := \"777\"\n\tif !strings.Contains(rr.Output(), want) {\n\t\tt.Errorf(\"wanted mode to be %q; got: %q\", want, rr.Output())\n\t}\n\n\t\/\/ We can't get the mount details with Hyper-V\n\tif HyperVDriver() {\n\t\treturn\n }\n\n\targs = sshArgs\n\targs = append(args, \"mount\", \"|\", \"grep\", \"9p\")\n\trr, err = Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get mount information: %v\", err)\n\t}\n\n\tflags := []struct {\n\t\tkey string\n\t\texpected string\n\t}{\n\t\t{\"gid\", mountGID},\n\t\t{\"msize\", mountMSize},\n\t\t{\"port\", mountPort},\n\t\t{\"uid\", mountUID},\n\t}\n\n\tfor _, flag := range flags {\n\t\twant := fmt.Sprintf(\"%s=%s\", flag.key, flag.expected)\n\t\tif !strings.Contains(rr.Output(), want) {\n\t\t\tt.Errorf(\"wanted gid to be: %q; got: %q\", want, rr.Output())\n\t\t}\n\t}\n}\n\n\/\/ validateMountStop stops a cluster\nfunc validateMountStop(ctx context.Context, t *testing.T, profile string) {\n\tdefer PostMortemLogs(t, profile)\n\n\targs := []string{\"stop\", \"-p\", profile}\n\trr, err := Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Fatalf(\"stop failed: %q : %v\", rr.Command(), err)\n\t}\n}\n\n\/\/ validateRestart restarts a cluster\nfunc validateRestart(ctx context.Context, t *testing.T, profile string) {\n\tdefer PostMortemLogs(t, profile)\n\n\targs := []string{\"start\", \"-p\", profile}\n\trr, err := Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Fatalf(\"restart failed: %q : %v\", rr.Command(), err)\n\t}\n}\n<commit_msg>format<commit_after>\/\/go:build integration\n\/\/ +build integration\n\n\/*\nCopyright 2021 The Kubernetes Authors All rights reserved.\n\nLicensed 
under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage integration\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"testing\"\n)\n\nconst (\n\tmountGID = \"0\"\n\tmountMSize = \"6543\"\n\tmountMode = \"0777\"\n\tmountPort = \"46464\"\n\tmountUID = \"0\"\n)\n\n\/\/ TestMountStart tests using the mount command on start\nfunc TestMountStart(t *testing.T) {\n\tif NoneDriver() {\n\t\tt.Skip(\"skipping: none driver does not support mount\")\n\t}\n\n\ttype validateFunc func(context.Context, *testing.T, string)\n\tprofile1 := UniqueProfileName(\"mount-start-1\")\n\tprofile2 := UniqueProfileName(\"mount-start-2\")\n\tctx, cancel := context.WithTimeout(context.Background(), Minutes(15))\n\tdefer Cleanup(t, profile1, cancel)\n\tdefer Cleanup(t, profile2, cancel)\n\n\t\/\/ Serial tests\n\tt.Run(\"serial\", func(t *testing.T) {\n\t\ttests := []struct {\n\t\t\tname string\n\t\t\tvalidator validateFunc\n\t\t\tprofile string\n\t\t}{\n\t\t\t{\"StartWithMountFirst\", validateStartWithMount, profile1},\n\t\t\t{\"StartWithMountSecond\", validateStartWithMount, profile2},\n\t\t\t{\"VerifyMountFirst\", validateMount, profile1},\n\t\t\t{\"VerifyMountSecond\", validateMount, profile2},\n\t\t\t{\"DeleteFirst\", validateDelete, profile1},\n\t\t\t{\"VerifyMountPostDelete\", validateMount, profile2},\n\t\t\t{\"Stop\", validateMountStop, profile2},\n\t\t\t{\"RestartStopped\", validateRestart, profile2},\n\t\t\t{\"VerifyMountPostStop\", validateMount, profile2},\n\t\t}\n\n\t\tfor _, test := range tests {\n\t\t\tif ctx.Err() == context.DeadlineExceeded {\n\t\t\t\tt.Fatalf(\"Unable to run more tests (deadline exceeded)\")\n\t\t\t}\n\n\t\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\t\ttest.validator(ctx, t, test.profile)\n\t\t\t})\n\t\t}\n\t})\n}\n\n\/\/ validateStartWithMount starts a cluster with mount enabled\nfunc validateStartWithMount(ctx context.Context, t *testing.T, profile string) {\n\tdefer PostMortemLogs(t, profile)\n\n\targs := []string{\"start\", \"-p\", profile, \"--memory=2048\", \"--mount\", \"--mount-gid\", mountGID, \"--mount-msize\", mountMSize, \"--mount-mode\", mountMode, \"--mount-port\", mountPort, \"--mount-uid\", mountUID}\n\targs = append(args, StartArgs()...)\n\trr, err := Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to start minikube with args: %q : %v\", rr.Command(), err)\n\t}\n}\n\n\/\/ validateMount checks if the cluster has a folder mounted\nfunc validateMount(ctx context.Context, t *testing.T, profile string) {\n\tdefer PostMortemLogs(t, profile)\n\n\tsshArgs := []string{\"-p\", profile, \"ssh\", \"--\"}\n\n\targs := sshArgs\n\targs = append(args, \"ls\", \"\/minikube-host\")\n\trr, err := Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Fatalf(\"mount failed: %q : %v\", rr.Command(), err)\n\t}\n\n\t\/\/ Docker has it's own mounting method, it doesn't respect the mounting flags\n\tif DockerDriver() {\n\t\treturn\n\t}\n\n\targs = sshArgs\n\targs = append(args, \"stat\", \"--format\", \"'%a'\", 
\"\/minikube-host\")\n\trr, err = Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get directory mode: %v\", err)\n\t}\n\n\twant := \"777\"\n\tif !strings.Contains(rr.Output(), want) {\n\t\tt.Errorf(\"wanted mode to be %q; got: %q\", want, rr.Output())\n\t}\n\n\t\/\/ We can't get the mount details with Hyper-V\n\tif HyperVDriver() {\n\t\treturn\n\t}\n\n\targs = sshArgs\n\targs = append(args, \"mount\", \"|\", \"grep\", \"9p\")\n\trr, err = Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get mount information: %v\", err)\n\t}\n\n\tflags := []struct {\n\t\tkey string\n\t\texpected string\n\t}{\n\t\t{\"gid\", mountGID},\n\t\t{\"msize\", mountMSize},\n\t\t{\"port\", mountPort},\n\t\t{\"uid\", mountUID},\n\t}\n\n\tfor _, flag := range flags {\n\t\twant := fmt.Sprintf(\"%s=%s\", flag.key, flag.expected)\n\t\tif !strings.Contains(rr.Output(), want) {\n\t\t\tt.Errorf(\"wanted gid to be: %q; got: %q\", want, rr.Output())\n\t\t}\n\t}\n}\n\n\/\/ validateMountStop stops a cluster\nfunc validateMountStop(ctx context.Context, t *testing.T, profile string) {\n\tdefer PostMortemLogs(t, profile)\n\n\targs := []string{\"stop\", \"-p\", profile}\n\trr, err := Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Fatalf(\"stop failed: %q : %v\", rr.Command(), err)\n\t}\n}\n\n\/\/ validateRestart restarts a cluster\nfunc validateRestart(ctx context.Context, t *testing.T, profile string) {\n\tdefer PostMortemLogs(t, profile)\n\n\targs := []string{\"start\", \"-p\", profile}\n\trr, err := Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Fatalf(\"restart failed: %q : %v\", rr.Command(), err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package multibot\n\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"torpedobot\/common\"\n\t\"torpedobot\/memcache\"\n\t\"sync\"\n\n\t\"github.com\/getsentry\/raven-go\"\n)\n\nvar bot *TorpedoBot\nvar once sync.Once\n\ntype TorpedoBot struct {\n\tcaches map[string]*memcache.MemCacheType\n\tcommandHandlers map[string]func(*TorpedoBotAPI, interface{}, string)\n\tconfig struct {\n\t\tSkypeIncomingAddr string\n\t}\n\tlogger *log.Logger\n}\n\nfunc (tb *TorpedoBot) PostMessage(channel interface{}, message string, api *TorpedoBotAPI, richmsgs ...RichMessage) {\n\tif len(richmsgs) > 0 {\n\t\tapi.PostMessage(channel, message, richmsgs[0])\n\t} else {\n\t\tapi.PostMessage(channel, message)\n\t}\n\n}\n\nfunc (tb *TorpedoBot) processChannelEvent(api *TorpedoBotAPI, channel interface{}, incoming_message string) {\n\tif strings.HasPrefix(incoming_message, api.CommandPrefix) {\n\t\tcommand := strings.TrimPrefix(incoming_message, api.CommandPrefix)\n\t\tfound := 0\n\t\tfor handler := range tb.commandHandlers {\n\t\t\tif strings.HasPrefix(strings.Split(command, \" \")[0], handler) {\n\t\t\t\tfound += 1\n\t\t\t\ttb.commandHandlers[handler](api, channel, incoming_message)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\ttb.logger.Printf(\"PROCESS! -> `%s`\", command)\n\t\tif found == 0 {\n\t\t\tapi.PostMessage(channel, fmt.Sprintf(\"Could not process your message: %s%s. Command unknown. 
Send %shelp for list of valid commands.\", api.CommandPrefix, command, api.CommandPrefix))\n\t\t}\n\t}\n}\n\nfunc (tb *TorpedoBot) RunLoop() {\n\tfor {\n\t\ttime.Sleep(time.Second)\n\t}\n}\n\nfunc (tb *TorpedoBot) RunBotsCSV(method func(apiKey, cmd_prefix string), CSV, cmd_prefix string) {\n\twrapped := func(a, b string) {}\n\tif os.Getenv(\"SENTRY_DSN\") != \"\" {\n\t\ttb.logger.Print(\"Using Sentry error reporting...\\n\")\n\t\twrapped = func(apiKey, cmd_prefix string) {\n\t\t\traven.CapturePanic(func() {\n\t\t\t\tmethod(apiKey, cmd_prefix)\n\t\t\t}, nil)\n\t\t}\n\t} else {\n\t\twrapped = method\n\t}\n\tfor _, key := range strings.Split(CSV, \",\") {\n\t\tif key == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tgo wrapped(key, cmd_prefix)\n\t}\n}\n\nfunc (tb *TorpedoBot) RegisterHandlers(handlers map[string]func(*TorpedoBotAPI, interface{}, string)) {\n\ttb.commandHandlers = handlers\n\treturn\n}\n\nfunc (tb *TorpedoBot) GetCommandHandlers() (handlers map[string]func(*TorpedoBotAPI, interface{}, string)) {\n\treturn tb.commandHandlers\n}\n\nfunc (tb *TorpedoBot) GetCreateCache(name string) (cache *memcache.MemCacheType) {\n\tvalue, success := tb.caches[name]\n\tif !success {\n\t\tcache = memcache.New()\n\t\ttb.caches[name] = cache\n\t} else {\n\t\tcache = value\n\t}\n\treturn\n}\n\nfunc (tb *TorpedoBot) GetCachedItem(name string) (item string) {\n\tcache := *tb.GetCreateCache(name)\n\tif cache.Len() > 0 {\n\t\ttb.logger.Printf(\"\\nUsing cached quote...%v\\n\", cache.Len())\n\t\tkey := \"\"\n\t\tfor key = range cache.Cache() {\n\t\t\tbreak\n\t\t}\n\t\tquote, _ := cache.Get(key)\n\t\tcache.Delete(key)\n\t\titem = quote\n\t}\n\treturn\n}\n\nfunc (tb *TorpedoBot) SetCachedItems(name string, items map[int]string) (item string) {\n\tcache := *tb.GetCreateCache(name)\n\tfor idx := range items {\n\t\tmessage := common.MD5Hash(items[idx])\n\t\t_, ok := cache.Get(message)\n\t\tif !ok {\n\t\t\tcache.Set(message, items[idx])\n\t\t}\n\t}\n\n\titem = items[0]\n\tmessage := common.MD5Hash(item)\n\tcache.Delete(message)\n\treturn\n}\n\n\nfunc New(skype_incoming_addr string) *TorpedoBot {\n\tonce.Do(func() {\n\t\tbot = &TorpedoBot{}\n\t\tbot.logger = log.New(os.Stdout, \"torpedo-bot: \", log.Lshortfile|log.LstdFlags)\n\t\tbot.caches = make(map[string]*memcache.MemCacheType)\n\t\tbot.config.SkypeIncomingAddr = skype_incoming_addr\n\t})\n\treturn bot\n}\n<commit_msg>raven wait<commit_after>package multibot\n\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"torpedobot\/common\"\n\t\"torpedobot\/memcache\"\n\t\"sync\"\n\n\t\"github.com\/getsentry\/raven-go\"\n)\n\nvar bot *TorpedoBot\nvar once sync.Once\n\ntype TorpedoBot struct {\n\tcaches map[string]*memcache.MemCacheType\n\tcommandHandlers map[string]func(*TorpedoBotAPI, interface{}, string)\n\tconfig struct {\n\t\tSkypeIncomingAddr string\n\t}\n\tlogger *log.Logger\n}\n\nfunc (tb *TorpedoBot) PostMessage(channel interface{}, message string, api *TorpedoBotAPI, richmsgs ...RichMessage) {\n\tif len(richmsgs) > 0 {\n\t\tapi.PostMessage(channel, message, richmsgs[0])\n\t} else {\n\t\tapi.PostMessage(channel, message)\n\t}\n\n}\n\nfunc (tb *TorpedoBot) processChannelEvent(api *TorpedoBotAPI, channel interface{}, incoming_message string) {\n\tif strings.HasPrefix(incoming_message, api.CommandPrefix) {\n\t\tcommand := strings.TrimPrefix(incoming_message, api.CommandPrefix)\n\t\tfound := 0\n\t\tfor handler := range tb.commandHandlers {\n\t\t\tif strings.HasPrefix(strings.Split(command, \" \")[0], handler) {\n\t\t\t\tfound += 
1\n\t\t\t\ttb.commandHandlers[handler](api, channel, incoming_message)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\ttb.logger.Printf(\"PROCESS! -> `%s`\", command)\n\t\tif found == 0 {\n\t\t\tapi.PostMessage(channel, fmt.Sprintf(\"Could not process your message: %s%s. Command unknown. Send %shelp for list of valid commands.\", api.CommandPrefix, command, api.CommandPrefix))\n\t\t}\n\t}\n}\n\nfunc (tb *TorpedoBot) RunLoop() {\n\tfor {\n\t\ttime.Sleep(time.Second)\n\t}\n}\n\nfunc (tb *TorpedoBot) RunBotsCSV(method func(apiKey, cmd_prefix string), CSV, cmd_prefix string) {\n\twrapped := func(a, b string) {}\n\tenv_dsn := os.Getenv(\"SENTRY_DSN\")\n\tif env_dsn != \"\" {\n\t\ttb.logger.Print(\"Using Sentry error reporting...\\n\")\n\t\traven.SetDSN(env_dsn)\n\t\twrapped = func(apiKey, cmd_prefix string) {\n\t\t\traven.CapturePanicAndWait(func() {\n\t\t\t\tmethod(apiKey, cmd_prefix)\n\t\t\t}, nil)\n\t\t}\n\t} else {\n\t\twrapped = method\n\t}\n\tfor _, key := range strings.Split(CSV, \",\") {\n\t\tif key == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tgo wrapped(key, cmd_prefix)\n\t}\n}\n\nfunc (tb *TorpedoBot) RegisterHandlers(handlers map[string]func(*TorpedoBotAPI, interface{}, string)) {\n\ttb.commandHandlers = handlers\n\treturn\n}\n\nfunc (tb *TorpedoBot) GetCommandHandlers() (handlers map[string]func(*TorpedoBotAPI, interface{}, string)) {\n\treturn tb.commandHandlers\n}\n\nfunc (tb *TorpedoBot) GetCreateCache(name string) (cache *memcache.MemCacheType) {\n\tvalue, success := tb.caches[name]\n\tif !success {\n\t\tcache = memcache.New()\n\t\ttb.caches[name] = cache\n\t} else {\n\t\tcache = value\n\t}\n\treturn\n}\n\nfunc (tb *TorpedoBot) GetCachedItem(name string) (item string) {\n\tcache := *tb.GetCreateCache(name)\n\tif cache.Len() > 0 {\n\t\ttb.logger.Printf(\"\\nUsing cached quote...%v\\n\", cache.Len())\n\t\tkey := \"\"\n\t\tfor key = range cache.Cache() {\n\t\t\tbreak\n\t\t}\n\t\tquote, _ := cache.Get(key)\n\t\tcache.Delete(key)\n\t\titem = quote\n\t}\n\treturn\n}\n\nfunc (tb *TorpedoBot) SetCachedItems(name string, items map[int]string) (item string) {\n\tcache := *tb.GetCreateCache(name)\n\tfor idx := range items {\n\t\tmessage := common.MD5Hash(items[idx])\n\t\t_, ok := cache.Get(message)\n\t\tif !ok {\n\t\t\tcache.Set(message, items[idx])\n\t\t}\n\t}\n\n\titem = items[0]\n\tmessage := common.MD5Hash(item)\n\tcache.Delete(message)\n\treturn\n}\n\n\nfunc New(skype_incoming_addr string) *TorpedoBot {\n\tonce.Do(func() {\n\t\tbot = &TorpedoBot{}\n\t\tbot.logger = log.New(os.Stdout, \"torpedo-bot: \", log.Lshortfile|log.LstdFlags)\n\t\tbot.caches = make(map[string]*memcache.MemCacheType)\n\t\tbot.config.SkypeIncomingAddr = skype_incoming_addr\n\t})\n\treturn bot\n}\n<|endoftext|>"} {"text":"<commit_before>package logic\n\nimport \"sync\"\n\ntype Rule interface {\n\tCondState() bool\n\tSetCondState(bool)\n\tRunEnter()\n\tRunExit()\n\tAddExitAction(RuleAction)\n\tAddEnterAction(RuleAction)\n\tAddCondition(RuleCondition)\n\tConditions() []RuleCondition\n\tUuid() string\n\tSetUuid(string)\n}\n\ntype rule struct {\n\tName string `json:\"name\"`\n\tUuid_ string `json:\"uuid\"`\n\tConditions_ []RuleCondition `json:\"conditions\"`\n\tEnterActions_ []RuleAction `json:\"enterActions\"`\n\tExitActions_ []RuleAction `json:\"exitActions\"`\n\tcondState bool\n\tsync.RWMutex\n}\n\nfunc (r *rule) Uuid() string {\n\tr.RLock()\n\tdefer r.RUnlock()\n\treturn r.Uuid_\n}\nfunc (r *rule) SetUuid(uuid string) {\n\tr.Lock()\n\tr.Uuid_ = uuid\n\tr.Unlock()\n}\nfunc (r *rule) CondState() bool {\n\tr.RLock()\n\tdefer 
r.RUnlock()\n\treturn r.condState\n}\nfunc (r *rule) Conditions() []RuleCondition {\n\tr.RLock()\n\tdefer r.RUnlock()\n\treturn r.Conditions_\n}\nfunc (r *rule) SetCondState(cond bool) {\n\t\/\/ write lock, not RLock: condState is mutated here\n\tr.Lock()\n\tr.condState = cond\n\tr.Unlock()\n}\nfunc (r *rule) RunEnter() {\n\tfor _, a := range r.EnterActions_ {\n\t\ta.RunCommand()\n\t}\n}\nfunc (r *rule) RunExit() {\n\tfor _, a := range r.ExitActions_ {\n\t\ta.RunCommand()\n\t}\n}\nfunc (r *rule) AddExitAction(a RuleAction) {\n\tr.Lock()\n\tr.ExitActions_ = append(r.ExitActions_, a)\n\tr.Unlock()\n}\nfunc (r *rule) AddEnterAction(a RuleAction) {\n\tr.Lock()\n\tr.EnterActions_ = append(r.EnterActions_, a)\n\tr.Unlock()\n}\nfunc (r *rule) AddCondition(a RuleCondition) {\n\tr.Lock()\n\tr.Conditions_ = append(r.Conditions_, a)\n\tr.Unlock()\n}\n<commit_msg>separated rule and ruleaction into own files<commit_after>package logic\n\nimport \"sync\"\n\ntype Rule interface {\n\tUuid() string\n\tSetUuid(string)\n\tCondState() bool\n\tSetCondState(bool)\n\tRunEnter()\n\tRunExit()\n\tAddExitAction(RuleAction)\n\tAddEnterAction(RuleAction)\n\tAddCondition(RuleCondition)\n\tConditions() []RuleCondition\n}\n\ntype rule struct {\n\tName string `json:\"name\"`\n\tUuid_ string `json:\"uuid\"`\n\tConditions_ []RuleCondition `json:\"conditions\"`\n\tEnterActions_ []RuleAction `json:\"enterActions\"`\n\tExitActions_ []RuleAction `json:\"exitActions\"`\n\tcondState bool\n\tsync.RWMutex\n}\n\nfunc (r *rule) Uuid() string {\n\tr.RLock()\n\tdefer r.RUnlock()\n\treturn r.Uuid_\n}\nfunc (r *rule) SetUuid(uuid string) {\n\tr.Lock()\n\tr.Uuid_ = uuid\n\tr.Unlock()\n}\nfunc (r *rule) CondState() bool {\n\tr.RLock()\n\tdefer r.RUnlock()\n\treturn r.condState\n}\nfunc (r *rule) Conditions() []RuleCondition {\n\tr.RLock()\n\tdefer r.RUnlock()\n\treturn r.Conditions_\n}\nfunc (r *rule) SetCondState(cond bool) {\n\t\/\/ write lock, not RLock: condState is mutated here\n\tr.Lock()\n\tr.condState = cond\n\tr.Unlock()\n}\nfunc (r *rule) RunEnter() {\n\tfor _, a := range r.EnterActions_ {\n\t\ta.RunCommand()\n\t}\n}\nfunc (r *rule) RunExit() {\n\tfor _, a := range r.ExitActions_ {\n\t\ta.RunCommand()\n\t}\n}\nfunc (r *rule) AddExitAction(a RuleAction) {\n\tr.Lock()\n\tr.ExitActions_ = append(r.ExitActions_, a)\n\tr.Unlock()\n}\nfunc (r *rule) AddEnterAction(a RuleAction) {\n\tr.Lock()\n\tr.EnterActions_ = append(r.EnterActions_, a)\n\tr.Unlock()\n}\nfunc (r *rule) AddCondition(a RuleCondition) {\n\tr.Lock()\n\tr.Conditions_ = append(r.Conditions_, a)\n\tr.Unlock()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 gf Author(https:\/\/gitee.com\/johng\/gf). 
All Rights Reserved.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the MIT License.\n\/\/ If a copy of the MIT was not distributed with this file,\n\/\/ You can obtain one at https:\/\/gitee.com\/johng\/gf.\n\n\/\/ Package gdb provides ORM features for popular relationship databases.\n\/\/ Database ORM.\n\/\/ MySQL is supported out of the box; for other databases, manually import the corresponding third-party driver package.\npackage gdb\n\nimport (\n    \"database\/sql\"\n    \"errors\"\n    \"fmt\"\n    \"gitee.com\/johng\/gf\/g\/container\/gring\"\n    \"gitee.com\/johng\/gf\/g\/container\/gtype\"\n    \"gitee.com\/johng\/gf\/g\/container\/gvar\"\n    \"gitee.com\/johng\/gf\/g\/os\/gcache\"\n    \"gitee.com\/johng\/gf\/g\/util\/grand\"\n    _ \"gitee.com\/johng\/gf\/third\/github.com\/go-sql-driver\/mysql\"\n    \"time\"\n)\n\n\/\/ Interface for database operations\ntype DB interface {\n    \/\/ Method establishing the database connection (developers normally do not call this directly)\n    Open(config *ConfigNode) (*sql.DB, error)\n\n\t\/\/ SQL operation methods API\n\tQuery(query string, args ...interface{}) (*sql.Rows, error)\n\tExec(sql string, args ...interface{}) (sql.Result, error)\n\tPrepare(sql string, execOnMaster...bool) (*sql.Stmt, error)\n\n    \/\/ Internal methods implementing the API (individual databases may override these to customize behavior)\n    doQuery(link dbLink, query string, args ...interface{}) (rows *sql.Rows, err error)\n    doExec(link dbLink, query string, args ...interface{}) (result sql.Result, err error)\n    doPrepare(link dbLink, query string) (*sql.Stmt, error)\n    doInsert(link dbLink, table string, data Map, option int) (result sql.Result, err error)\n    doBatchInsert(link dbLink, table string, list List, batch int, option int) (result sql.Result, err error)\n    doUpdate(link dbLink, table string, data interface{}, condition interface{}, args ...interface{}) (result sql.Result, err error)\n    doDelete(link dbLink, table string, condition interface{}, args ...interface{}) (result sql.Result, err error)\n\n\t\/\/ Database queries\n\tGetAll(query string, args ...interface{}) (Result, error)\n\tGetOne(query string, args ...interface{}) (Record, error)\n\tGetValue(query string, args ...interface{}) (Value, error)\n    GetCount(query string, args ...interface{}) (int, error)\n    GetStruct(obj interface{}, query string, args ...interface{}) error\n\n    \/\/ Create the underlying master\/slave database connection objects\n    Master() (*sql.DB, error)\n    Slave() (*sql.DB, error)\n\n    \/\/ Ping\n\tPingMaster() error\n\tPingSlave() error\n\n\t\/\/ Begin a transaction\n\tBegin() (*TX, error)\n\n\t\/\/ Table insert\/update\/save operations\n\tInsert(table string, data Map) (sql.Result, error)\n\tReplace(table string, data Map) (sql.Result, error)\n\tSave(table string, data Map) (sql.Result, error)\n\n\t\/\/ Table insert\/update\/save operations (batch)\n\tBatchInsert(table string, list List, batch int) (sql.Result, error)\n\tBatchReplace(table string, list List, batch int) (sql.Result, error)\n\tBatchSave(table string, list List, batch int) (sql.Result, error)\n\n\t\/\/ Data update\/delete\n\tUpdate(table string, data interface{}, condition interface{}, args ...interface{}) (sql.Result, error)\n\tDelete(table string, condition interface{}, args ...interface{}) (sql.Result, error)\n\n\t\/\/ Create a chained-operation object (Table is an alias of From)\n\tTable(tables string) *Model\n\tFrom(tables string) *Model\n\n\t\/\/ Settings management\n    SetDebug(debug bool)\n    SetSchema(schema string)\n    GetQueriedSqls() []*Sql\n    PrintQueriedSqls()\n    SetMaxIdleConns(n int)\n    SetMaxOpenConns(n int)\n    SetConnMaxLifetime(n int)\n\n\t\/\/ Internal method interface\n\tgetCache() (*gcache.Cache)\n\tgetChars() (charLeft string, charRight string)\n\tgetDebug() bool\n    filterFields(table string, data map[string]interface{}) map[string]interface{}\n    getTableFields(table string) (map[string]string, error)\n    handleSqlBeforeExec(sql string) string\n}\n\n
\/\/ Core interface executing the underlying database operations\ntype dbLink interface {\n    Query(query string, args ...interface{}) (*sql.Rows, error)\n    Exec(sql string, args ...interface{}) (sql.Result, error)\n    Prepare(sql string) (*sql.Stmt, error)\n}\n\n\/\/ Database connection object\ntype dbBase struct {\n\tdb DB \/\/ database object\n\tgroup string \/\/ configuration group name\n\tdebug *gtype.Bool \/\/ (off by default) whether debug mode is enabled; enables extra debugging features\n\tsqls *gring.Ring \/\/ (effective when debug=true) list of executed SQL statements\n\tcache *gcache.Cache \/\/ database cache, covering the underlying connection-pool objects and query results; note that transactional queries do not use the query cache\n    schema *gtype.String \/\/ manually switched database (schema) name\n\tmaxIdleConnCount *gtype.Int \/\/ maximum number of idle connections in the pool\n    maxOpenConnCount *gtype.Int \/\/ maximum number of open connections in the pool\n    maxConnLifetime *gtype.Int \/\/ (in seconds) how long a connection object may be reused\n}\n\n\/\/ An executed SQL statement\ntype Sql struct {\n\tSql string \/\/ SQL statement (possibly with prepared-statement placeholders)\n\tArgs []interface{} \/\/ list of prepared-statement argument values\n\tError error \/\/ execution result (nil means success)\n\tStart int64 \/\/ execution start time (milliseconds)\n\tEnd int64 \/\/ execution end time (milliseconds)\n\tFunc string \/\/ executing method\n}\n\n\/\/ Value of a table record field\ntype Value = gvar.VarRead\n\n\/\/ A table record as a Map\ntype Record map[string]Value\n\n\/\/ Table records as a List\ntype Result []Record\n\n\/\/ Associative array bound to a single table record (type alias)\ntype Map = map[string]interface{}\n\n\/\/ List of associative arrays (0-indexed) bound to multiple records (type alias)\ntype List = []Map\n\nconst (\n    OPTION_INSERT = 0\n    OPTION_REPLACE = 1\n    OPTION_SAVE = 2\n    OPTION_IGNORE = 3\n    \/\/ default lifetime of a pooled connection (seconds)\n    gDEFAULT_CONN_MAX_LIFE = 10\n)\n\n\/\/ Connect using the default\/specified group configuration; the database cluster configuration item is \"default\"\nfunc New(groupName ...string) (db DB, err error) {\n\tgroup := config.d\n\tif len(groupName) > 0 {\n        group = groupName[0]\n\t}\n\tconfig.RLock()\n\tdefer config.RUnlock()\n\n\tif len(config.c) < 1 {\n\t\treturn nil, errors.New(\"empty database configuration\")\n\t}\n\tif _, ok := config.c[group]; ok {\n\t    if node, err := getConfigNodeByGroup(group, true); err == nil {\n\t        base := &dbBase {\n                group : group,\n                debug : gtype.NewBool(),\n                cache : gcache.New(),\n                schema : gtype.NewString(),\n                maxIdleConnCount : gtype.NewInt(),\n                maxOpenConnCount : gtype.NewInt(),\n                maxConnLifetime : gtype.NewInt(gDEFAULT_CONN_MAX_LIFE),\n            }\n            switch node.Type {\n                case \"mysql\":\n                    base.db = &dbMysql{dbBase : base}\n                case \"pgsql\":\n                    base.db = &dbPgsql{dbBase : base}\n                case \"mssql\":\n                    base.db = &dbMssql{dbBase : base}\n                case \"sqlite\":\n                    base.db = &dbSqlite{dbBase : base}\n                case \"oracle\":\n                    base.db = &dbOracle{dbBase : base}\n                default:\n                    return nil, errors.New(fmt.Sprintf(`unsupported database type \"%s\"`, node.Type))\n            }\n            return base.db, nil\n        } else {\n            return nil, err\n        }\n\t} else {\n\t\treturn nil, errors.New(fmt.Sprintf(\"empty database configuration for item name '%s'\", group))\n\t}\n}\n\n\/\/ Get one configuration node for the given database role; weighted load balancing is computed internally\nfunc getConfigNodeByGroup(group string, master bool) (*ConfigNode, error) {\n    if list, ok := config.c[group]; ok {\n        \/\/ split the cluster list into master and slave lists\n        masterList := make(ConfigGroup, 0)\n        slaveList := make(ConfigGroup, 0)\n        for i := 0; i < len(list); i++ {\n            if list[i].Role == \"slave\" {\n                slaveList = append(slaveList, list[i])\n            } else {\n                masterList = append(masterList, list[i])\n            }\n        }\n        if len(masterList) < 1 {\n            return nil, errors.New(\"at least one master node configuration is required\")\n        }\n        if len(slaveList) < 1 {\n            slaveList = masterList\n        }\n        if master {\n            return getConfigNodeByPriority(masterList), nil\n        } else {\n            return getConfigNodeByPriority(slaveList), nil\n        }\n    } else {\n        return nil, errors.New(fmt.Sprintf(\"empty database configuration for item name '%s'\", group))\n    }\n}\n\n\/\/ Select one configuration node from the database cluster using the load-balancing algorithm (priority configuration).\n\/\/ Worked example of the algorithm:\n\/\/ 1. if two nodes both have priority 1, the random range is [0, 199];\n\/\/ 2. node 1 then covers [0, 99] and node 2 covers [100, 199], a 1:1 ratio;\n\/\/ 3. suppose the computed random number is 99;\n\/\/ 4. then the selected configuration is node 1.\n
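\/\/ A quick numeric check (illustrative): with priorities 1 and 3, the total is 400,\n\/\/ so node 1 covers [0, 99] and node 2 covers [100, 399], i.e. a 1:3 split.\n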
func getConfigNodeByPriority(cg ConfigGroup) *ConfigNode {\n\tif len(cg) < 2 {\n\t\treturn &cg[0]\n\t}\n\tvar total int\n\tfor i := 0; i < len(cg); i++ {\n\t\ttotal += cg[i].Priority * 100\n\t}\n\t\/\/ a total of 0 means no node configured the priority attribute, so default each to 1\n\tif total == 0 {\n        for i := 0; i < len(cg); i++ {\n            cg[i].Priority = 1\n            total += cg[i].Priority * 100\n        }\n    }\n\t\/\/ the end boundary value itself must not be reachable\n\tr := grand.Rand(0, total)\n\tif r > 0 {\n\t\tr -= 1\n\t}\n\tmin := 0\n\tmax := 0\n\tfor i := 0; i < len(cg); i++ {\n\t\tmax = min + cg[i].Priority*100\n\t\t\/\/fmt.Printf(\"r: %d, min: %d, max: %d\\n\", r, min, max)\n\t\tif r >= min && r < max {\n\t\t\treturn &cg[i]\n\t\t} else {\n\t\t\tmin = max\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Obtain the underlying database connection object\nfunc (bs *dbBase) getSqlDb(master bool) (sqlDb *sql.DB, err error) {\n    \/\/ load balancing\n    node, err := getConfigNodeByGroup(bs.group, master)\n    if err != nil {\n        return nil, err\n    }\n    \/\/ default values\n    if node.Charset == \"\" {\n        node.Charset = \"utf8\"\n    }\n    v := bs.cache.GetOrSetFuncLock(node.String(), func() interface{} {\n        sqlDb, err = bs.db.Open(node)\n        if err != nil {\n            return nil\n        }\n\n        if n := bs.maxIdleConnCount.Val(); n > 0 {\n            sqlDb.SetMaxIdleConns(n)\n        } else if node.MaxIdleConnCount > 0 {\n            sqlDb.SetMaxIdleConns(node.MaxIdleConnCount)\n        }\n\n        if n := bs.maxOpenConnCount.Val(); n > 0 {\n            sqlDb.SetMaxOpenConns(n)\n        } else if node.MaxOpenConnCount > 0 {\n            sqlDb.SetMaxOpenConns(node.MaxOpenConnCount)\n        }\n\n        if n := bs.maxConnLifetime.Val(); n > 0 {\n            sqlDb.SetConnMaxLifetime(time.Duration(n) * time.Second)\n        } else if node.MaxConnLifetime > 0 {\n            sqlDb.SetConnMaxLifetime(time.Duration(node.MaxConnLifetime) * time.Second)\n        }\n        return sqlDb\n    }, 0)\n    if v != nil && sqlDb == nil {\n        sqlDb = v.(*sql.DB)\n    }\n    \/\/ whether a database was selected manually\n    if v := bs.schema.Val(); v != \"\" {\n        sqlDb.Exec(\"USE \" + v)\n    }\n    return\n}\n\n\/\/ Switch the database being operated on (note: this switch is global)\nfunc (bs *dbBase) SetSchema(schema string) {\n    bs.schema.Set(schema)\n}\n\n\/\/ Create the underlying master database connection object\nfunc (bs *dbBase) Master() (*sql.DB, error) {\n\treturn bs.getSqlDb(true)\n}\n\n\/\/ Create the underlying slave database connection object\nfunc (bs *dbBase) Slave() (*sql.DB, error) {\n    return bs.getSqlDb(false)\n}\n<commit_msg>update default ConnMaxLifeTime to 30 seconds in gdb package<commit_after>
\/\/ Copyright 2017 gf Author(https:\/\/gitee.com\/johng\/gf). All Rights Reserved.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the MIT License.\n\/\/ If a copy of the MIT was not distributed with this file,\n\/\/ You can obtain one at https:\/\/gitee.com\/johng\/gf.\n\n\/\/ Package gdb provides ORM features for popular relationship databases.\n\/\/ Database ORM.\n\/\/ MySQL is supported out of the box; for other databases, manually import the corresponding third-party driver package.\npackage gdb\n\nimport (\n    \"database\/sql\"\n    \"errors\"\n    \"fmt\"\n    \"gitee.com\/johng\/gf\/g\/container\/gring\"\n    \"gitee.com\/johng\/gf\/g\/container\/gtype\"\n    \"gitee.com\/johng\/gf\/g\/container\/gvar\"\n    \"gitee.com\/johng\/gf\/g\/os\/gcache\"\n    \"gitee.com\/johng\/gf\/g\/util\/grand\"\n    _ \"gitee.com\/johng\/gf\/third\/github.com\/go-sql-driver\/mysql\"\n    \"time\"\n)\n\n\/\/ Interface for database operations\ntype DB interface {\n    \/\/ Method establishing the database connection (developers normally do not call this directly)\n    Open(config *ConfigNode) (*sql.DB, error)\n\n\t\/\/ SQL operation methods API\n\tQuery(query string, args ...interface{}) (*sql.Rows, error)\n\tExec(sql string, args ...interface{}) (sql.Result, error)\n\tPrepare(sql string, execOnMaster...bool) (*sql.Stmt, error)\n\n    \/\/ Internal methods implementing the API (individual databases may override these to customize behavior)\n    doQuery(link dbLink, query string, args ...interface{}) (rows *sql.Rows, err error)\n    doExec(link dbLink, query string, args ...interface{}) (result sql.Result, err error)\n    doPrepare(link dbLink, query string) (*sql.Stmt, error)\n    doInsert(link dbLink, table string, data Map, option int) (result sql.Result, err error)\n    doBatchInsert(link dbLink, table string, list List, batch int, option int) (result sql.Result, err error)\n    doUpdate(link dbLink, table string, data interface{}, condition interface{}, args ...interface{}) (result sql.Result, err error)\n    doDelete(link dbLink, table string, condition interface{}, args ...interface{}) (result sql.Result, err error)\n\n\t\/\/ Database queries\n\tGetAll(query string, args ...interface{}) (Result, error)\n\tGetOne(query string, args ...interface{}) (Record, error)\n\tGetValue(query string, args ...interface{}) (Value, error)\n    GetCount(query string, args ...interface{}) (int, error)\n    GetStruct(obj interface{}, query string, args ...interface{}) error\n\n    \/\/ Create the underlying master\/slave database connection objects\n    Master() (*sql.DB, error)\n    Slave() (*sql.DB, error)\n\n    \/\/ Ping\n\tPingMaster() error\n\tPingSlave() error\n\n\t\/\/ Begin a transaction\n\tBegin() (*TX, error)\n\n\t\/\/ Table insert\/update\/save operations\n\tInsert(table string, data Map) (sql.Result, error)\n\tReplace(table string, data Map) (sql.Result, error)\n\tSave(table string, data Map) (sql.Result, error)\n\n\t\/\/ Table insert\/update\/save operations (batch)\n\tBatchInsert(table string, list List, batch int) (sql.Result, error)\n\tBatchReplace(table string, list List, batch int) (sql.Result, error)\n\tBatchSave(table string, list List, batch int) (sql.Result, error)\n\n\t\/\/ Data update\/delete\n\tUpdate(table string, data interface{}, condition interface{}, args ...interface{}) (sql.Result, error)\n\tDelete(table string, condition interface{}, args ...interface{}) (sql.Result, error)\n\n\t\/\/ Create a chained-operation object (Table is an alias of From)\n\tTable(tables string) *Model\n\tFrom(tables string) *Model\n\n\t\/\/ Settings management\n    SetDebug(debug bool)\n    SetSchema(schema string)\n    GetQueriedSqls() []*Sql\n    PrintQueriedSqls()\n    SetMaxIdleConns(n int)\n    SetMaxOpenConns(n int)\n    SetConnMaxLifetime(n int)\n\n\t\/\/ Internal method interface\n\tgetCache() (*gcache.Cache)\n\tgetChars() (charLeft string, charRight string)\n\tgetDebug() bool\n    filterFields(table string, data map[string]interface{}) map[string]interface{}\n    getTableFields(table string) (map[string]string, error)\n    handleSqlBeforeExec(sql string) string\n}\n\n
\/\/ Core interface executing the underlying database operations\ntype dbLink interface {\n    Query(query string, args ...interface{}) (*sql.Rows, error)\n    Exec(sql string, args ...interface{}) (sql.Result, error)\n    Prepare(sql string) (*sql.Stmt, error)\n}\n\n\/\/ Database connection object\ntype dbBase struct {\n\tdb DB \/\/ database object\n\tgroup string \/\/ configuration group name\n\tdebug *gtype.Bool \/\/ (off by default) whether debug mode is enabled; enables extra debugging features\n\tsqls *gring.Ring \/\/ (effective when debug=true) list of executed SQL statements\n\tcache *gcache.Cache \/\/ database cache, covering the underlying connection-pool objects and query results; note that transactional queries do not use the query cache\n    schema *gtype.String \/\/ manually switched database (schema) name\n\tmaxIdleConnCount *gtype.Int \/\/ maximum number of idle connections in the pool\n    maxOpenConnCount *gtype.Int \/\/ maximum number of open connections in the pool\n    maxConnLifetime *gtype.Int \/\/ (in seconds) how long a connection object may be reused\n}\n\n\/\/ An executed SQL statement\ntype Sql struct {\n\tSql string \/\/ SQL statement (possibly with prepared-statement placeholders)\n\tArgs []interface{} \/\/ list of prepared-statement argument values\n\tError error \/\/ execution result (nil means success)\n\tStart int64 \/\/ execution start time (milliseconds)\n\tEnd int64 \/\/ execution end time (milliseconds)\n\tFunc string \/\/ executing method\n}\n\n\/\/ Value of a table record field\ntype Value = gvar.VarRead\n\n\/\/ A table record as a Map\ntype Record map[string]Value\n\n\/\/ Table records as a List\ntype Result []Record\n\n\/\/ Associative array bound to a single table record (type alias)\ntype Map = map[string]interface{}\n\n\/\/ List of associative arrays (0-indexed) bound to multiple records (type alias)\ntype List = []Map\n\nconst (\n    OPTION_INSERT = 0\n    OPTION_REPLACE = 1\n    OPTION_SAVE = 2\n    OPTION_IGNORE = 3\n    \/\/ default lifetime of a pooled connection (seconds)\n    gDEFAULT_CONN_MAX_LIFE = 30\n)\n\n\/\/ Connect using the default\/specified group configuration; the database cluster configuration item is \"default\"\nfunc New(groupName ...string) (db DB, err error) {\n\tgroup := config.d\n\tif len(groupName) > 0 {\n        group = groupName[0]\n\t}\n\tconfig.RLock()\n\tdefer config.RUnlock()\n\n\tif len(config.c) < 1 {\n\t\treturn nil, errors.New(\"empty database configuration\")\n\t}\n\tif _, ok := config.c[group]; ok {\n\t    if node, err := getConfigNodeByGroup(group, true); err == nil {\n\t        base := &dbBase {\n                group : group,\n                debug : gtype.NewBool(),\n                cache : gcache.New(),\n                schema : gtype.NewString(),\n                maxIdleConnCount : gtype.NewInt(),\n                maxOpenConnCount : gtype.NewInt(),\n                maxConnLifetime : gtype.NewInt(gDEFAULT_CONN_MAX_LIFE),\n            }\n            switch node.Type {\n                case \"mysql\":\n                    base.db = &dbMysql{dbBase : base}\n                case \"pgsql\":\n                    base.db = &dbPgsql{dbBase : base}\n                case \"mssql\":\n                    base.db = &dbMssql{dbBase : base}\n                case \"sqlite\":\n                    base.db = &dbSqlite{dbBase : base}\n                case \"oracle\":\n                    base.db = &dbOracle{dbBase : base}\n                default:\n                    return nil, errors.New(fmt.Sprintf(`unsupported database type \"%s\"`, node.Type))\n            }\n            return base.db, nil\n        } else {\n            return nil, err\n        }\n\t} else {\n\t\treturn nil, errors.New(fmt.Sprintf(\"empty database configuration for item name '%s'\", group))\n\t}\n}\n\n\/\/ Get one configuration node for the given database role; weighted load balancing is computed internally\nfunc getConfigNodeByGroup(group string, master bool) (*ConfigNode, error) {\n    if list, ok := config.c[group]; ok {\n        \/\/ split the cluster list into master and slave lists\n        masterList := make(ConfigGroup, 0)\n        slaveList := make(ConfigGroup, 0)\n        for i := 0; i < len(list); i++ {\n            if list[i].Role == \"slave\" {\n                slaveList = append(slaveList, list[i])\n            } else {\n                masterList = append(masterList, list[i])\n            }\n        }\n        if len(masterList) < 1 {\n            return nil, errors.New(\"at least one master node configuration is required\")\n        }\n        if len(slaveList) < 1 {\n            slaveList = masterList\n        }\n        if master {\n            return getConfigNodeByPriority(masterList), nil\n        } else {\n            return getConfigNodeByPriority(slaveList), nil\n        }\n    } else {\n        return nil, errors.New(fmt.Sprintf(\"empty database configuration for item name '%s'\", group))\n    }\n}\n\n\/\/ Select one configuration node from the database cluster using the load-balancing algorithm (priority configuration).\n\/\/ Worked example of the algorithm:\n\/\/ 1. if two nodes both have priority 1, the random range is [0, 199];\n\/\/ 2. node 1 then covers [0, 99] and node 2 covers [100, 199], a 1:1 ratio;\n\/\/ 3. suppose the computed random number is 99;\n\/\/ 4. then the selected configuration is node 1.\n
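\/\/ A quick numeric check (illustrative): with priorities 1 and 3, the total is 400,\n\/\/ so node 1 covers [0, 99] and node 2 covers [100, 399], i.e. a 1:3 split.\n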
4、那么选择的配置为节点1;\nfunc getConfigNodeByPriority(cg ConfigGroup) *ConfigNode {\n\tif len(cg) < 2 {\n\t\treturn &cg[0]\n\t}\n\tvar total int\n\tfor i := 0; i < len(cg); i++ {\n\t\ttotal += cg[i].Priority * 100\n\t}\n\t\/\/ 如果total为0表示所有连接都没有配置priority属性,那么默认都是1\n\tif total == 0 {\n for i := 0; i < len(cg); i++ {\n cg[i].Priority = 1\n total += cg[i].Priority * 100\n }\n }\n\t\/\/ 不能取到末尾的边界点\n\tr := grand.Rand(0, total)\n\tif r > 0 {\n\t\tr -= 1\n\t}\n\tmin := 0\n\tmax := 0\n\tfor i := 0; i < len(cg); i++ {\n\t\tmax = min + cg[i].Priority*100\n\t\t\/\/fmt.Printf(\"r: %d, min: %d, max: %d\\n\", r, min, max)\n\t\tif r >= min && r < max {\n\t\t\treturn &cg[i]\n\t\t} else {\n\t\t\tmin = max\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ 获得底层数据库链接对象\nfunc (bs *dbBase) getSqlDb(master bool) (sqlDb *sql.DB, err error) {\n \/\/ 负载均衡\n node, err := getConfigNodeByGroup(bs.group, master)\n if err != nil {\n return nil, err\n }\n \/\/ 默认值设定\n if node.Charset == \"\" {\n node.Charset = \"utf8\"\n }\n v := bs.cache.GetOrSetFuncLock(node.String(), func() interface{} {\n sqlDb, err = bs.db.Open(node)\n if err != nil {\n return nil\n }\n\n if n := bs.maxIdleConnCount.Val(); n > 0 {\n sqlDb.SetMaxIdleConns(n)\n } else if node.MaxIdleConnCount > 0 {\n sqlDb.SetMaxIdleConns(node.MaxIdleConnCount)\n }\n\n if n := bs.maxOpenConnCount.Val(); n > 0 {\n sqlDb.SetMaxOpenConns(n)\n } else if node.MaxOpenConnCount > 0 {\n sqlDb.SetMaxOpenConns(node.MaxOpenConnCount)\n }\n\n if n := bs.maxConnLifetime.Val(); n > 0 {\n sqlDb.SetConnMaxLifetime(time.Duration(n) * time.Second)\n } else if node.MaxConnLifetime > 0 {\n sqlDb.SetConnMaxLifetime(time.Duration(node.MaxConnLifetime) * time.Second)\n }\n return sqlDb\n }, 0)\n if v != nil && sqlDb == nil {\n sqlDb = v.(*sql.DB)\n }\n \/\/ 是否手动选择数据库\n if v := bs.schema.Val(); v != \"\" {\n sqlDb.Exec(\"USE \" + v)\n }\n return\n}\n\n\/\/ 切换操作的数据库(注意该切换是全局的)\nfunc (bs *dbBase) SetSchema(schema string) {\n bs.schema.Set(schema)\n}\n\n\/\/ 创建底层数据库master链接对象\nfunc (bs *dbBase) Master() (*sql.DB, error) {\n\treturn bs.getSqlDb(true)\n}\n\n\/\/ 创建底层数据库slave链接对象\nfunc (bs *dbBase) Slave() (*sql.DB, error) {\n return bs.getSqlDb(false)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"fmt\"\n \"math\"\n \"math\/rand\"\n \"time\"\n\n log \"github.com\/Sirupsen\/logrus\"\n)\n\nconst (\n xMax = 500\n yMax = 500\n xMin = 0\n yMin = 0\n levelUpSeconds = 600\n levelUpBase = float64(1.16)\n)\n\ntype Game struct {\n startedAt time.Time\n heroes []Hero\n adminToken string\n joinChan chan JoinRequest\n activateHeroChan chan ActivateHeroRequest\n exitChan chan []byte\n}\n\ntype Hero struct {\n Name string `json:\"name\"`\n Email string `json:\"email\"`\n Class string `json:\"class\"`\n Enabled bool `json:\"enabled\"`\n token string\n Level int `json:\"level\"`\n nextLevelAt time.Time\n createdAt time.Time\n Equipment Equipment `json:\"equipment\"`\n Xpos int `json:\"x_pos\"`\n Ypos int `json:\"y_pos\"`\n}\n\ntype Equipment struct {\n Ring int\n Amulet int\n Charm int\n Weapon int\n Helm int\n Tunic int\n Gloves int\n Shield int\n Leggings int\n Boots int\n}\n\n\/\/ NewGame creates a new game\nfunc NewGame(adminToken string) *Game {\n game := &Game{\n startedAt: time.Now(),\n heroes: []Hero{},\n joinChan: make(chan JoinRequest),\n activateHeroChan: make(chan ActivateHeroRequest),\n exitChan: make(chan []byte),\n adminToken: adminToken,\n }\n return game\n}\n\n\/\/ StartGame starts the game\nfunc StartGame(adminToken string) {\n game := NewGame(adminToken)\n\n go game.StartEngine()\n 
game.StartAPI()\n}\n\n\/\/ StartEngine starts the engine\nfunc (g *Game) StartEngine() {\n ticker := time.NewTicker(time.Second * 2)\n for {\n select {\n case <-ticker.C:\n g.moveHeroes()\n g.checkLevels()\n \/\/TODO: check battles\n case req := <-g.joinChan:\n log.Info(\"Join hero\")\n success, message := g.joinHero(req.name, req.email, req.heroClass, req.TokenRequest.token)\n req.Response <- GameResponse{success: success, message: message}\n close(req.Response)\n case req := <-g.activateHeroChan:\n log.Info(\"Activate hero\")\n success := g.activateHero(req.name, req.TokenRequest.token)\n req.Response <- GameResponse{success: success, message: \"\"}\n close(req.Response)\n case <-g.exitChan:\n log.Info(\"Exiting game\")\n return\n }\n }\n\n}\n\nfunc (g *Game) joinHero(name, email, class, adminToken string) (bool, string) {\n\n if !g.authorizeAdmin(adminToken) {\n return false, \"You are not authorized to perform this action.\"\n }\n\n hero := &Hero{\n Name: name,\n Email: email,\n Class: class,\n Enabled: false,\n token: randToken(),\n Level: 0,\n nextLevelAt: time.Now().Add(99999 * time.Hour),\n createdAt: time.Now(),\n Equipment: Equipment{\n Ring: 0,\n Amulet: 0,\n Charm: 0,\n Weapon: 0,\n Helm: 0,\n Tunic: 0,\n Gloves: 0,\n Shield: 0,\n Leggings: 0,\n Boots: 0,\n },\n Xpos: rand.Intn(xMax-xMin) + xMin,\n Ypos: rand.Intn(yMax-yMin) + yMin,\n }\n\n g.heroes = append(g.heroes, *hero)\n log.Infof(\"Hero %s has been created, but will not play until it's activated.\", hero.Name)\n return true, fmt.Sprintf(\"Token: %s\", hero.token)\n}\n\nfunc (g *Game) activateHero(name, token string) bool {\n i := g.getHeroIndex(name)\n if i == -1 {\n return false\n }\n if g.heroes[i].token != token {\n return false\n }\n\n ttl := getTTL(1) \/\/ Time to level 1\n g.heroes[i].Enabled = true\n g.heroes[i].nextLevelAt = time.Now().Add(ttl * time.Second)\n log.Infof(\"Success! Hero %s has been activated and will reach level 1 in %d seconds.\", g.heroes[i].Name, ttl)\n return true\n}\n\nfunc (g *Game) moveHeroes() {\n for i := range g.heroes {\n if !g.heroes[i].Enabled {\n continue\n }\n g.heroes[i].Xpos = truncateInt(g.heroes[i].Xpos+(rand.Intn(3)-1), xMin, xMax)\n g.heroes[i].Ypos = truncateInt(g.heroes[i].Ypos+(rand.Intn(3)-1), yMin, yMax)\n }\n}\n\nfunc (g *Game) checkLevels() {\n for i := range g.heroes {\n if !g.heroes[i].Enabled {\n continue\n }\n\n if g.heroes[i].nextLevelAt.Before(time.Now()) {\n level := g.heroes[i].Level + 1\n ttl := getTTL(level)\n g.heroes[i].nextLevelAt = time.Now().Add(ttl * time.Second)\n g.heroes[i].Level = level\n log.Infof(\"Hero %s reached level %d. 
Next level in %d seconds.\", g.heroes[i].Name, level, ttl)\n }\n }\n}\n\nfunc (g *Game) authorizeAdmin(token string) bool {\n return g.adminToken == token\n}\n\nfunc (g *Game) getHeroIndex(name string) int {\n for i, hero := range g.heroes {\n if hero.Name == name {\n return i\n }\n }\n return -1\n}\n\nfunc getTTL(level int) time.Duration {\n return time.Duration(levelUpSeconds * (math.Pow(levelUpBase, float64(level))))\n}\n<commit_msg>Fixes bug calculating TTL<commit_after>package main\n\nimport (\n \"fmt\"\n \"math\"\n \"math\/rand\"\n \"time\"\n\n log \"github.com\/Sirupsen\/logrus\"\n)\n\nconst (\n xMax = 500\n yMax = 500\n xMin = 0\n yMin = 0\n levelUpSeconds = 600\n levelUpBase = float64(1.16)\n)\n\ntype Game struct {\n startedAt time.Time\n heroes []Hero\n adminToken string\n joinChan chan JoinRequest\n activateHeroChan chan ActivateHeroRequest\n exitChan chan []byte\n}\n\ntype Hero struct {\n Name string `json:\"name\"`\n Email string `json:\"email\"`\n Class string `json:\"class\"`\n Enabled bool `json:\"enabled\"`\n token string\n Level int `json:\"level\"`\n nextLevelAt time.Time\n createdAt time.Time\n Equipment Equipment `json:\"equipment\"`\n Xpos int `json:\"x_pos\"`\n Ypos int `json:\"y_pos\"`\n}\n\ntype Equipment struct {\n Ring int\n Amulet int\n Charm int\n Weapon int\n Helm int\n Tunic int\n Gloves int\n Shield int\n Leggings int\n Boots int\n}\n\n\/\/ NewGame creates a new game\nfunc NewGame(adminToken string) *Game {\n game := &Game{\n startedAt: time.Now(),\n heroes: []Hero{},\n joinChan: make(chan JoinRequest),\n activateHeroChan: make(chan ActivateHeroRequest),\n exitChan: make(chan []byte),\n adminToken: adminToken,\n }\n return game\n}\n\n\/\/ StartGame starts the game\nfunc StartGame(adminToken string) {\n game := NewGame(adminToken)\n\n go game.StartEngine()\n game.StartAPI()\n}\n\n\/\/ StartEngine starts the engine\nfunc (g *Game) StartEngine() {\n ticker := time.NewTicker(time.Second * 2)\n for {\n select {\n case <-ticker.C:\n g.moveHeroes()\n g.checkLevels()\n \/\/TODO: check battles\n case req := <-g.joinChan:\n log.Info(\"Join hero\")\n success, message := g.joinHero(req.name, req.email, req.heroClass, req.TokenRequest.token)\n req.Response <- GameResponse{success: success, message: message}\n close(req.Response)\n case req := <-g.activateHeroChan:\n log.Info(\"Activate hero\")\n success := g.activateHero(req.name, req.TokenRequest.token)\n req.Response <- GameResponse{success: success, message: \"\"}\n close(req.Response)\n case <-g.exitChan:\n log.Info(\"Exiting game\")\n return\n }\n }\n\n}\n\nfunc (g *Game) joinHero(name, email, class, adminToken string) (bool, string) {\n\n if !g.authorizeAdmin(adminToken) {\n return false, \"You are not authorized to perform this action.\"\n }\n\n hero := &Hero{\n Name: name,\n Email: email,\n Class: class,\n Enabled: false,\n token: randToken(),\n Level: 0,\n nextLevelAt: time.Now().Add(99999 * time.Hour),\n createdAt: time.Now(),\n Equipment: Equipment{\n Ring: 0,\n Amulet: 0,\n Charm: 0,\n Weapon: 0,\n Helm: 0,\n Tunic: 0,\n Gloves: 0,\n Shield: 0,\n Leggings: 0,\n Boots: 0,\n },\n Xpos: rand.Intn(xMax-xMin) + xMin,\n Ypos: rand.Intn(yMax-yMin) + yMin,\n }\n\n g.heroes = append(g.heroes, *hero)\n log.Infof(\"Hero %s has been created, but will not play until it's activated.\", hero.Name)\n return true, fmt.Sprintf(\"Token: %s\", hero.token)\n}\n\nfunc (g *Game) activateHero(name, token string) bool {\n i := g.getHeroIndex(name)\n if i == -1 {\n return false\n }\n if g.heroes[i].token != token {\n return false\n 
}\n\n ttl := getTTL(1) \/\/ Time to level 1\n g.heroes[i].Enabled = true\n g.heroes[i].nextLevelAt = time.Now().Add(ttl * time.Second)\n log.Infof(\"Success! Hero %s has been activated and will reach level 1 in %d seconds.\", g.heroes[i].Name, ttl)\n return true\n}\n\nfunc (g *Game) moveHeroes() {\n for i := range g.heroes {\n if !g.heroes[i].Enabled {\n continue\n }\n g.heroes[i].Xpos = truncateInt(g.heroes[i].Xpos+(rand.Intn(3)-1), xMin, xMax)\n g.heroes[i].Ypos = truncateInt(g.heroes[i].Ypos+(rand.Intn(3)-1), yMin, yMax)\n }\n}\n\nfunc (g *Game) checkLevels() {\n for i := range g.heroes {\n if !g.heroes[i].Enabled {\n continue\n }\n\n if g.heroes[i].nextLevelAt.Before(time.Now()) {\n level := g.heroes[i].Level + 1\n ttl := getTTL(level + 1)\n g.heroes[i].nextLevelAt = time.Now().Add(ttl * time.Second)\n g.heroes[i].Level = level\n log.Infof(\"Hero %s reached level %d. Next level in %d seconds.\", g.heroes[i].Name, level, ttl)\n }\n }\n}\n\nfunc (g *Game) authorizeAdmin(token string) bool {\n return g.adminToken == token\n}\n\nfunc (g *Game) getHeroIndex(name string) int {\n for i, hero := range g.heroes {\n if hero.Name == name {\n return i\n }\n }\n return -1\n}\n\nfunc getTTL(level int) time.Duration {\n return time.Duration(levelUpSeconds * (math.Pow(levelUpBase, float64(level))))\n}\n<|endoftext|>"} {"text":"<commit_before>package protocol\n\nimport (\n\t\"github.com\/ethereum\/go-ethereum\/common\"\n\t\"math\/big\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc newTestSmartPool() *SmartPool {\n\treturn NewSmartPool(\n\t\t&testPoolMonitor{},\n\t\t&testShareReceiver{}, &testNetworkClient{},\n\t\t&testClaimRepo{}, &testContract{},\n\t\tcommon.HexToAddress(\"0x001aDBc838eDe392B5B054A47f8B8c28f2fA9F3F\"),\n\t\tcommon.HexToAddress(\"0x001aDBc838eDe392B5B054A47f8B8c28f2fA9F3F\"),\n\t\t\"extradata\", time.Minute,\n\t\t100, true,\n\t)\n}\n\nfunc TestSmartPoolRegisterMinerAfterRegister(t *testing.T) {\n\tsp := newTestSmartPool()\n\ttestContract := sp.Contract.(*testContract)\n\ttestContract.Registered = true\n\tif !sp.Register(common.Address{}) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolRegisterMinerWhenUnableToRegister(t *testing.T) {\n\tsp := newTestSmartPool()\n\ttestContract := sp.Contract.(*testContract)\n\ttestContract.Registerable = false\n\tif sp.Register(common.Address{}) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolRegisterMinerWhenAbleToRegister(t *testing.T) {\n\tsp := newTestSmartPool()\n\ttestContract := sp.Contract.(*testContract)\n\ttestContract.Registerable = true\n\tsp.Contract = testContract\n\tif !sp.Register(common.Address{}) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolReturnAWorkToMiner(t *testing.T) {\n\tsp := newTestSmartPool()\n\tsp.GetWork()\n}\n\nfunc TestSmartPoolAcceptSolution(t *testing.T) {\n\tsp := newTestSmartPool()\n\tif !sp.AcceptSolution(&testSolution{Counter: big.NewInt(10)}) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolNotAcceptSolution(t *testing.T) {\n\tsp := newTestSmartPool()\n\tsp.LatestCounter = big.NewInt(10)\n\tif sp.AcceptSolution(&testSolution{Counter: big.NewInt(9)}) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolPackageAllCurrentShares(t *testing.T) {\n\tsp := newTestSmartPool()\n\tsp.LatestCounter = big.NewInt(5)\n\tclaim := sp.GetCurrentClaim(1)\n\tif claim != nil {\n\t\tt.Fail()\n\t}\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(9)})\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(8)})\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(10)})\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(5)})\n\tclaim = 
sp.GetCurrentClaim(1)\n\tif claim.NumShares().Cmp(big.NewInt(3)) != 0 {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolSubmitCorrectClaim(t *testing.T) {\n\tsp := newTestSmartPool()\n\tsp.ShareThreshold = 1\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(9)})\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(8)})\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(10)})\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(5)})\n\tsp.Submit()\n\n\ttestContract := sp.Contract.(*testContract)\n\tclaim := testContract.GetLastSubmittedClaim()\n\tif claim.NumShares().Cmp(big.NewInt(4)) != 0 {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolReturnFalseIfNoClaim(t *testing.T) {\n\tsp := newTestSmartPool()\n\tif ok, _ := sp.Submit(); ok {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolSuccessfullySubmitAndVerifyClaim(t *testing.T) {\n\tsp := newTestSmartPool()\n\tsp.ShareThreshold = 1\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(9)})\n\tif ok, _ := sp.Submit(); !ok {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolGetCorrectShareIndex(t *testing.T) {\n\tsp := newTestSmartPool()\n\tsp.ShareThreshold = 1\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(9)})\n\tsp.Submit()\n\tc := sp.Contract.(*testContract)\n\tif c.IndexRequestedTime == nil {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolGetCorrectShareIndexAfterSubmitClaim(t *testing.T) {\n\tsp := newTestSmartPool()\n\tsp.ShareThreshold = 1\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(9)})\n\tsp.Submit()\n\tc := sp.Contract.(*testContract)\n\tif (*c.SubmitTime).After(*c.IndexRequestedTime) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolSubmitReturnFalseWhenUnableToSubmit(t *testing.T) {\n\tsp := newTestSmartPool()\n\tc := sp.Contract.(*testContract)\n\tc.SubmitFailed = true\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(9)})\n\tif ok, _ := sp.Submit(); ok {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolSubmitReturnFalseWhenUnableToVerify(t *testing.T) {\n\tsp := newTestSmartPool()\n\tc := sp.Contract.(*testContract)\n\tc.VerifyFailed = true\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(9)})\n\tif ok, _ := sp.Submit(); ok {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolDoesntRunWhenMinerRegistered(t *testing.T) {\n\tsp := newTestSmartPool()\n\tif sp.Run() {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolOnlySubmitPeriodly(t *testing.T) {\n\tsp := newTestSmartPool()\n\tct := sp.Contract.(*testContract)\n\tct.Registered = true\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(9)})\n\tc := sp.Contract.(*testContract)\n\tsp.SubmitInterval = 40 * time.Millisecond\n\tsp.ShareThreshold = 1\n\tsp.Run()\n\tif c.GetLastSubmittedClaim() != nil {\n\t\tt.Fail()\n\t}\n\ttime.Sleep(60 * time.Millisecond)\n\tif c.GetLastSubmittedClaim() == nil {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolOnlySubmitWhenMeetShareThreshold(t *testing.T) {\n\tsp := newTestSmartPool()\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(9)})\n\tc := sp.Contract.(*testContract)\n\tsp.SubmitInterval = 40 * time.Millisecond\n\tsp.ShareThreshold = 3\n\tsp.Run()\n\ttime.Sleep(60 * time.Millisecond)\n\tif c.GetLastSubmittedClaim() != nil {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolOnlyRunAfterNetworkReady(t *testing.T) {\n\tsp := newTestSmartPool()\n\ttestContract := sp.Contract.(*testContract)\n\ttestContract.Registered = true\n\tnw := sp.NetworkClient.(*testNetworkClient)\n\tnw.NotReadyToMine = true\n\tran := make(chan bool, 1)\n\ttimeout := make(chan bool, 1)\n\tgo func() {\n\t\tran <- sp.Run()\n\t}()\n\tgo func() {\n\t\ttime.Sleep(100 * 
time.Millisecond)\n\t\ttimeout <- true\n\t}()\n\tselect {\n\tcase <-ran:\n\t\tt.Fail()\n\tcase <-timeout:\n\t\tbreak\n\t}\n}\n\nfunc TestSmartPoolStopIfClientVersionChangedInHotStopMode(t *testing.T) {\n\tsp := newTestSmartPool()\n\ttestContract := sp.Contract.(*testContract)\n\ttestContract.Registered = true\n\ttimeout := make(chan bool, 1)\n\tsp.PoolMonitor.(*testPoolMonitor).ClientUpdate = true\n\tsp.Run()\n\tgo func() {\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\ttimeout <- true\n\t}()\n\tselect {\n\tcase <-sp.SubmitterStopped:\n\t\tbreak\n\tcase <-timeout:\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolDoesntStopIfHotStopModeIsDisabled(t *testing.T) {\n\tsp := newTestSmartPool()\n\tsp.HotStop = false\n\ttestContract := sp.Contract.(*testContract)\n\ttestContract.Registered = true\n\ttimeout := make(chan bool, 1)\n\tsp.PoolMonitor.(*testPoolMonitor).ContractUpdate = true\n\tsp.Run()\n\tgo func() {\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\ttimeout <- true\n\t}()\n\tselect {\n\tcase <-sp.SubmitterStopped:\n\t\tt.Fail()\n\tcase <-timeout:\n\t\tbreak\n\t}\n}\n\nfunc TestSmartPoolStopIfContractAddressChangedInHotStopMode(t *testing.T) {\n\tsp := newTestSmartPool()\n\ttestContract := sp.Contract.(*testContract)\n\ttestContract.Registered = true\n\ttimeout := make(chan bool, 1)\n\tsp.PoolMonitor.(*testPoolMonitor).ContractUpdate = true\n\tsp.Run()\n\tgo func() {\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\ttimeout <- true\n\t}()\n\tselect {\n\tcase <-sp.SubmitterStopped:\n\t\tbreak\n\tcase <-timeout:\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolRememberLatestCounterAfterFormAClaim(t *testing.T) {\n\tsp := newTestSmartPool()\n\tsp.ShareThreshold = 1\n\tsp.LatestCounter = big.NewInt(5)\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(100)})\n\tct := sp.Contract.(*testContract)\n\tct.DelayedVerification = true\n\tgo sp.Submit()\n\tt.Logf(\"latest counter: %s\\n\", sp.LatestCounter)\n\tif sp.LatestCounter.Int64() != 100 {\n\t\tt.Fail()\n\t}\n}\n\n\/\/\n\/\/ func TestSmartPoolConstructsAShare(t *testing.T) {\n\/\/ \tsp := newTestSmartPool()\n\/\/ }\n\/\/\n<commit_msg>remove redundant test<commit_after>package protocol\n\nimport (\n\t\"github.com\/ethereum\/go-ethereum\/common\"\n\t\"math\/big\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc newTestSmartPool() *SmartPool {\n\treturn NewSmartPool(\n\t\t&testPoolMonitor{},\n\t\t&testShareReceiver{}, &testNetworkClient{},\n\t\t&testClaimRepo{}, &testContract{},\n\t\tcommon.HexToAddress(\"0x001aDBc838eDe392B5B054A47f8B8c28f2fA9F3F\"),\n\t\tcommon.HexToAddress(\"0x001aDBc838eDe392B5B054A47f8B8c28f2fA9F3F\"),\n\t\t\"extradata\", time.Minute,\n\t\t100, true,\n\t)\n}\n\nfunc TestSmartPoolRegisterMinerAfterRegister(t *testing.T) {\n\tsp := newTestSmartPool()\n\ttestContract := sp.Contract.(*testContract)\n\ttestContract.Registered = true\n\tif !sp.Register(common.Address{}) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolRegisterMinerWhenUnableToRegister(t *testing.T) {\n\tsp := newTestSmartPool()\n\ttestContract := sp.Contract.(*testContract)\n\ttestContract.Registerable = false\n\tif sp.Register(common.Address{}) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolRegisterMinerWhenAbleToRegister(t *testing.T) {\n\tsp := newTestSmartPool()\n\ttestContract := sp.Contract.(*testContract)\n\ttestContract.Registerable = true\n\tsp.Contract = testContract\n\tif !sp.Register(common.Address{}) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolReturnAWorkToMiner(t *testing.T) {\n\tsp := newTestSmartPool()\n\tsp.GetWork()\n}\n\nfunc TestSmartPoolAcceptSolution(t *testing.T) {\n\tsp := 
newTestSmartPool()\n\tif !sp.AcceptSolution(&testSolution{Counter: big.NewInt(10)}) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolNotAcceptSolution(t *testing.T) {\n\tsp := newTestSmartPool()\n\tsp.LatestCounter = big.NewInt(10)\n\tif sp.AcceptSolution(&testSolution{Counter: big.NewInt(9)}) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolPackageAllCurrentShares(t *testing.T) {\n\tsp := newTestSmartPool()\n\tsp.LatestCounter = big.NewInt(5)\n\tclaim := sp.GetCurrentClaim(1)\n\tif claim != nil {\n\t\tt.Fail()\n\t}\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(9)})\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(8)})\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(10)})\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(5)})\n\tclaim = sp.GetCurrentClaim(1)\n\tif claim.NumShares().Cmp(big.NewInt(3)) != 0 {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolSubmitCorrectClaim(t *testing.T) {\n\tsp := newTestSmartPool()\n\tsp.ShareThreshold = 1\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(9)})\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(8)})\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(10)})\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(5)})\n\tsp.Submit()\n\n\ttestContract := sp.Contract.(*testContract)\n\tclaim := testContract.GetLastSubmittedClaim()\n\tif claim.NumShares().Cmp(big.NewInt(4)) != 0 {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolReturnFalseIfNoClaim(t *testing.T) {\n\tsp := newTestSmartPool()\n\tif ok, _ := sp.Submit(); ok {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolSuccessfullySubmitAndVerifyClaim(t *testing.T) {\n\tsp := newTestSmartPool()\n\tsp.ShareThreshold = 1\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(9)})\n\tif ok, _ := sp.Submit(); !ok {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolGetCorrectShareIndex(t *testing.T) {\n\tsp := newTestSmartPool()\n\tsp.ShareThreshold = 1\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(9)})\n\tsp.Submit()\n\tc := sp.Contract.(*testContract)\n\tif c.IndexRequestedTime == nil {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolGetCorrectShareIndexAfterSubmitClaim(t *testing.T) {\n\tsp := newTestSmartPool()\n\tsp.ShareThreshold = 1\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(9)})\n\tsp.Submit()\n\tc := sp.Contract.(*testContract)\n\tif (*c.SubmitTime).After(*c.IndexRequestedTime) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolSubmitReturnFalseWhenUnableToSubmit(t *testing.T) {\n\tsp := newTestSmartPool()\n\tc := sp.Contract.(*testContract)\n\tc.SubmitFailed = true\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(9)})\n\tif ok, _ := sp.Submit(); ok {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolSubmitReturnFalseWhenUnableToVerify(t *testing.T) {\n\tsp := newTestSmartPool()\n\tc := sp.Contract.(*testContract)\n\tc.VerifyFailed = true\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(9)})\n\tif ok, _ := sp.Submit(); ok {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolDoesntRunWhenMinerRegistered(t *testing.T) {\n\tsp := newTestSmartPool()\n\tif sp.Run() {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolOnlySubmitPeriodly(t *testing.T) {\n\tsp := newTestSmartPool()\n\tct := sp.Contract.(*testContract)\n\tct.Registered = true\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(9)})\n\tc := sp.Contract.(*testContract)\n\tsp.SubmitInterval = 40 * time.Millisecond\n\tsp.ShareThreshold = 1\n\tsp.Run()\n\tif c.GetLastSubmittedClaim() != nil {\n\t\tt.Fail()\n\t}\n\ttime.Sleep(60 * time.Millisecond)\n\tif c.GetLastSubmittedClaim() == nil {\n\t\tt.Fail()\n\t}\n}\n\nfunc 
TestSmartPoolOnlySubmitWhenMeetShareThreshold(t *testing.T) {\n\tsp := newTestSmartPool()\n\tsp.AcceptSolution(&testSolution{Counter: big.NewInt(9)})\n\tc := sp.Contract.(*testContract)\n\tsp.SubmitInterval = 40 * time.Millisecond\n\tsp.ShareThreshold = 3\n\tsp.Run()\n\ttime.Sleep(60 * time.Millisecond)\n\tif c.GetLastSubmittedClaim() != nil {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolOnlyRunAfterNetworkReady(t *testing.T) {\n\tsp := newTestSmartPool()\n\ttestContract := sp.Contract.(*testContract)\n\ttestContract.Registered = true\n\tnw := sp.NetworkClient.(*testNetworkClient)\n\tnw.NotReadyToMine = true\n\tran := make(chan bool, 1)\n\ttimeout := make(chan bool, 1)\n\tgo func() {\n\t\tran <- sp.Run()\n\t}()\n\tgo func() {\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\ttimeout <- true\n\t}()\n\tselect {\n\tcase <-ran:\n\t\tt.Fail()\n\tcase <-timeout:\n\t\tbreak\n\t}\n}\n\nfunc TestSmartPoolStopIfClientVersionChangedInHotStopMode(t *testing.T) {\n\tsp := newTestSmartPool()\n\ttestContract := sp.Contract.(*testContract)\n\ttestContract.Registered = true\n\ttimeout := make(chan bool, 1)\n\tsp.PoolMonitor.(*testPoolMonitor).ClientUpdate = true\n\tsp.Run()\n\tgo func() {\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\ttimeout <- true\n\t}()\n\tselect {\n\tcase <-sp.SubmitterStopped:\n\t\tbreak\n\tcase <-timeout:\n\t\tt.Fail()\n\t}\n}\n\nfunc TestSmartPoolDoesntStopIfHotStopModeIsDisabled(t *testing.T) {\n\tsp := newTestSmartPool()\n\tsp.HotStop = false\n\ttestContract := sp.Contract.(*testContract)\n\ttestContract.Registered = true\n\ttimeout := make(chan bool, 1)\n\tsp.PoolMonitor.(*testPoolMonitor).ContractUpdate = true\n\tsp.Run()\n\tgo func() {\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\ttimeout <- true\n\t}()\n\tselect {\n\tcase <-sp.SubmitterStopped:\n\t\tt.Fail()\n\tcase <-timeout:\n\t\tbreak\n\t}\n}\n\nfunc TestSmartPoolStopIfContractAddressChangedInHotStopMode(t *testing.T) {\n\tsp := newTestSmartPool()\n\ttestContract := sp.Contract.(*testContract)\n\ttestContract.Registered = true\n\ttimeout := make(chan bool, 1)\n\tsp.PoolMonitor.(*testPoolMonitor).ContractUpdate = true\n\tsp.Run()\n\tgo func() {\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\ttimeout <- true\n\t}()\n\tselect {\n\tcase <-sp.SubmitterStopped:\n\t\tbreak\n\tcase <-timeout:\n\t\tt.Fail()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage juju\n\nimport (\n\t\"github.com\/flaviamissi\/go-elb\/aws\"\n\t\"github.com\/flaviamissi\/go-elb\/elb\"\n\t\"github.com\/flaviamissi\/go-elb\/elb\/elbtest\"\n\t\"github.com\/globocom\/config\"\n\t\"github.com\/globocom\/tsuru\/db\"\n\t\"github.com\/globocom\/tsuru\/provision\"\n\t\"github.com\/globocom\/tsuru\/testing\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t. 
\"launchpad.net\/gocheck\"\n)\n\ntype ELBSuite struct {\n\tserver *elbtest.Server\n\tclient *elb.ELB\n\tcName string\n}\n\nvar _ = Suite(&ELBSuite{})\n\nfunc (s *ELBSuite) SetUpSuite(c *C) {\n\tvar err error\n\tdb.Session, err = db.Open(\"127.0.0.1:27017\", \"juju_tests\")\n\tc.Assert(err, IsNil)\n\ts.server, err = elbtest.NewServer()\n\tc.Assert(err, IsNil)\n\tconfig.Set(\"juju:elb-endpoint\", s.server.URL())\n\tconfig.Set(\"juju:use-elb\", true)\n\tregion := aws.SAEast\n\tregion.ELBEndpoint = s.server.URL()\n\ts.client = elb.New(aws.Auth{AccessKey: \"some\", SecretKey: \"thing\"}, region)\n\tc.Assert(err, IsNil)\n\ts.cName = \"juju_test_elbs\"\n\tconfig.Set(\"juju:elb-collection\", s.cName)\n\tconfig.Set(\"juju:elb-avail-zones\", []interface{}{\"my-zone-1a\", \"my-zone-1b\"})\n\tconfig.Set(\"aws:access-key-id\", \"access\")\n\tconfig.Set(\"aws:secret-access-key\", \"s3cr3t\")\n}\n\nfunc (s *ELBSuite) TearDownSuite(c *C) {\n\tconfig.Unset(\"juju:use-elb\")\n\tdb.Session.Close()\n\ts.server.Quit()\n}\n\nfunc (s *ELBSuite) TestGetCollection(c *C) {\n\tmanager := ELBManager{}\n\tcoll := manager.collection()\n\tother := db.Session.Collection(s.cName)\n\tc.Assert(coll, DeepEquals, other)\n}\n\nfunc (s *ELBSuite) TestGetELBClient(c *C) {\n\tmanager := ELBManager{}\n\telb := manager.elb()\n\tc.Assert(elb.ELBEndpoint, Equals, s.server.URL())\n}\n\nfunc (s *ELBSuite) TestCreateELB(c *C) {\n\tapp := testing.NewFakeApp(\"together\", \"gotthard\", 1)\n\tmanager := ELBManager{}\n\tmanager.e = s.client\n\terr := manager.Create(app)\n\tc.Assert(err, IsNil)\n\tdefer s.client.DeleteLoadBalancer(app.GetName())\n\tdefer manager.collection().Remove(bson.M{\"name\": app.GetName()})\n\tresp, err := s.client.DescribeLoadBalancers(\"together\")\n\tc.Assert(err, IsNil)\n\tc.Assert(resp.LoadBalancerDescriptions, HasLen, 1)\n\tc.Assert(resp.LoadBalancerDescriptions[0].ListenerDescriptions, HasLen, 1)\n\tlistener := resp.LoadBalancerDescriptions[0].ListenerDescriptions[0].Listener\n\tc.Assert(listener.InstancePort, Equals, 80)\n\tc.Assert(listener.LoadBalancerPort, Equals, 80)\n\tc.Assert(listener.InstanceProtocol, Equals, \"HTTP\")\n\tc.Assert(listener.Protocol, Equals, \"HTTP\")\n\tc.Assert(listener.SSLCertificateId, Equals, \"\")\n\tdnsName := resp.LoadBalancerDescriptions[0].DNSName\n\tvar lb loadBalancer\n\terr = db.Session.Collection(s.cName).Find(bson.M{\"name\": app.GetName()}).One(&lb)\n\tc.Assert(err, IsNil)\n\tc.Assert(lb.DNSName, Equals, dnsName)\n}\n\nfunc (s *ELBSuite) TestCreateELBUsingVPC(c *C) {\n\told, _ := config.Get(\"juju:elb-avail-zones\")\n\tconfig.Unset(\"juju:elb-avail-zones\")\n\tconfig.Set(\"juju:elb-use-vpc\", true)\n\tconfig.Set(\"juju:elb-vpc-subnets\", []string{\"subnet-a4a3a2a1\", \"subnet-002200\"})\n\tconfig.Set(\"juju:elb-vpc-secgroups\", []string{\"sg-0900\"})\n\tdefer func() {\n\t\tconfig.Set(\"juju:elb-avail-zones\", old)\n\t\tconfig.Unset(\"juju:elb-use-vpc\")\n\t\tconfig.Unset(\"juju:elb-vpc-subnets\")\n\t\tconfig.Unset(\"juju:elb-vpc-secgroups\")\n\t}()\n\tapp := testing.NewFakeApp(\"relax\", \"who\", 1)\n\tmanager := ELBManager{}\n\terr := manager.Create(app)\n\tc.Assert(err, IsNil)\n\tdefer s.client.DeleteLoadBalancer(app.GetName())\n\tdefer manager.collection().Remove(bson.M{\"name\": app.GetName()})\n\tresp, err := s.client.DescribeLoadBalancers(app.GetName())\n\tc.Assert(err, IsNil)\n\tc.Assert(resp.LoadBalancerDescriptions, HasLen, 1)\n\tlbd := resp.LoadBalancerDescriptions[0]\n\tc.Assert(lbd.Subnets, DeepEquals, []string{\"subnet-a4a3a2a1\", 
\"subnet-002200\"})\n\tc.Assert(lbd.SecurityGroups, DeepEquals, []string{\"sg-0900\"})\n\tc.Assert(lbd.Scheme, Equals, \"internal\")\n\tc.Assert(lbd.AvailZones, HasLen, 0)\n}\n\nfunc (s *ELBSuite) TestDestroyELB(c *C) {\n\tapp := testing.NewFakeApp(\"blue\", \"who\", 1)\n\tmanager := ELBManager{}\n\tmanager.e = s.client\n\terr := manager.Create(app)\n\tc.Assert(err, IsNil)\n\tdefer s.client.DeleteLoadBalancer(app.GetName()) \/\/ sanity\n\tdefer manager.collection().Remove(bson.M{\"name\": app.GetName()}) \/\/ sanity\n\terr = manager.Destroy(app)\n\tc.Assert(err, IsNil)\n\t_, err = s.client.DescribeLoadBalancers(app.GetName())\n\tc.Assert(err, NotNil)\n\tc.Assert(err, ErrorMatches, `^.*\\(LoadBalancerNotFound\\)$`)\n\tn, err := manager.collection().Find(bson.M{\"name\": app.GetName()}).Count()\n\tc.Assert(err, IsNil)\n\tc.Assert(n, Equals, 0)\n}\n\nfunc (s *ELBSuite) TestRegisterUnit(c *C) {\n\tid1 := s.server.NewInstance()\n\tdefer s.server.RemoveInstance(id1)\n\tid2 := s.server.NewInstance()\n\tdefer s.server.RemoveInstance(id2)\n\tapp := testing.NewFakeApp(\"fooled\", \"who\", 1)\n\tmanager := ELBManager{}\n\tmanager.e = s.client\n\terr := manager.Create(app)\n\tc.Assert(err, IsNil)\n\tdefer manager.Destroy(app)\n\terr = manager.Register(app, provision.Unit{InstanceId: id1}, provision.Unit{InstanceId: id2})\n\tc.Assert(err, IsNil)\n\tresp, err := s.client.DescribeLoadBalancers(app.GetName())\n\tc.Assert(err, IsNil)\n\tc.Assert(resp.LoadBalancerDescriptions, HasLen, 1)\n\tc.Assert(resp.LoadBalancerDescriptions[0].Instances, HasLen, 2)\n\tinstances := resp.LoadBalancerDescriptions[0].Instances\n\tc.Assert(instances[0].InstanceId, Equals, id1)\n\tc.Assert(instances[1].InstanceId, Equals, id2)\n}\n\nfunc (s *ELBSuite) TestDeregisterUnit(c *C) {\n\tid1 := s.server.NewInstance()\n\tdefer s.server.RemoveInstance(id1)\n\tid2 := s.server.NewInstance()\n\tdefer s.server.RemoveInstance(id2)\n\tunit1 := provision.Unit{InstanceId: id1}\n\tunit2 := provision.Unit{InstanceId: id2}\n\tapp := testing.NewFakeApp(\"dirty\", \"who\", 1)\n\tmanager := ELBManager{}\n\tmanager.e = s.client\n\terr := manager.Create(app)\n\tc.Assert(err, IsNil)\n\tdefer manager.Destroy(app)\n\terr = manager.Register(app, unit1, unit2)\n\tc.Assert(err, IsNil)\n\terr = manager.Deregister(app, unit1, unit2)\n\tc.Assert(err, IsNil)\n\tresp, err := s.client.DescribeLoadBalancers(app.GetName())\n\tc.Assert(err, IsNil)\n\tc.Assert(resp.LoadBalancerDescriptions, HasLen, 1)\n\tc.Assert(resp.LoadBalancerDescriptions[0].Instances, HasLen, 0)\n}\n\nfunc (s *ELBSuite) TestAddr(c *C) {\n\tapp := testing.NewFakeApp(\"enough\", \"who\", 1)\n\tmanager := ELBManager{}\n\tmanager.e = s.client\n\terr := manager.Create(app)\n\tc.Assert(err, IsNil)\n\tdefer manager.Destroy(app)\n\tvar lb loadBalancer\n\terr = manager.collection().Find(bson.M{\"name\": app.GetName()}).One(&lb)\n\tc.Assert(err, IsNil)\n\taddr, err := manager.Addr(app)\n\tc.Assert(err, IsNil)\n\tc.Assert(addr, Equals, lb.DNSName)\n}\n\nfunc (s *ELBSuite) TestAddrUnknownLoadBalancer(c *C) {\n\tapp := testing.NewFakeApp(\"five\", \"who\", 1)\n\tmanager := ELBManager{}\n\taddr, err := manager.Addr(app)\n\tc.Assert(addr, Equals, \"\")\n\tc.Assert(err, NotNil)\n\tc.Assert(err.Error(), Equals, \"not found\")\n}\n<commit_msg>provision\/juju: change assertion, so it's more reliable<commit_after>\/\/ Copyright 2013 tsuru authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage juju\n\nimport (\n\t\"github.com\/flaviamissi\/go-elb\/aws\"\n\t\"github.com\/flaviamissi\/go-elb\/elb\"\n\t\"github.com\/flaviamissi\/go-elb\/elb\/elbtest\"\n\t\"github.com\/globocom\/config\"\n\t\"github.com\/globocom\/tsuru\/db\"\n\t\"github.com\/globocom\/tsuru\/provision\"\n\t\"github.com\/globocom\/tsuru\/testing\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t. \"launchpad.net\/gocheck\"\n\t\"sort\"\n)\n\ntype ELBSuite struct {\n\tserver *elbtest.Server\n\tclient *elb.ELB\n\tcName string\n}\n\nvar _ = Suite(&ELBSuite{})\n\nfunc (s *ELBSuite) SetUpSuite(c *C) {\n\tvar err error\n\tdb.Session, err = db.Open(\"127.0.0.1:27017\", \"juju_tests\")\n\tc.Assert(err, IsNil)\n\ts.server, err = elbtest.NewServer()\n\tc.Assert(err, IsNil)\n\tconfig.Set(\"juju:elb-endpoint\", s.server.URL())\n\tconfig.Set(\"juju:use-elb\", true)\n\tregion := aws.SAEast\n\tregion.ELBEndpoint = s.server.URL()\n\ts.client = elb.New(aws.Auth{AccessKey: \"some\", SecretKey: \"thing\"}, region)\n\tc.Assert(err, IsNil)\n\ts.cName = \"juju_test_elbs\"\n\tconfig.Set(\"juju:elb-collection\", s.cName)\n\tconfig.Set(\"juju:elb-avail-zones\", []interface{}{\"my-zone-1a\", \"my-zone-1b\"})\n\tconfig.Set(\"aws:access-key-id\", \"access\")\n\tconfig.Set(\"aws:secret-access-key\", \"s3cr3t\")\n}\n\nfunc (s *ELBSuite) TearDownSuite(c *C) {\n\tconfig.Unset(\"juju:use-elb\")\n\tdb.Session.Close()\n\ts.server.Quit()\n}\n\nfunc (s *ELBSuite) TestGetCollection(c *C) {\n\tmanager := ELBManager{}\n\tcoll := manager.collection()\n\tother := db.Session.Collection(s.cName)\n\tc.Assert(coll, DeepEquals, other)\n}\n\nfunc (s *ELBSuite) TestGetELBClient(c *C) {\n\tmanager := ELBManager{}\n\telb := manager.elb()\n\tc.Assert(elb.ELBEndpoint, Equals, s.server.URL())\n}\n\nfunc (s *ELBSuite) TestCreateELB(c *C) {\n\tapp := testing.NewFakeApp(\"together\", \"gotthard\", 1)\n\tmanager := ELBManager{}\n\tmanager.e = s.client\n\terr := manager.Create(app)\n\tc.Assert(err, IsNil)\n\tdefer s.client.DeleteLoadBalancer(app.GetName())\n\tdefer manager.collection().Remove(bson.M{\"name\": app.GetName()})\n\tresp, err := s.client.DescribeLoadBalancers(\"together\")\n\tc.Assert(err, IsNil)\n\tc.Assert(resp.LoadBalancerDescriptions, HasLen, 1)\n\tc.Assert(resp.LoadBalancerDescriptions[0].ListenerDescriptions, HasLen, 1)\n\tlistener := resp.LoadBalancerDescriptions[0].ListenerDescriptions[0].Listener\n\tc.Assert(listener.InstancePort, Equals, 80)\n\tc.Assert(listener.LoadBalancerPort, Equals, 80)\n\tc.Assert(listener.InstanceProtocol, Equals, \"HTTP\")\n\tc.Assert(listener.Protocol, Equals, \"HTTP\")\n\tc.Assert(listener.SSLCertificateId, Equals, \"\")\n\tdnsName := resp.LoadBalancerDescriptions[0].DNSName\n\tvar lb loadBalancer\n\terr = db.Session.Collection(s.cName).Find(bson.M{\"name\": app.GetName()}).One(&lb)\n\tc.Assert(err, IsNil)\n\tc.Assert(lb.DNSName, Equals, dnsName)\n}\n\nfunc (s *ELBSuite) TestCreateELBUsingVPC(c *C) {\n\told, _ := config.Get(\"juju:elb-avail-zones\")\n\tconfig.Unset(\"juju:elb-avail-zones\")\n\tconfig.Set(\"juju:elb-use-vpc\", true)\n\tconfig.Set(\"juju:elb-vpc-subnets\", []string{\"subnet-a4a3a2a1\", \"subnet-002200\"})\n\tconfig.Set(\"juju:elb-vpc-secgroups\", []string{\"sg-0900\"})\n\tdefer func() {\n\t\tconfig.Set(\"juju:elb-avail-zones\", old)\n\t\tconfig.Unset(\"juju:elb-use-vpc\")\n\t\tconfig.Unset(\"juju:elb-vpc-subnets\")\n\t\tconfig.Unset(\"juju:elb-vpc-secgroups\")\n\t}()\n\tapp := 
testing.NewFakeApp(\"relax\", \"who\", 1)\n\tmanager := ELBManager{}\n\terr := manager.Create(app)\n\tc.Assert(err, IsNil)\n\tdefer s.client.DeleteLoadBalancer(app.GetName())\n\tdefer manager.collection().Remove(bson.M{\"name\": app.GetName()})\n\tresp, err := s.client.DescribeLoadBalancers(app.GetName())\n\tc.Assert(err, IsNil)\n\tc.Assert(resp.LoadBalancerDescriptions, HasLen, 1)\n\tlbd := resp.LoadBalancerDescriptions[0]\n\tc.Assert(lbd.Subnets, DeepEquals, []string{\"subnet-a4a3a2a1\", \"subnet-002200\"})\n\tc.Assert(lbd.SecurityGroups, DeepEquals, []string{\"sg-0900\"})\n\tc.Assert(lbd.Scheme, Equals, \"internal\")\n\tc.Assert(lbd.AvailZones, HasLen, 0)\n}\n\nfunc (s *ELBSuite) TestDestroyELB(c *C) {\n\tapp := testing.NewFakeApp(\"blue\", \"who\", 1)\n\tmanager := ELBManager{}\n\tmanager.e = s.client\n\terr := manager.Create(app)\n\tc.Assert(err, IsNil)\n\tdefer s.client.DeleteLoadBalancer(app.GetName()) \/\/ sanity\n\tdefer manager.collection().Remove(bson.M{\"name\": app.GetName()}) \/\/ sanity\n\terr = manager.Destroy(app)\n\tc.Assert(err, IsNil)\n\t_, err = s.client.DescribeLoadBalancers(app.GetName())\n\tc.Assert(err, NotNil)\n\tc.Assert(err, ErrorMatches, `^.*\\(LoadBalancerNotFound\\)$`)\n\tn, err := manager.collection().Find(bson.M{\"name\": app.GetName()}).Count()\n\tc.Assert(err, IsNil)\n\tc.Assert(n, Equals, 0)\n}\n\nfunc (s *ELBSuite) TestRegisterUnit(c *C) {\n\tid1 := s.server.NewInstance()\n\tdefer s.server.RemoveInstance(id1)\n\tid2 := s.server.NewInstance()\n\tdefer s.server.RemoveInstance(id2)\n\tapp := testing.NewFakeApp(\"fooled\", \"who\", 1)\n\tmanager := ELBManager{}\n\tmanager.e = s.client\n\terr := manager.Create(app)\n\tc.Assert(err, IsNil)\n\tdefer manager.Destroy(app)\n\terr = manager.Register(app, provision.Unit{InstanceId: id1}, provision.Unit{InstanceId: id2})\n\tc.Assert(err, IsNil)\n\tresp, err := s.client.DescribeLoadBalancers(app.GetName())\n\tc.Assert(err, IsNil)\n\tc.Assert(resp.LoadBalancerDescriptions, HasLen, 1)\n\tc.Assert(resp.LoadBalancerDescriptions[0].Instances, HasLen, 2)\n\tinstances := resp.LoadBalancerDescriptions[0].Instances\n\tids := []string{instances[0].InstanceId, instances[1].InstanceId}\n\tsort.Strings(ids)\n\tc.Assert(ids, DeepEquals, []string{id1, id2})\n}\n\nfunc (s *ELBSuite) TestDeregisterUnit(c *C) {\n\tid1 := s.server.NewInstance()\n\tdefer s.server.RemoveInstance(id1)\n\tid2 := s.server.NewInstance()\n\tdefer s.server.RemoveInstance(id2)\n\tunit1 := provision.Unit{InstanceId: id1}\n\tunit2 := provision.Unit{InstanceId: id2}\n\tapp := testing.NewFakeApp(\"dirty\", \"who\", 1)\n\tmanager := ELBManager{}\n\tmanager.e = s.client\n\terr := manager.Create(app)\n\tc.Assert(err, IsNil)\n\tdefer manager.Destroy(app)\n\terr = manager.Register(app, unit1, unit2)\n\tc.Assert(err, IsNil)\n\terr = manager.Deregister(app, unit1, unit2)\n\tc.Assert(err, IsNil)\n\tresp, err := s.client.DescribeLoadBalancers(app.GetName())\n\tc.Assert(err, IsNil)\n\tc.Assert(resp.LoadBalancerDescriptions, HasLen, 1)\n\tc.Assert(resp.LoadBalancerDescriptions[0].Instances, HasLen, 0)\n}\n\nfunc (s *ELBSuite) TestAddr(c *C) {\n\tapp := testing.NewFakeApp(\"enough\", \"who\", 1)\n\tmanager := ELBManager{}\n\tmanager.e = s.client\n\terr := manager.Create(app)\n\tc.Assert(err, IsNil)\n\tdefer manager.Destroy(app)\n\tvar lb loadBalancer\n\terr = manager.collection().Find(bson.M{\"name\": app.GetName()}).One(&lb)\n\tc.Assert(err, IsNil)\n\taddr, err := manager.Addr(app)\n\tc.Assert(err, IsNil)\n\tc.Assert(addr, Equals, lb.DNSName)\n}\n\nfunc (s *ELBSuite) 
TestAddrUnknownLoadBalancer(c *C) {\n\tapp := testing.NewFakeApp(\"five\", \"who\", 1)\n\tmanager := ELBManager{}\n\taddr, err := manager.Addr(app)\n\tc.Assert(addr, Equals, \"\")\n\tc.Assert(err, NotNil)\n\tc.Assert(err.Error(), Equals, \"not found\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"). You may\n\/\/ not use this file except in compliance with the License. A copy of the\n\/\/ License is located at\n\/\/\n\/\/\thttp:\/\/aws.amazon.com\/apache2.0\/\n\/\/\n\/\/ or in the \"license\" file accompanying this file. This file is distributed\n\/\/ on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n\/\/ express or implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\n\/\/ Package statemanager implements simple constructs for saving and restoring\n\/\/ state from disk.\n\/\/ It provides the interface for a StateManager which can read\/write arbitrary\n\/\/ json data from\/to disk.\npackage statemanager\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/config\"\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/logger\"\n)\n\n\/\/ The current version of saved data. Any backwards or forwards incompatible\n\/\/ changes to the data-format should increment this number and retain the\n\/\/ ability to read old data versions.\nconst EcsDataVersion = 1\n\n\/\/ Filename in the ECS_DATADIR\nconst ecsDataFile = \"ecs_agent_data.json\"\n\n\/\/ How frequently to flush to disk\nconst minSaveInterval = 10 * time.Second\n\nvar log = logger.ForModule(\"statemanager\")\n\n\/\/ Saveable types should be able to be json serializable and deserializable\n\/\/ Properly, this should have json.Marshaler\/json.Unmarshaler here, but string\n\/\/ and so on can be marshaled\/unmarshaled sanely but don't fit those interfaces.\ntype Saveable interface{}\n\n\/\/ Saver is a type that can be saved\ntype Saver interface {\n\tSave() error\n\tForceSave() error\n}\n\n\/\/ Option functions are functions that may be used as part of constructing a new\n\/\/ StateManager\ntype Option func(StateManager)\n\ntype saveableState map[string]*Saveable\ntype intermediateSaveableState map[string]json.RawMessage\n\n\/\/ State is a struct of all data that should be saveable\/loadable to disk. 
Each\n\/\/ element should be json-serializable.\n\/\/\n\/\/ Note, changing this to work with BinaryMarshaler or another more compact\n\/\/ format would be fine, but everything already needs a json representation\n\/\/ since that's our wire format and the extra space taken \/ IO-time is expected\n\/\/ to be fairly negligible.\ntype state struct {\n\tData saveableState\n\n\tVersion int\n}\n\ntype intermediateState struct {\n\tData intermediateSaveableState\n}\n\ntype versionOnlyState struct {\n\tVersion int\n}\n\n\/\/ A StateManager can load and save state from disk.\n\/\/ Load is not expected to return an error if there is no state to load.\ntype StateManager interface {\n\tSaver\n\tLoad() error\n}\n\ntype basicStateManager struct {\n\tstatePath string \/\/ The path to a file in which state can be serialized\n\n\tstate *state \/\/ pointers to the data we should save \/ load into\n\n\tsync.Mutex \/\/ guards save times\n\tlastSave time.Time \/\/the last time a save completed\n\tnextPlannedSave time.Time \/\/the next time a save is planned\n}\n\n\/\/ NewStateManager constructs a new StateManager which saves data at the\n\/\/ location specified in cfg and operates under the given options.\n\/\/ The returned StateManager will not save more often than every 10 seconds and\n\/\/ will not reliably return errors with Save, but will log them appropriately.\nfunc NewStateManager(cfg *config.Config, options ...Option) (StateManager, error) {\n\tfi, err := os.Stat(cfg.DataDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !fi.IsDir() {\n\t\treturn nil, errors.New(\"State manager DataDir must exist\")\n\t}\n\n\tstate := &state{\n\t\tData: make(saveableState),\n\t\tVersion: EcsDataVersion,\n\t}\n\tmanager := &basicStateManager{\n\t\tstatePath: cfg.DataDir,\n\t\tstate: state,\n\t}\n\n\tfor _, option := range options {\n\t\toption(manager)\n\t}\n\n\treturn manager, nil\n}\n\n\/\/ AddSaveable is an option that adds a given saveable as one that should be saved\n\/\/ under the given name. The name must be the same across uses of the\n\/\/ statemanager (e.g. program invocations) for it to be serialized and\n\/\/ deserialized correctly.\nfunc AddSaveable(name string, saveable Saveable) Option {\n\treturn (Option)(func(m StateManager) {\n\t\tmanager, ok := m.(*basicStateManager)\n\t\tif !ok {\n\t\t\tlog.Crit(\"Unable to add to state manager; unknown instantiation\")\n\t\t\treturn\n\t\t}\n\t\tmanager.state.Data[name] = &saveable\n\t})\n}\n\n\/\/ Save triggers a save to file, though respects a minimum save interval to wait\n\/\/ between saves.\nfunc (manager *basicStateManager) Save() error {\n\tmanager.Lock()\n\tdefer manager.Unlock()\n\tif time.Since(manager.lastSave) >= minSaveInterval {\n\t\t\/\/ we can just save\n\t\terr := manager.ForceSave()\n\t\tmanager.lastSave = time.Now()\n\t\tmanager.nextPlannedSave = time.Time{} \/\/ re-zero it; assume all pending desires to save are fulfilled\n\t\treturn err\n\t} else if manager.nextPlannedSave.IsZero() {\n\t\t\/\/ No save planned yet, we should plan one.\n\t\tnext := manager.lastSave.Add(minSaveInterval)\n\t\tmanager.nextPlannedSave = next\n\t\tgo func() {\n\t\t\ttime.Sleep(next.Sub(time.Now()))\n\t\t\tmanager.Save()\n\t\t}()\n\t}\n\t\/\/ else nextPlannedSave wasn't Zero so there's a save planned elsewhere that'll\n\t\/\/ fulfill this\n\treturn nil\n}\n\n\/\/ ForceSave saves the given State to a file. 
It is an atomic operation on POSIX\n\/\/ systems (by Renaming over the target file).\n\/\/ This function logs errors at will and does not necessarily expect the caller\n\/\/ to handle the error because there's little a caller can do in general other\n\/\/ than just keep going.\n\/\/ In addition, the StateManager internally buffers save requests in order to\n\/\/ only save at most every STATE_SAVE_INTERVAL.\nfunc (manager *basicStateManager) ForceSave() error {\n\tlog.Info(\"Saving state!\")\n\ts := manager.state\n\ts.Version = EcsDataVersion\n\n\tdata, err := json.Marshal(s)\n\tif err != nil {\n\t\tlog.Error(\"Error saving state; could not marshal data; this is odd\", \"err\", err)\n\t\treturn err\n\t}\n\t\/\/ Make our temp-file on the same volume as our data-file to ensure we can\n\t\/\/ actually move it atomically; cross-device renaming will error out.\n\ttmpfile, err := ioutil.TempFile(manager.statePath, \"tmp_ecs_agent_data\")\n\tif err != nil {\n\t\tlog.Error(\"Error saving state; could not create temp file to save state\", \"err\", err)\n\t\treturn err\n\t}\n\t_, err = tmpfile.Write(data)\n\tif err != nil {\n\t\tlog.Error(\"Error saving state; could not write to temp file to save state\", \"err\", err)\n\t\treturn err\n\t}\n\terr = os.Rename(tmpfile.Name(), filepath.Join(manager.statePath, ecsDataFile))\n\tif err != nil {\n\t\tlog.Error(\"Error saving state; could not move to data file\", \"err\", err)\n\t}\n\treturn err\n}\n\n\/\/ Load reads state off the disk from the well-known filepath and loads it into\n\/\/ the passed State object.\nfunc (manager *basicStateManager) Load() error {\n\t\/\/ Note that even if Save overwrites the file we're looking at here, we\n\t\/\/ still hold the old inode and should read the old data so no locking is\n\t\/\/ needed (given Linux and the ext* family of fs at least).\n\ts := manager.state\n\tlog.Info(\"Loading state!\")\n\tfile, err := os.Open(filepath.Join(manager.statePath, ecsDataFile))\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\t\/\/ Happens every first run; not a real error\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tdata, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\tlog.Error(\"Error reading existing state file\", \"err\", err)\n\t\treturn err\n\t}\n\t\/\/ Dry-run to make sure this is a version we can understand\n\ttmps := versionOnlyState{}\n\terr = json.Unmarshal(data, &tmps)\n\tif err != nil {\n\t\tlog.Crit(\"Could not unmarshal existing state; corrupted data?\", \"err\", err, \"data\", data)\n\t\treturn err\n\t}\n\tif tmps.Version > EcsDataVersion {\n\t\tstrversion := strconv.Itoa(tmps.Version)\n\t\treturn errors.New(\"Unsupported data format: Version \" + strversion + \" not \" + strconv.Itoa(EcsDataVersion))\n\t}\n\t\/\/ Now load it into the actual state. 
The reason we do this with the\n\t\/\/ intermediate state is that we *must* unmarshal directly into the\n\t\/\/ \"saveable\" pointers we were given in AddSaveable; if we unmarshal\n\t\/\/ directly into a map with values of pointers, those pointers are lost.\n\t\/\/ We *must* unmarshal this way because the existing pointers could have\n\t\/\/ semi-initialized data (and are actually expected to)\n\n\tvar intermediate intermediateState\n\terr = json.Unmarshal(data, &intermediate)\n\tif err != nil {\n\t\tlog.Debug(\"Could not unmarshal into intermediate\")\n\t\treturn err\n\t}\n\n\tfor key, rawJSON := range intermediate.Data {\n\t\tactualPointer, ok := manager.state.Data[key]\n\t\tif !ok {\n\t\t\tlog.Error(\"Loading state: potentially malformed json key of \" + key)\n\t\t\tcontinue\n\t\t}\n\t\terr = json.Unmarshal(rawJSON, actualPointer)\n\t\tif err != nil {\n\t\t\tlog.Debug(\"Could not unmarshal into actual\")\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlog.Debug(\"Loaded state!\", \"state\", s)\n\treturn nil\n}\n<commit_msg>State: Bump data format version<commit_after>\/\/ Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"). You may\n\/\/ not use this file except in compliance with the License. A copy of the\n\/\/ License is located at\n\/\/\n\/\/\thttp:\/\/aws.amazon.com\/apache2.0\/\n\/\/\n\/\/ or in the \"license\" file accompanying this file. This file is distributed\n\/\/ on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n\/\/ express or implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\n\/\/ Package statemanager implements simple constructs for saving and restoring\n\/\/ state from disk.\n\/\/ It provides the interface for a StateManager which can read\/write arbitrary\n\/\/ json data from\/to disk.\npackage statemanager\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/config\"\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/logger\"\n)\n\n\/\/ The current version of saved data. Any backwards or forwards incompatible\n\/\/ changes to the data-format should increment this number and retain the\n\/\/ ability to read old data versions.\nconst EcsDataVersion = 2\n\n\/\/ Filename in the ECS_DATADIR\nconst ecsDataFile = \"ecs_agent_data.json\"\n\n\/\/ How frequently to flush to disk\nconst minSaveInterval = 10 * time.Second\n\nvar log = logger.ForModule(\"statemanager\")\n\n\/\/ Saveable types should be able to be json serializable and deserializable\n\/\/ Properly, this should have json.Marshaler\/json.Unmarshaler here, but string\n\/\/ and so on can be marshaled\/unmarshaled sanely but don't fit those interfaces.\ntype Saveable interface{}\n\n\/\/ Saver is a type that can be saved\ntype Saver interface {\n\tSave() error\n\tForceSave() error\n}\n\n\/\/ Option functions are functions that may be used as part of constructing a new\n\/\/ StateManager\ntype Option func(StateManager)\n\ntype saveableState map[string]*Saveable\ntype intermediateSaveableState map[string]json.RawMessage\n\n\/\/ State is a struct of all data that should be saveable\/loadable to disk. 
Each\n\/\/ element should be json-serializable.\n\/\/\n\/\/ Note, changing this to work with BinaryMarshaler or another more compact\n\/\/ format would be fine, but everything already needs a json representation\n\/\/ since that's our wire format and the extra space taken \/ IO-time is expected\n\/\/ to be fairly negligible.\ntype state struct {\n\tData saveableState\n\n\tVersion int\n}\n\ntype intermediateState struct {\n\tData intermediateSaveableState\n}\n\ntype versionOnlyState struct {\n\tVersion int\n}\n\n\/\/ A StateManager can load and save state from disk.\n\/\/ Load is not expected to return an error if there is no state to load.\ntype StateManager interface {\n\tSaver\n\tLoad() error\n}\n\ntype basicStateManager struct {\n\tstatePath string \/\/ The path to a file in which state can be serialized\n\n\tstate *state \/\/ pointers to the data we should save \/ load into\n\n\tsync.Mutex \/\/ guards save times\n\tlastSave time.Time \/\/the last time a save completed\n\tnextPlannedSave time.Time \/\/the next time a save is planned\n}\n\n\/\/ NewStateManager constructs a new StateManager which saves data at the\n\/\/ location specified in cfg and operates under the given options.\n\/\/ The returned StateManager will not save more often than every 10 seconds and\n\/\/ will not reliably return errors with Save, but will log them appropriately.\nfunc NewStateManager(cfg *config.Config, options ...Option) (StateManager, error) {\n\tfi, err := os.Stat(cfg.DataDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !fi.IsDir() {\n\t\treturn nil, errors.New(\"State manager DataDir must exist\")\n\t}\n\n\tstate := &state{\n\t\tData: make(saveableState),\n\t\tVersion: EcsDataVersion,\n\t}\n\tmanager := &basicStateManager{\n\t\tstatePath: cfg.DataDir,\n\t\tstate: state,\n\t}\n\n\tfor _, option := range options {\n\t\toption(manager)\n\t}\n\n\treturn manager, nil\n}\n\n\/\/ AddSaveable is an option that adds a given saveable as one that should be saved\n\/\/ under the given name. The name must be the same across uses of the\n\/\/ statemanager (e.g. program invocations) for it to be serialized and\n\/\/ deserialized correctly.\nfunc AddSaveable(name string, saveable Saveable) Option {\n\treturn (Option)(func(m StateManager) {\n\t\tmanager, ok := m.(*basicStateManager)\n\t\tif !ok {\n\t\t\tlog.Crit(\"Unable to add to state manager; unknown instantiation\")\n\t\t\treturn\n\t\t}\n\t\tmanager.state.Data[name] = &saveable\n\t})\n}\n\n\/\/ Save triggers a save to file, though respects a minimum save interval to wait\n\/\/ between saves.\nfunc (manager *basicStateManager) Save() error {\n\tmanager.Lock()\n\tdefer manager.Unlock()\n\tif time.Since(manager.lastSave) >= minSaveInterval {\n\t\t\/\/ we can just save\n\t\terr := manager.ForceSave()\n\t\tmanager.lastSave = time.Now()\n\t\tmanager.nextPlannedSave = time.Time{} \/\/ re-zero it; assume all pending desires to save are fulfilled\n\t\treturn err\n\t} else if manager.nextPlannedSave.IsZero() {\n\t\t\/\/ No save planned yet, we should plan one.\n\t\tnext := manager.lastSave.Add(minSaveInterval)\n\t\tmanager.nextPlannedSave = next\n\t\tgo func() {\n\t\t\ttime.Sleep(next.Sub(time.Now()))\n\t\t\tmanager.Save()\n\t\t}()\n\t}\n\t\/\/ else nextPlannedSave wasn't Zero so there's a save planned elsewhere that'll\n\t\/\/ fulfill this\n\treturn nil\n}\n\n\/\/ ForceSave saves the given State to a file. 
It is an atomic operation on POSIX\n\/\/ systems (by Renaming over the target file).\n\/\/ This function logs errors at will and does not necessarily expect the caller\n\/\/ to handle the error because there's little a caller can do in general other\n\/\/ than just keep going.\n\/\/ In addition, the StateManager internally buffers save requests in order to\n\/\/ only save at most every STATE_SAVE_INTERVAL.\nfunc (manager *basicStateManager) ForceSave() error {\n\tlog.Info(\"Saving state!\")\n\ts := manager.state\n\ts.Version = EcsDataVersion\n\n\tdata, err := json.Marshal(s)\n\tif err != nil {\n\t\tlog.Error(\"Error saving state; could not marshal data; this is odd\", \"err\", err)\n\t\treturn err\n\t}\n\t\/\/ Make our temp-file on the same volume as our data-file to ensure we can\n\t\/\/ actually move it atomically; cross-device renaming will error out.\n\ttmpfile, err := ioutil.TempFile(manager.statePath, \"tmp_ecs_agent_data\")\n\tif err != nil {\n\t\tlog.Error(\"Error saving state; could not create temp file to save state\", \"err\", err)\n\t\treturn err\n\t}\n\t_, err = tmpfile.Write(data)\n\tif err != nil {\n\t\tlog.Error(\"Error saving state; could not write to temp file to save state\", \"err\", err)\n\t\treturn err\n\t}\n\terr = os.Rename(tmpfile.Name(), filepath.Join(manager.statePath, ecsDataFile))\n\tif err != nil {\n\t\tlog.Error(\"Error saving state; could not move to data file\", \"err\", err)\n\t}\n\treturn err\n}\n\n\/\/ Load reads state off the disk from the well-known filepath and loads it into\n\/\/ the passed State object.\nfunc (manager *basicStateManager) Load() error {\n\t\/\/ Note that even if Save overwrites the file we're looking at here, we\n\t\/\/ still hold the old inode and should read the old data so no locking is\n\t\/\/ needed (given Linux and the ext* family of fs at least).\n\ts := manager.state\n\tlog.Info(\"Loading state!\")\n\tfile, err := os.Open(filepath.Join(manager.statePath, ecsDataFile))\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\t\/\/ Happens every first run; not a real error\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tdata, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\tlog.Error(\"Error reading existing state file\", \"err\", err)\n\t\treturn err\n\t}\n\t\/\/ Dry-run to make sure this is a version we can understand\n\ttmps := versionOnlyState{}\n\terr = json.Unmarshal(data, &tmps)\n\tif err != nil {\n\t\tlog.Crit(\"Could not unmarshal existing state; corrupted data?\", \"err\", err, \"data\", data)\n\t\treturn err\n\t}\n\tif tmps.Version > EcsDataVersion {\n\t\tstrversion := strconv.Itoa(tmps.Version)\n\t\treturn errors.New(\"Unsupported data format: Version \" + strversion + \" not \" + strconv.Itoa(EcsDataVersion))\n\t}\n\t\/\/ Now load it into the actual state. 
The reason we do this with the\n\t\/\/ intermediate state is that we *must* unmarshal directly into the\n\t\/\/ \"saveable\" pointers we were given in AddSaveable; if we unmarshal\n\t\/\/ directly into a map with values of pointers, those pointers are lost.\n\t\/\/ We *must* unmarshal this way because the existing pointers could have\n\t\/\/ semi-initialized data (and are actually expected to)\n\n\tvar intermediate intermediateState\n\terr = json.Unmarshal(data, &intermediate)\n\tif err != nil {\n\t\tlog.Debug(\"Could not unmarshal into intermediate\")\n\t\treturn err\n\t}\n\n\tfor key, rawJSON := range intermediate.Data {\n\t\tactualPointer, ok := manager.state.Data[key]\n\t\tif !ok {\n\t\t\tlog.Error(\"Loading state: potentially malformed json key of \" + key)\n\t\t\tcontinue\n\t\t}\n\t\terr = json.Unmarshal(rawJSON, actualPointer)\n\t\tif err != nil {\n\t\t\tlog.Debug(\"Could not unmarshal into actual\")\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlog.Debug(\"Loaded state!\", \"state\", s)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2014 Couchbase, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ \t\thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage snowball\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/blevesearch\/bleve\/analysis\"\n\t\"github.com\/blevesearch\/bleve\/registry\"\n\n\t\"github.com\/kljensen\/snowball\"\n)\n\nconst Name = \"stemmer_snowball\"\n\ntype SnowballStemmer struct {\n\tlanguage string\n}\n\nfunc NewSnowballStemmer(language string) *SnowballStemmer {\n\treturn &SnowballStemmer{\n\t\tlanguage: language,\n\t}\n}\n\nfunc (s *SnowballStemmer) Filter(input analysis.TokenStream) analysis.TokenStream {\n\tfor _, token := range input {\n\t\t\/\/ if it is not a protected keyword, stem it\n\t\tif !token.KeyWord {\n\t\t\tstemmed, _ := snowball.Stem(string(token.Term), s.langauge, true)\n\t\t\ttoken.Term = []byte(stemmed)\n\t\t}\n\t}\n\treturn input\n}\n\nfunc SnowballStemmerConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.TokenFilter, error) {\n\tlanguage, ok := config[\"language\"].(string)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"must specify language\")\n\t}\n\treturn NewSnowballStemmer(language), nil\n}\n\nfunc init() {\n\tregistry.RegisterTokenFilter(Name, SnowballStemmerConstructor)\n}\n<commit_msg>Fix another spelling for CI<commit_after>\/\/ Copyright (c) 2014 Couchbase, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ \t\thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage snowball\n\nimport 
(\n\t\"fmt\"\n\n\t\"github.com\/blevesearch\/bleve\/analysis\"\n\t\"github.com\/blevesearch\/bleve\/registry\"\n\n\t\"github.com\/kljensen\/snowball\"\n)\n\nconst Name = \"stemmer_snowball\"\n\ntype SnowballStemmer struct {\n\tlanguage string\n}\n\nfunc NewSnowballStemmer(language string) *SnowballStemmer {\n\treturn &SnowballStemmer{\n\t\tlanguage: language,\n\t}\n}\n\nfunc (s *SnowballStemmer) Filter(input analysis.TokenStream) analysis.TokenStream {\n\tfor _, token := range input {\n\t\t\/\/ if it is not a protected keyword, stem it\n\t\tif !token.KeyWord {\n\t\t\tstemmed, _ := snowball.Stem(string(token.Term), s.language, true)\n\t\t\ttoken.Term = []byte(stemmed)\n\t\t}\n\t}\n\treturn input\n}\n\nfunc SnowballStemmerConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.TokenFilter, error) {\n\tlanguage, ok := config[\"language\"].(string)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"must specify language\")\n\t}\n\treturn NewSnowballStemmer(language), nil\n}\n\nfunc init() {\n\tregistry.RegisterTokenFilter(Name, SnowballStemmerConstructor)\n}\n<|endoftext|>"} {"text":"<commit_before>package gitManip\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/fatih\/color\"\n\n\t\"bytes\"\n\n\t\"github.com\/k0pernicus\/goyave\/traces\"\n\tgit \"gopkg.in\/libgit2\/git2go.v26\"\n)\n\n\/*Map to match the RepositoryState enum type with a string\n *\/\nvar repositoryStateToString = map[git.RepositoryState]string{\n\tgit.RepositoryStateNone: \"None\",\n\tgit.RepositoryStateMerge: \"Merge\",\n\tgit.RepositoryStateRevert: \"Revert\",\n\tgit.RepositoryStateCherrypick: \"Cherrypick\",\n\tgit.RepositoryStateBisect: \"Bisect\",\n\tgit.RepositoryStateRebase: \"Rebase\",\n\tgit.RepositoryStateRebaseInteractive: \"Rebase Interactive\",\n\tgit.RepositoryStateRebaseMerge: \"Rebase Merge\",\n\tgit.RepositoryStateApplyMailbox: \"Apply Mailbox\",\n\tgit.RepositoryStateApplyMailboxOrRebase: \"Apply Mailbox or Rebase\",\n}\n\n\/*Global variable to set the StatusOption parameter, in order to list each file status\n *\/\nvar statusOption = git.StatusOptions{\n\tShow: git.StatusShowIndexAndWorkdir,\n\tFlags: git.StatusOptIncludeUntracked,\n\tPathspec: []string{},\n}\n\n\/*GitObject contains information about the current git repository\n *\n *The structure is:\n * accessible:\n *\t\tDoes the repository still exist on the hard drive?\n *\tpath:\n *\t\tThe file path.\n *\trepository:\n *\t\tThe object repository.\n *\/\ntype GitObject struct {\n\taccessible error\n\tpath string\n\trepository git.Repository\n}\n\n\/*New is a constructor for GitObject\n *\n * It needs:\n *\tpath:\n *\t\tThe path of the current repository.\n *\/\nfunc New(path string) *GitObject {\n\tr, err := git.OpenRepository(path)\n\tif err != nil {\n\t\t\/\/ Keep the error in the accessible field; dereferencing a nil repository would panic\n\t\treturn &GitObject{accessible: err, path: path}\n\t}\n\treturn &GitObject{accessible: err, path: path, repository: *r}\n}\n\n\/*Clone clones a given repository from a public URL\n *\n * It needs:\n * path:\n *\t\tThe local path to clone the repository.\n *\tURL:\n *\t\tThe remote URL to fetch the repository.\n *\/\nfunc Clone(path, URL string) error {\n\t_, err := git.Clone(URL, path, &git.CloneOptions{})\n\treturn err\n}\n\n\/*GetRemoteURL returns the associated remote URL of a given local path repository\n *\n * It needs:\n *\tpath\n *\t\tThe local path of a git repository\n *\/\nfunc GetRemoteURL(path string) string {\n\tr, err := git.OpenRepository(path)\n\tif err != nil {\n\t\tfmt.Println(\"The repository can't be opened\")\n\t\treturn \"\"\n\t}\n\tremoteCollection := r.Remotes\n\toriginRemote, err := remoteCollection.Lookup(\"origin\")\n\tif err != nil 
{\n\t\ttraces.WarningTracer.Printf(\"can't lookup origin remote URL for %s\", path)\n\t\treturn \"\"\n\t}\n\treturn originRemote.Url()\n}\n\n\/*isAccessible reports whether the current git repository still exists on disk.\n *This method returns a boolean value: true if the git repository is still accessible (still exists), or false if not.\n *\/\nfunc (g *GitObject) isAccessible() bool {\n\treturn g.accessible == nil\n}\n\n\/*Status prints the current status of the repository, accessible via the structure path field.\n *This method works only if the repository is accessible.\n *\/\nfunc (g *GitObject) Status() {\n\tif g.isAccessible() {\n\t\tif err := g.printChanges(); err != nil {\n\t\t\tfmt.Println(color.RedString(\"Impossible to get stats from %s, due to error %s\", g.path, err))\n\t\t}\n\t} else {\n\t\tfmt.Println(color.RedString(\"Repository %s not found!\", g.path))\n\t}\n}\n\n\/*getDiffWithWT returns the difference between the working tree and the index, for the current git repository.\n *If there is an error processing the request, it returns an error.\n *\/\nfunc (g *GitObject) getDiffWithWT() (*git.Diff, error) {\n\t\/\/ Get the index of the repository\n\tcurrentIndex, err := g.repository.Index()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Get the default diff options, and add custom flags to it\n\tdefaultDiffOptions, err := git.DefaultDiffOptions()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefaultDiffOptions.Flags = defaultDiffOptions.Flags | git.DiffIncludeUntracked | git.DiffIncludeTypeChange\n\t\/\/ Check the difference between the working directory and the index\n\tdiff, err := g.repository.DiffIndexToWorkdir(currentIndex, &defaultDiffOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn diff, nil\n}\n\nfunc (g *GitObject) getCommitsAheadBehind() (int, int, error) {\n\trepositoryHead, err := g.repository.Head()\n\tif err != nil {\n\t\treturn -1, -1, err\n\t}\n\t\/\/ Check upstream branch head\n\tcBranch := repositoryHead.Branch()\n\tcReference, err := cBranch.Upstream()\n\tif err != nil {\n\t\treturn -1, -1, err\n\t}\n\tcReferenceTarget := cReference.Target()\n\tcRepositoryTarget := repositoryHead.Target()\n\tcommitsAhead, commitsBehind, err := g.repository.AheadBehind(cRepositoryTarget, cReferenceTarget)\n\treturn commitsAhead, commitsBehind, err\n}\n\n\/*printChanges prints out all changes for the current git repository.\n *If there is an error processing the request, that error is returned.\n *\/\nfunc (g *GitObject) printChanges() error {\n\tdiff, err := g.getDiffWithWT()\n\tvar buffer bytes.Buffer\n\tif err != nil {\n\t\treturn err\n\t}\n\t\n\tnumDeltas, err := diff.NumDeltas()\n\tif err != nil {\n\t\treturn err\n\t}\n\t\n\theadDetached, err := g.repository.IsHeadDetached()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif headDetached {\n\t\toutputHead := color.RedString(\"\\t\/!\\\\ The repository's HEAD is detached! 
\/!\\\\\\n\")\n\t\tbuffer.WriteString(outputHead)\n\t}\n\t\n\tif numDeltas > 0 {\n\t\tbuffer.WriteString(fmt.Sprintf(\"%s %9s\\t[%d modification(s)]\\n\", color.RedString(\"✘\"), g.path, numDeltas))\n\t\tfor i := 0; i < numDeltas; i++ {\n\t\t\tdelta, _ := diff.GetDelta(i)\n\t\t\tcurrentStatus := delta.Status\n\t\t\tnewFile := delta.NewFile.Path\n\t\t\toldFile := delta.OldFile.Path\n\t\t\tswitch currentStatus {\n\t\t\tcase git.DeltaAdded:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s has been added!\\n\", color.MagentaString(newFile)))\n\t\t\tcase git.DeltaDeleted:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s has been deleted!\\n\", color.MagentaString(newFile)))\n\t\t\tcase git.DeltaModified:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s has been modified!\\n\", color.MagentaString(newFile)))\n\t\t\tcase git.DeltaRenamed:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s has been renamed to %s!\\n\", color.MagentaString(oldFile), color.MagentaString(newFile)))\n\t\t\tcase git.DeltaUntracked:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s is untracked - please add it or update the gitignore file!\\n\", color.MagentaString(newFile)))\n\t\t\tcase git.DeltaTypeChange:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> the type of %s has been changed from %d to %d!\\n\", color.MagentaString(newFile), delta.OldFile.Mode, delta.NewFile.Mode))\n\t\t\t}\n\t\t}\n\t} else {\n\t\tbuffer.WriteString(fmt.Sprintf(\"%s %s\\n\", color.GreenString(\"✔\"), g.path))\n\t}\n\t\n\tcommitsAhead, commitsBehind, err := g.getCommitsAheadBehind()\n\tif err == nil {\n\t\tif commitsAhead != 0 {\n\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t%s %d commits AHEAD - You need to push your modifications soon\\n\", color.RedString(\"⟳\"), commitsAhead))\n\t\t}\n\t\tif commitsBehind != 0 {\n\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t%s %d commits BEHIND - You need to pull the modifications from your remote branch soon\\n\", color.RedString(\"⟲\"), commitsBehind))\n\t\t}\n\t}\n\t\n\tfmt.Print(buffer.String())\n\treturn nil\n}\n<commit_msg>Changes the output for ahead\/behind messages<commit_after>package gitManip\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/fatih\/color\"\n\n\t\"bytes\"\n\n\t\"github.com\/k0pernicus\/goyave\/traces\"\n\tgit \"gopkg.in\/libgit2\/git2go.v26\"\n)\n\n\/*Map to match the RepositoryState enum type with a string\n *\/\nvar repositoryStateToString = map[git.RepositoryState]string{\n\tgit.RepositoryStateNone: \"None\",\n\tgit.RepositoryStateMerge: \"Merge\",\n\tgit.RepositoryStateRevert: \"Revert\",\n\tgit.RepositoryStateCherrypick: \"Cherrypick\",\n\tgit.RepositoryStateBisect: \"Bisect\",\n\tgit.RepositoryStateRebase: \"Rebase\",\n\tgit.RepositoryStateRebaseInteractive: \"Rebase Interactive\",\n\tgit.RepositoryStateRebaseMerge: \"Rebase Merge\",\n\tgit.RepositoryStateApplyMailbox: \"Apply Mailbox\",\n\tgit.RepositoryStateApplyMailboxOrRebase: \"Apply Mailbox or Rebase\",\n}\n\n\/*Global variable to set the StatusOption parameter, in order to list each file status\n *\/\nvar statusOption = git.StatusOptions{\n\tShow: git.StatusShowIndexAndWorkdir,\n\tFlags: git.StatusOptIncludeUntracked,\n\tPathspec: []string{},\n}\n\n\/*GitObject contains information about the current git repository\n *\n *The structure is:\n * accessible:\n *\t\tDoes the repository still exist on the hard drive?\n *\tpath:\n *\t\tThe file path.\n *\trepository:\n *\t\tThe object repository.\n *\/\ntype GitObject struct {\n\taccessible error\n\tpath string\n\trepository git.Repository\n}\n\n\/*New is a 
constructor for GitObject\n *\n * It needs:\n *\tpath:\n *\t\tThe path of the current repository.\n *\/\nfunc New(path string) *GitObject {\n\tr, err := git.OpenRepository(path)\n\tif err != nil {\n\t\t\/\/ Keep the error in the accessible field; dereferencing a nil repository would panic\n\t\treturn &GitObject{accessible: err, path: path}\n\t}\n\treturn &GitObject{accessible: err, path: path, repository: *r}\n}\n\n\/*Clone clones a given repository from a public URL\n *\n * It needs:\n * path:\n *\t\tThe local path to clone the repository.\n *\tURL:\n *\t\tThe remote URL to fetch the repository.\n *\/\nfunc Clone(path, URL string) error {\n\t_, err := git.Clone(URL, path, &git.CloneOptions{})\n\treturn err\n}\n\n\/*GetRemoteURL returns the associated remote URL of a given local path repository\n *\n * It needs:\n *\tpath\n *\t\tThe local path of a git repository\n *\/\nfunc GetRemoteURL(path string) string {\n\tr, err := git.OpenRepository(path)\n\tif err != nil {\n\t\tfmt.Println(\"The repository can't be opened\")\n\t\treturn \"\"\n\t}\n\tremoteCollection := r.Remotes\n\toriginRemote, err := remoteCollection.Lookup(\"origin\")\n\tif err != nil {\n\t\ttraces.WarningTracer.Printf(\"can't lookup origin remote URL for %s\", path)\n\t\treturn \"\"\n\t}\n\treturn originRemote.Url()\n}\n\n\/*isAccessible reports whether the current git repository still exists on disk.\n *This method returns a boolean value: true if the git repository is still accessible (still exists), or false if not.\n *\/\nfunc (g *GitObject) isAccessible() bool {\n\treturn g.accessible == nil\n}\n\n\/*Status prints the current status of the repository, accessible via the structure path field.\n *This method works only if the repository is accessible.\n *\/\nfunc (g *GitObject) Status() {\n\tif g.isAccessible() {\n\t\tif err := g.printChanges(); err != nil {\n\t\t\tfmt.Println(color.RedString(\"Impossible to get stats from %s, due to error %s\", g.path, err))\n\t\t}\n\t} else {\n\t\tfmt.Println(color.RedString(\"Repository %s not found!\", g.path))\n\t}\n}\n\n\/*getDiffWithWT returns the difference between the working tree and the index, for the current git repository.\n *If there is an error processing the request, it returns an error.\n *\/\nfunc (g *GitObject) getDiffWithWT() (*git.Diff, error) {\n\t\/\/ Get the index of the repository\n\tcurrentIndex, err := g.repository.Index()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Get the default diff options, and add custom flags to it\n\tdefaultDiffOptions, err := git.DefaultDiffOptions()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefaultDiffOptions.Flags = defaultDiffOptions.Flags | git.DiffIncludeUntracked | git.DiffIncludeTypeChange\n\t\/\/ Check the difference between the working directory and the index\n\tdiff, err := g.repository.DiffIndexToWorkdir(currentIndex, &defaultDiffOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn diff, nil\n}\n\nfunc (g *GitObject) getCommitsAheadBehind() (int, int, error) {\n\trepositoryHead, err := g.repository.Head()\n\tif err != nil {\n\t\treturn -1, -1, err\n\t}\n\t\/\/ Check upstream branch head\n\tcBranch := repositoryHead.Branch()\n\tcReference, err := cBranch.Upstream()\n\tif err != nil {\n\t\treturn -1, -1, err\n\t}\n\tcReferenceTarget := cReference.Target()\n\tcRepositoryTarget := repositoryHead.Target()\n\tcommitsAhead, commitsBehind, err := g.repository.AheadBehind(cRepositoryTarget, cReferenceTarget)\n\treturn commitsAhead, commitsBehind, err\n}\n\n\/*printChanges prints out all changes for the current git repository.\n *If there is an error processing the request, that error is returned.\n *\/\nfunc (g *GitObject) printChanges() error {\n\tdiff, err := g.getDiffWithWT()\n\tvar buffer bytes.Buffer\n\tif err != nil {\n\t\treturn 
err\n\t}\n\t\n\tnumDeltas, err := diff.NumDeltas()\n\tif err != nil {\n\t\treturn err\n\t}\n\t\n\theadDetached, err := g.repository.IsHeadDetached()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif headDetached {\n\t\toutputHead := color.RedString(\"\\t\/!\\\\ The repository's HEAD is detached! \/!\\\\\\n\")\n\t\tbuffer.WriteString(outputHead)\n\t}\n\t\n\tif numDeltas > 0 {\n\t\tbuffer.WriteString(fmt.Sprintf(\"%s %9s\\t[%d modification(s)]\\n\", color.RedString(\"✘\"), g.path, numDeltas))\n\t\tfor i := 0; i < numDeltas; i++ {\n\t\t\tdelta, _ := diff.GetDelta(i)\n\t\t\tcurrentStatus := delta.Status\n\t\t\tnewFile := delta.NewFile.Path\n\t\t\toldFile := delta.OldFile.Path\n\t\t\tswitch currentStatus {\n\t\t\tcase git.DeltaAdded:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s has been added!\\n\", color.MagentaString(newFile)))\n\t\t\tcase git.DeltaDeleted:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s has been deleted!\\n\", color.MagentaString(newFile)))\n\t\t\tcase git.DeltaModified:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s has been modified!\\n\", color.MagentaString(newFile)))\n\t\t\tcase git.DeltaRenamed:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s has been renamed to %s!\\n\", color.MagentaString(oldFile), color.MagentaString(newFile)))\n\t\t\tcase git.DeltaUntracked:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s is untracked - please add it or update the gitignore file!\\n\", color.MagentaString(newFile)))\n\t\t\tcase git.DeltaTypeChange:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> the type of %s has been changed from %d to %d!\\n\", color.MagentaString(newFile), delta.OldFile.Mode, delta.NewFile.Mode))\n\t\t\t}\n\t\t}\n\t} else {\n\t\tbuffer.WriteString(fmt.Sprintf(\"%s %s\\n\", color.GreenString(\"✔\"), g.path))\n\t}\n\t\n\tcommitsAhead, commitsBehind, err := g.getCommitsAheadBehind()\n\tif err == nil {\n\t\tif commitsAhead != 0 {\n\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t%s %d commits AHEAD - Soon, you will need to push your modifications\\n\", color.RedString(\"⟳\"), commitsAhead))\n\t\t}\n\t\tif commitsBehind != 0 {\n\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t%s %d commits BEHIND - Soon, you will need to pull the modifications from the remote branch\\n\", color.RedString(\"⟲\"), commitsBehind))\n\t\t}\n\t}\n\t\n\tfmt.Print(buffer.String())\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package google\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/acctest\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"google.golang.org\/api\/compute\/v1\"\n)\n\n\/\/ Add two key value pairs\nfunc TestAccComputeProjectMetadata_basic(t *testing.T) {\n\tskipIfEnvNotSet(t,\n\t\t[]string{\n\t\t\t\"GOOGLE_ORG\",\n\t\t\t\"GOOGLE_BILLING_ACCOUNT\",\n\t\t}...,\n\t)\n\n\tbillingId := os.Getenv(\"GOOGLE_BILLING_ACCOUNT\")\n\tvar project compute.Project\n\tpid := \"terrafom-test-\" + acctest.RandString(10)\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckComputeProjectMetadataDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccComputeProject_basic0_metadata(pid, pname, org, billingId),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckComputeProjectExists(\n\t\t\t\t\t\t\"google_compute_project_metadata.fizzbuzz\", pid, 
&project),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(pid, \"banana\", \"orange\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(pid, \"sofa\", \"darwinism\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataSize(pid, 2),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\n\/\/ Add three key value pairs, then replace one and modify a second\nfunc TestAccComputeProjectMetadata_modify_1(t *testing.T) {\n\tskipIfEnvNotSet(t,\n\t\t[]string{\n\t\t\t\"GOOGLE_ORG\",\n\t\t\t\"GOOGLE_BILLING_ACCOUNT\",\n\t\t}...,\n\t)\n\n\tbillingId := os.Getenv(\"GOOGLE_BILLING_ACCOUNT\")\n\tvar project compute.Project\n\tpid := \"terrafom-test-\" + acctest.RandString(10)\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckComputeProjectMetadataDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccComputeProject_modify0_metadata(pid, pname, org, billingId),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckComputeProjectExists(\n\t\t\t\t\t\t\"google_compute_project_metadata.fizzbuzz\", pid, &project),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(pid, \"paper\", \"pen\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(pid, \"genghis_khan\", \"french bread\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(pid, \"happy\", \"smiling\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataSize(pid, 3),\n\t\t\t\t),\n\t\t\t},\n\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccComputeProject_modify1_metadata(pid, pname, org, billingId),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckComputeProjectExists(\n\t\t\t\t\t\t\"google_compute_project_metadata.fizzbuzz\", pid, &project),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(pid, \"paper\", \"pen\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(pid, \"paris\", \"french bread\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(pid, \"happy\", \"laughing\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataSize(pid, 3),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\n\/\/ Add two key value pairs, and replace both\nfunc TestAccComputeProjectMetadata_modify_2(t *testing.T) {\n\tskipIfEnvNotSet(t,\n\t\t[]string{\n\t\t\t\"GOOGLE_ORG\",\n\t\t\t\"GOOGLE_BILLING_ACCOUNT\",\n\t\t}...,\n\t)\n\n\tbillingId := os.Getenv(\"GOOGLE_BILLING_ACCOUNT\")\n\tvar project compute.Project\n\tpid := \"terraform-test-\" + acctest.RandString(10)\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckComputeProjectMetadataDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccComputeProject_basic0_metadata(pid, pname, org, billingId),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckComputeProjectExists(\n\t\t\t\t\t\t\"google_compute_project_metadata.fizzbuzz\", pid, &project),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(pid, \"banana\", \"orange\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(pid, \"sofa\", \"darwinism\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataSize(pid, 2),\n\t\t\t\t),\n\t\t\t},\n\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccComputeProject_basic1_metadata(pid, pname, org, billingId),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckComputeProjectExists(\n\t\t\t\t\t\t\"google_compute_project_metadata.fizzbuzz\", pid, 
&project),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(pid, \"kiwi\", \"papaya\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(pid, \"finches\", \"darwinism\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataSize(pid, 2),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc testAccCheckComputeProjectMetadataDestroy(s *terraform.State) error {\n\tconfig := testAccProvider.Meta().(*Config)\n\n\tfor _, rs := range s.RootModule().Resources {\n\t\tif rs.Type != \"google_compute_project_metadata\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tproject, err := config.clientCompute.Projects.Get(rs.Primary.ID).Do()\n\t\tif err == nil && len(project.CommonInstanceMetadata.Items) > 0 {\n\t\t\treturn fmt.Errorf(\"Error, metadata items still exist in %s\", rs.Primary.ID)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc testAccCheckComputeProjectExists(n, pid string, project *compute.Project) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No ID is set\")\n\t\t}\n\n\t\tconfig := testAccProvider.Meta().(*Config)\n\n\t\tfound, err := config.clientCompute.Projects.Get(\n\t\t\tpid).Do()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif \"common_metadata\" != rs.Primary.ID {\n\t\t\treturn fmt.Errorf(\"Common metadata not found, found %s\", rs.Primary.ID)\n\t\t}\n\n\t\t*project = *found\n\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckComputeProjectMetadataContains(pid, key, value string) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\tconfig := testAccProvider.Meta().(*Config)\n\t\tproject, err := config.clientCompute.Projects.Get(pid).Do()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error, failed to load project service for %s: %s\", config.Project, err)\n\t\t}\n\n\t\tfor _, kv := range project.CommonInstanceMetadata.Items {\n\t\t\tif kv.Key == key {\n\t\t\t\tif kv.Value != nil && *kv.Value == value {\n\t\t\t\t\treturn nil\n\t\t\t\t} else {\n\t\t\t\t\treturn fmt.Errorf(\"Error, key value mismatch, wanted (%s, %s), got (%s, %s)\",\n\t\t\t\t\t\tkey, value, kv.Key, *kv.Value)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn fmt.Errorf(\"Error, key %s not present in %s\", key, project.SelfLink)\n\t}\n}\n\nfunc testAccCheckComputeProjectMetadataSize(pid string, size int) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\tconfig := testAccProvider.Meta().(*Config)\n\t\tproject, err := config.clientCompute.Projects.Get(pid).Do()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error, failed to load project service for %s: %s\", config.Project, err)\n\t\t}\n\n\t\tif size > len(project.CommonInstanceMetadata.Items) {\n\t\t\treturn fmt.Errorf(\"Error, expected at least %d metadata items, got %d\", size,\n\t\t\t\tlen(project.CommonInstanceMetadata.Items))\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\nfunc testAccComputeProject_basic0_metadata(pid, name, org, billing string) string {\n\treturn fmt.Sprintf(`\nresource \"google_project\" \"project\" {\n project_id = \"%s\"\n name = \"%s\"\n org_id = \"%s\"\n billing_account = \"%s\"\n}\n\nresource \"google_project_services\" \"services\" {\n project = \"${google_project.project.project_id}\"\n services = [\"compute-component.googleapis.com\"]\n}\n\nresource \"google_compute_project_metadata\" \"fizzbuzz\" {\n project = \"${google_project.project.project_id}\"\n metadata {\n banana = \"orange\"\n sofa = \"darwinism\"\n }\n depends_on = 
[\"google_project_services.services\"]\n}`, pid, name, org, billing)\n}\n\nfunc testAccComputeProject_basic1_metadata(pid, name, org, billing string) string {\n\treturn fmt.Sprintf(`\nresource \"google_project\" \"project\" {\n project_id = \"%s\"\n name = \"%s\"\n org_id = \"%s\"\n billing_account = \"%s\"\n}\n\nresource \"google_project_services\" \"services\" {\n project = \"${google_project.project.project_id}\"\n services = [\"compute-component.googleapis.com\"]\n}\n\nresource \"google_compute_project_metadata\" \"fizzbuzz\" {\n project = \"${google_project.project.project_id}\"\n metadata {\n kiwi = \"papaya\"\n finches = \"darwinism\"\n }\n depends_on = [\"google_project_services.services\"]\n}`, pid, name, org, billing)\n}\n\nfunc testAccComputeProject_modify0_metadata(pid, name, org, billing string) string {\n\treturn fmt.Sprintf(`\nresource \"google_project\" \"project\" {\n project_id = \"%s\"\n name = \"%s\"\n org_id = \"%s\"\n billing_account = \"%s\"\n}\n\nresource \"google_project_services\" \"services\" {\n project = \"${google_project.project.project_id}\"\n services = [\"compute-component.googleapis.com\"]\n}\n\nresource \"google_compute_project_metadata\" \"fizzbuzz\" {\n project = \"${google_project.project.project_id}\"\n metadata {\n paper = \"pen\"\n genghis_khan = \"french bread\"\n happy = \"smiling\"\n }\n depends_on = [\"google_project_services.services\"]\n}`, pid, name, org, billing)\n}\n\nfunc testAccComputeProject_modify1_metadata(pid, name, org, billing string) string {\n\treturn fmt.Sprintf(`\nresource \"google_project\" \"project\" {\n project_id = \"%s\"\n name = \"%s\"\n org_id = \"%s\"\n billing_account = \"%s\"\n}\n\nresource \"google_project_services\" \"services\" {\n project = \"${google_project.project.project_id}\"\n services = [\"compute-component.googleapis.com\"]\n}\n\nresource \"google_compute_project_metadata\" \"fizzbuzz\" {\n project = \"${google_project.project.project_id}\"\n metadata {\n paper = \"pen\"\n paris = \"french bread\"\n happy = \"laughing\"\n }\n depends_on = [\"google_project_services.services\"]\n}`, pid, name, org, billing)\n}\n<commit_msg>Fix both Radek & Dana's comments.<commit_after>package google\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/acctest\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"google.golang.org\/api\/compute\/v1\"\n)\n\n\/\/ Add two key value pairs\nfunc TestAccComputeProjectMetadata_basic(t *testing.T) {\n\tskipIfEnvNotSet(t,\n\t\t[]string{\n\t\t\t\"GOOGLE_ORG\",\n\t\t\t\"GOOGLE_BILLING_ACCOUNT\",\n\t\t}...,\n\t)\n\n\tbillingId := os.Getenv(\"GOOGLE_BILLING_ACCOUNT\")\n\tvar project compute.Project\n\tprojectID := \"terrafom-test-\" + acctest.RandString(10)\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckComputeProjectMetadataDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccComputeProject_basic0_metadata(projectID, pname, org, billingId),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckComputeProjectExists(\n\t\t\t\t\t\t\"google_compute_project_metadata.fizzbuzz\", projectID, &project),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(projectID, \"banana\", \"orange\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(projectID, \"sofa\", 
\"darwinism\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataSize(projectID, 2),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\n\/\/ Add three key value pairs, then replace one and modify a second\nfunc TestAccComputeProjectMetadata_modify_1(t *testing.T) {\n\tskipIfEnvNotSet(t,\n\t\t[]string{\n\t\t\t\"GOOGLE_ORG\",\n\t\t\t\"GOOGLE_BILLING_ACCOUNT\",\n\t\t}...,\n\t)\n\n\tbillingId := os.Getenv(\"GOOGLE_BILLING_ACCOUNT\")\n\tvar project compute.Project\n\tprojectID := \"terrafom-test-\" + acctest.RandString(10)\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckComputeProjectMetadataDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccComputeProject_modify0_metadata(projectID, pname, org, billingId),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckComputeProjectExists(\n\t\t\t\t\t\t\"google_compute_project_metadata.fizzbuzz\", projectID, &project),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(projectID, \"paper\", \"pen\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(projectID, \"genghis_khan\", \"french bread\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(projectID, \"happy\", \"smiling\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataSize(projectID, 3),\n\t\t\t\t),\n\t\t\t},\n\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccComputeProject_modify1_metadata(projectID, pname, org, billingId),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckComputeProjectExists(\n\t\t\t\t\t\t\"google_compute_project_metadata.fizzbuzz\", projectID, &project),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(projectID, \"paper\", \"pen\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(projectID, \"paris\", \"french bread\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(projectID, \"happy\", \"laughing\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataSize(projectID, 3),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\n\/\/ Add two key value pairs, and replace both\nfunc TestAccComputeProjectMetadata_modify_2(t *testing.T) {\n\tskipIfEnvNotSet(t,\n\t\t[]string{\n\t\t\t\"GOOGLE_ORG\",\n\t\t\t\"GOOGLE_BILLING_ACCOUNT\",\n\t\t}...,\n\t)\n\n\tbillingId := os.Getenv(\"GOOGLE_BILLING_ACCOUNT\")\n\tvar project compute.Project\n\tprojectID := \"terraform-test-\" + acctest.RandString(10)\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckComputeProjectMetadataDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccComputeProject_basic0_metadata(projectID, pname, org, billingId),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckComputeProjectExists(\n\t\t\t\t\t\t\"google_compute_project_metadata.fizzbuzz\", projectID, &project),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(projectID, \"banana\", \"orange\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(projectID, \"sofa\", \"darwinism\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataSize(projectID, 2),\n\t\t\t\t),\n\t\t\t},\n\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccComputeProject_basic1_metadata(projectID, pname, org, billingId),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckComputeProjectExists(\n\t\t\t\t\t\t\"google_compute_project_metadata.fizzbuzz\", projectID, &project),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(projectID, 
\"kiwi\", \"papaya\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataContains(projectID, \"finches\", \"darwinism\"),\n\t\t\t\t\ttestAccCheckComputeProjectMetadataSize(projectID, 2),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc testAccCheckComputeProjectMetadataDestroy(s *terraform.State) error {\n\tconfig := testAccProvider.Meta().(*Config)\n\n\tfor _, rs := range s.RootModule().Resources {\n\t\tif rs.Type != \"google_compute_project_metadata\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tproject, err := config.clientCompute.Projects.Get(rs.Primary.ID).Do()\n\t\tif err == nil && len(project.CommonInstanceMetadata.Items) > 0 {\n\t\t\treturn fmt.Errorf(\"Error, metadata items still exist in %s\", rs.Primary.ID)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc testAccCheckComputeProjectExists(n, projectID string, project *compute.Project) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No ID is set\")\n\t\t}\n\n\t\tconfig := testAccProvider.Meta().(*Config)\n\n\t\tfound, err := config.clientCompute.Projects.Get(projectID).Do()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif \"common_metadata\" != rs.Primary.ID {\n\t\t\treturn fmt.Errorf(\"Common metadata not found, found %s\", rs.Primary.ID)\n\t\t}\n\n\t\t*project = *found\n\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckComputeProjectMetadataContains(projectID, key, value string) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\tconfig := testAccProvider.Meta().(*Config)\n\t\tproject, err := config.clientCompute.Projects.Get(projectID).Do()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error, failed to load project service for %s: %s\", config.Project, err)\n\t\t}\n\n\t\tfor _, kv := range project.CommonInstanceMetadata.Items {\n\t\t\tif kv.Key == key {\n\t\t\t\tif kv.Value != nil && *kv.Value == value {\n\t\t\t\t\treturn nil\n\t\t\t\t} else {\n\t\t\t\t\treturn fmt.Errorf(\"Error, key value mismatch, wanted (%s, %s), got (%s, %s)\",\n\t\t\t\t\t\tkey, value, kv.Key, *kv.Value)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn fmt.Errorf(\"Error, key %s not present in %s\", key, project.SelfLink)\n\t}\n}\n\nfunc testAccCheckComputeProjectMetadataSize(projectID string, size int) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\tconfig := testAccProvider.Meta().(*Config)\n\t\tproject, err := config.clientCompute.Projects.Get(projectID).Do()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error, failed to load project service for %s: %s\", config.Project, err)\n\t\t}\n\n\t\tif size > len(project.CommonInstanceMetadata.Items) {\n\t\t\treturn fmt.Errorf(\"Error, expected at least %d metadata items, got %d\", size,\n\t\t\t\tlen(project.CommonInstanceMetadata.Items))\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\nfunc testAccComputeProject_basic0_metadata(projectID, name, org, billing string) string {\n\treturn fmt.Sprintf(`\nresource \"google_project\" \"project\" {\n project_id = \"%s\"\n name = \"%s\"\n org_id = \"%s\"\n billing_account = \"%s\"\n}\n\nresource \"google_project_services\" \"services\" {\n project = \"${google_project.project.project_id}\"\n services = [\"compute-component.googleapis.com\"]\n}\n\nresource \"google_compute_project_metadata\" \"fizzbuzz\" {\n project = \"${google_project.project.project_id}\"\n metadata {\n banana = \"orange\"\n sofa = \"darwinism\"\n }\n depends_on = [\"google_project_services.services\"]\n}`, 
projectID, name, org, billing)\n}\n\nfunc testAccComputeProject_basic1_metadata(projectID, name, org, billing string) string {\n\treturn fmt.Sprintf(`\nresource \"google_project\" \"project\" {\n project_id = \"%s\"\n name = \"%s\"\n org_id = \"%s\"\n billing_account = \"%s\"\n}\n\nresource \"google_project_services\" \"services\" {\n project = \"${google_project.project.project_id}\"\n services = [\"compute-component.googleapis.com\"]\n}\n\nresource \"google_compute_project_metadata\" \"fizzbuzz\" {\n project = \"${google_project.project.project_id}\"\n metadata {\n kiwi = \"papaya\"\n finches = \"darwinism\"\n }\n depends_on = [\"google_project_services.services\"]\n}`, projectID, name, org, billing)\n}\n\nfunc testAccComputeProject_modify0_metadata(projectID, name, org, billing string) string {\n\treturn fmt.Sprintf(`\nresource \"google_project\" \"project\" {\n project_id = \"%s\"\n name = \"%s\"\n org_id = \"%s\"\n billing_account = \"%s\"\n}\n\nresource \"google_project_services\" \"services\" {\n project = \"${google_project.project.project_id}\"\n services = [\"compute-component.googleapis.com\"]\n}\n\nresource \"google_compute_project_metadata\" \"fizzbuzz\" {\n project = \"${google_project.project.project_id}\"\n metadata {\n paper = \"pen\"\n genghis_khan = \"french bread\"\n happy = \"smiling\"\n }\n depends_on = [\"google_project_services.services\"]\n}`, projectID, name, org, billing)\n}\n\nfunc testAccComputeProject_modify1_metadata(projectID, name, org, billing string) string {\n\treturn fmt.Sprintf(`\nresource \"google_project\" \"project\" {\n project_id = \"%s\"\n name = \"%s\"\n org_id = \"%s\"\n billing_account = \"%s\"\n}\n\nresource \"google_project_services\" \"services\" {\n project = \"${google_project.project.project_id}\"\n services = [\"compute-component.googleapis.com\"]\n}\n\nresource \"google_compute_project_metadata\" \"fizzbuzz\" {\n project = \"${google_project.project.project_id}\"\n metadata {\n paper = \"pen\"\n paris = \"french bread\"\n happy = \"laughing\"\n }\n depends_on = [\"google_project_services.services\"]\n}`, projectID, name, org, billing)\n}\n<|endoftext|>"} {"text":"<commit_before>package openstack\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/hashcode\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/rackspace\/gophercloud\"\n\t\"github.com\/rackspace\/gophercloud\/openstack\/compute\/v2\/extensions\/secgroups\"\n)\n\nfunc resourceComputeSecGroupV2() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceComputeSecGroupV2Create,\n\t\tRead: resourceComputeSecGroupV2Read,\n\t\tUpdate: resourceComputeSecGroupV2Update,\n\t\tDelete: resourceComputeSecGroupV2Delete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"region\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tDefaultFunc: envDefaultFuncAllowMissing(\"OS_REGION_NAME\"),\n\t\t\t},\n\t\t\t\"name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: false,\n\t\t\t},\n\t\t\t\"description\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: false,\n\t\t\t},\n\t\t\t\"rule\": &schema.Schema{\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\"id\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tComputed: 
true,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"from_port\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t\tForceNew: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"to_port\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t\tForceNew: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"ip_protocol\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t\tForceNew: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"cidr\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tForceNew: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"from_group_id\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tForceNew: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"self\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tDefault: false,\n\t\t\t\t\t\t\tForceNew: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceComputeSecGroupV2Create(d *schema.ResourceData, meta interface{}) error {\n\tconfig := meta.(*Config)\n\tcomputeClient, err := config.computeV2Client(d.Get(\"region\").(string))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating OpenStack compute client: %s\", err)\n\t}\n\n\tcreateOpts := secgroups.CreateOpts{\n\t\tName: d.Get(\"name\").(string),\n\t\tDescription: d.Get(\"description\").(string),\n\t}\n\n\tlog.Printf(\"[DEBUG] Create Options: %#v\", createOpts)\n\tsg, err := secgroups.Create(computeClient, createOpts).Extract()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating OpenStack security group: %s\", err)\n\t}\n\n\td.SetId(sg.ID)\n\n\tcreateRuleOptsList := resourceSecGroupRulesV2(d)\n\tfor _, createRuleOpts := range createRuleOptsList {\n\t\t_, err := secgroups.CreateRule(computeClient, createRuleOpts).Extract()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error creating OpenStack security group rule: %s\", err)\n\t\t}\n\t}\n\n\treturn resourceComputeSecGroupV2Read(d, meta)\n}\n\nfunc resourceComputeSecGroupV2Read(d *schema.ResourceData, meta interface{}) error {\n\tconfig := meta.(*Config)\n\tcomputeClient, err := config.computeV2Client(d.Get(\"region\").(string))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating OpenStack compute client: %s\", err)\n\t}\n\n\tsg, err := secgroups.Get(computeClient, d.Id()).Extract()\n\tif err != nil {\n\t\treturn CheckDeleted(d, err, \"security group\")\n\t}\n\n\td.Set(\"name\", sg.Name)\n\td.Set(\"description\", sg.Description)\n\trtm := rulesToMap(sg.Rules)\n\tfor _, v := range rtm {\n\t\tif v[\"group\"] == d.Get(\"name\") {\n\t\t\tv[\"self\"] = \"1\"\n\t\t} else {\n\t\t\tv[\"self\"] = \"0\"\n\t\t}\n\t}\n\tlog.Printf(\"[DEBUG] rulesToMap(sg.Rules): %+v\", rtm)\n\td.Set(\"rule\", rtm)\n\n\treturn nil\n}\n\nfunc resourceComputeSecGroupV2Update(d *schema.ResourceData, meta interface{}) error {\n\tconfig := meta.(*Config)\n\tcomputeClient, err := config.computeV2Client(d.Get(\"region\").(string))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating OpenStack compute client: %s\", err)\n\t}\n\n\tupdateOpts := secgroups.UpdateOpts{\n\t\tName: d.Get(\"name\").(string),\n\t\tDescription: d.Get(\"description\").(string),\n\t}\n\n\tlog.Printf(\"[DEBUG] Updating Security Group (%s) with options: %+v\", d.Id(), updateOpts)\n\n\t_, err = secgroups.Update(computeClient, d.Id(), updateOpts).Extract()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error updating 
OpenStack security group (%s): %s\", d.Id(), err)\n\t}\n\n\tif d.HasChange(\"rule\") {\n\t\toldSGRaw, newSGRaw := d.GetChange(\"rule\")\n\t\toldSGRSlice, newSGRSlice := oldSGRaw.([]interface{}), newSGRaw.([]interface{})\n\t\toldSGRSet := schema.NewSet(secgroupRuleV2Hash, oldSGRSlice)\n\t\tnewSGRSet := schema.NewSet(secgroupRuleV2Hash, newSGRSlice)\n\t\tsecgrouprulesToAdd := newSGRSet.Difference(oldSGRSet)\n\t\tsecgrouprulesToRemove := oldSGRSet.Difference(newSGRSet)\n\n\t\tlog.Printf(\"[DEBUG] Security group rules to add: %v\", secgrouprulesToAdd)\n\n\t\tlog.Printf(\"[DEBUG] Security groups rules to remove: %v\", secgrouprulesToRemove)\n\n\t\tfor _, rawRule := range secgrouprulesToAdd.List() {\n\t\t\tcreateRuleOpts := resourceSecGroupRuleCreateOptsV2(d, rawRule)\n\t\t\trule, err := secgroups.CreateRule(computeClient, createRuleOpts).Extract()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error adding rule to OpenStack security group (%s): %s\", d.Id(), err)\n\t\t\t}\n\t\t\tlog.Printf(\"[DEBUG] Added rule (%s) to OpenStack security group (%s) \", rule.ID, d.Id())\n\t\t}\n\n\t\tfor _, r := range secgrouprulesToRemove.List() {\n\t\t\trule := resourceSecGroupRuleV2(d, r)\n\t\t\terr := secgroups.DeleteRule(computeClient, rule.ID).ExtractErr()\n\t\t\tif err != nil {\n\t\t\t\terrCode, ok := err.(*gophercloud.UnexpectedResponseCodeError)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Errorf(\"Error removing rule (%s) from OpenStack security group (%s): %s\", rule.ID, d.Id(), err)\n\t\t\t\t}\n\t\t\t\tif errCode.Actual == 404 {\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\treturn fmt.Errorf(\"Error removing rule (%s) from OpenStack security group (%s)\", rule.ID, d.Id())\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"[DEBUG] Removed rule (%s) from OpenStack security group (%s): %s\", rule.ID, d.Id(), err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn resourceComputeSecGroupV2Read(d, meta)\n}\n\nfunc resourceComputeSecGroupV2Delete(d *schema.ResourceData, meta interface{}) error {\n\tconfig := meta.(*Config)\n\tcomputeClient, err := config.computeV2Client(d.Get(\"region\").(string))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating OpenStack compute client: %s\", err)\n\t}\n\n\terr = secgroups.Delete(computeClient, d.Id()).ExtractErr()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error deleting OpenStack security group: %s\", err)\n\t}\n\td.SetId(\"\")\n\treturn nil\n}\n\nfunc resourceSecGroupRulesV2(d *schema.ResourceData) []secgroups.CreateRuleOpts {\n\trawRules := d.Get(\"rule\").([]interface{})\n\tcreateRuleOptsList := make([]secgroups.CreateRuleOpts, len(rawRules))\n\tfor i, raw := range rawRules {\n\t\trawMap := raw.(map[string]interface{})\n\t\tgroupId := rawMap[\"from_group_id\"].(string)\n\t\tif rawMap[\"self\"].(bool) {\n\t\t\tgroupId = d.Id()\n\t\t}\n\t\tcreateRuleOptsList[i] = secgroups.CreateRuleOpts{\n\t\t\tParentGroupID: d.Id(),\n\t\t\tFromPort: rawMap[\"from_port\"].(int),\n\t\t\tToPort: rawMap[\"to_port\"].(int),\n\t\t\tIPProtocol: rawMap[\"ip_protocol\"].(string),\n\t\t\tCIDR: rawMap[\"cidr\"].(string),\n\t\t\tFromGroupID: groupId,\n\t\t}\n\t}\n\treturn createRuleOptsList\n}\n\nfunc resourceSecGroupRuleCreateOptsV2(d *schema.ResourceData, raw interface{}) secgroups.CreateRuleOpts {\n\trawMap := raw.(map[string]interface{})\n\tgroupId := rawMap[\"from_group_id\"].(string)\n\tif rawMap[\"self\"].(bool) {\n\t\tgroupId = d.Id()\n\t}\n\treturn secgroups.CreateRuleOpts{\n\t\tParentGroupID: d.Id(),\n\t\tFromPort: rawMap[\"from_port\"].(int),\n\t\tToPort: 
rawMap[\"to_port\"].(int),\n\t\tIPProtocol: rawMap[\"ip_protocol\"].(string),\n\t\tCIDR: rawMap[\"cidr\"].(string),\n\t\tFromGroupID: groupId,\n\t}\n}\n\nfunc resourceSecGroupRuleV2(d *schema.ResourceData, raw interface{}) secgroups.Rule {\n\trawMap := raw.(map[string]interface{})\n\treturn secgroups.Rule{\n\t\tID: rawMap[\"id\"].(string),\n\t\tParentGroupID: d.Id(),\n\t\tFromPort: rawMap[\"from_port\"].(int),\n\t\tToPort: rawMap[\"to_port\"].(int),\n\t\tIPProtocol: rawMap[\"ip_protocol\"].(string),\n\t\tIPRange: secgroups.IPRange{CIDR: rawMap[\"cidr\"].(string)},\n\t}\n}\n\nfunc rulesToMap(sgrs []secgroups.Rule) []map[string]interface{} {\n\tsgrMap := make([]map[string]interface{}, len(sgrs))\n\tfor i, sgr := range sgrs {\n\t\tsgrMap[i] = map[string]interface{}{\n\t\t\t\"id\": sgr.ID,\n\t\t\t\"from_port\": sgr.FromPort,\n\t\t\t\"to_port\": sgr.ToPort,\n\t\t\t\"ip_protocol\": sgr.IPProtocol,\n\t\t\t\"cidr\": sgr.IPRange.CIDR,\n\t\t\t\"group\": sgr.Group.Name,\n\t\t}\n\t}\n\treturn sgrMap\n}\n\nfunc secgroupRuleV2Hash(v interface{}) int {\n\tvar buf bytes.Buffer\n\tm := v.(map[string]interface{})\n\tbuf.WriteString(fmt.Sprintf(\"%d-\", m[\"from_port\"].(int)))\n\tbuf.WriteString(fmt.Sprintf(\"%d-\", m[\"to_port\"].(int)))\n\tbuf.WriteString(fmt.Sprintf(\"%s-\", m[\"ip_protocol\"].(string)))\n\tbuf.WriteString(fmt.Sprintf(\"%s-\", m[\"cidr\"].(string)))\n\n\treturn hashcode.String(buf.String())\n}\n<commit_msg>provider\/openstack: Safe SecGroup Delete<commit_after>package openstack\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/hashcode\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/rackspace\/gophercloud\"\n\t\"github.com\/rackspace\/gophercloud\/openstack\/compute\/v2\/extensions\/secgroups\"\n)\n\nfunc resourceComputeSecGroupV2() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceComputeSecGroupV2Create,\n\t\tRead: resourceComputeSecGroupV2Read,\n\t\tUpdate: resourceComputeSecGroupV2Update,\n\t\tDelete: resourceComputeSecGroupV2Delete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"region\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tDefaultFunc: envDefaultFuncAllowMissing(\"OS_REGION_NAME\"),\n\t\t\t},\n\t\t\t\"name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: false,\n\t\t\t},\n\t\t\t\"description\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: false,\n\t\t\t},\n\t\t\t\"rule\": &schema.Schema{\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\"id\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"from_port\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t\tForceNew: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"to_port\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t\tForceNew: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"ip_protocol\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t\tForceNew: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"cidr\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tForceNew: 
false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"from_group_id\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tForceNew: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"self\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tDefault: false,\n\t\t\t\t\t\t\tForceNew: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceComputeSecGroupV2Create(d *schema.ResourceData, meta interface{}) error {\n\tconfig := meta.(*Config)\n\tcomputeClient, err := config.computeV2Client(d.Get(\"region\").(string))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating OpenStack compute client: %s\", err)\n\t}\n\n\tcreateOpts := secgroups.CreateOpts{\n\t\tName: d.Get(\"name\").(string),\n\t\tDescription: d.Get(\"description\").(string),\n\t}\n\n\tlog.Printf(\"[DEBUG] Create Options: %#v\", createOpts)\n\tsg, err := secgroups.Create(computeClient, createOpts).Extract()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating OpenStack security group: %s\", err)\n\t}\n\n\td.SetId(sg.ID)\n\n\tcreateRuleOptsList := resourceSecGroupRulesV2(d)\n\tfor _, createRuleOpts := range createRuleOptsList {\n\t\t_, err := secgroups.CreateRule(computeClient, createRuleOpts).Extract()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error creating OpenStack security group rule: %s\", err)\n\t\t}\n\t}\n\n\treturn resourceComputeSecGroupV2Read(d, meta)\n}\n\nfunc resourceComputeSecGroupV2Read(d *schema.ResourceData, meta interface{}) error {\n\tconfig := meta.(*Config)\n\tcomputeClient, err := config.computeV2Client(d.Get(\"region\").(string))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating OpenStack compute client: %s\", err)\n\t}\n\n\tsg, err := secgroups.Get(computeClient, d.Id()).Extract()\n\tif err != nil {\n\t\treturn CheckDeleted(d, err, \"security group\")\n\t}\n\n\td.Set(\"name\", sg.Name)\n\td.Set(\"description\", sg.Description)\n\trtm := rulesToMap(sg.Rules)\n\tfor _, v := range rtm {\n\t\tif v[\"group\"] == d.Get(\"name\") {\n\t\t\tv[\"self\"] = \"1\"\n\t\t} else {\n\t\t\tv[\"self\"] = \"0\"\n\t\t}\n\t}\n\tlog.Printf(\"[DEBUG] rulesToMap(sg.Rules): %+v\", rtm)\n\td.Set(\"rule\", rtm)\n\n\treturn nil\n}\n\nfunc resourceComputeSecGroupV2Update(d *schema.ResourceData, meta interface{}) error {\n\tconfig := meta.(*Config)\n\tcomputeClient, err := config.computeV2Client(d.Get(\"region\").(string))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating OpenStack compute client: %s\", err)\n\t}\n\n\tupdateOpts := secgroups.UpdateOpts{\n\t\tName: d.Get(\"name\").(string),\n\t\tDescription: d.Get(\"description\").(string),\n\t}\n\n\tlog.Printf(\"[DEBUG] Updating Security Group (%s) with options: %+v\", d.Id(), updateOpts)\n\n\t_, err = secgroups.Update(computeClient, d.Id(), updateOpts).Extract()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error updating OpenStack security group (%s): %s\", d.Id(), err)\n\t}\n\n\tif d.HasChange(\"rule\") {\n\t\toldSGRaw, newSGRaw := d.GetChange(\"rule\")\n\t\toldSGRSlice, newSGRSlice := oldSGRaw.([]interface{}), newSGRaw.([]interface{})\n\t\toldSGRSet := schema.NewSet(secgroupRuleV2Hash, oldSGRSlice)\n\t\tnewSGRSet := schema.NewSet(secgroupRuleV2Hash, newSGRSlice)\n\t\tsecgrouprulesToAdd := newSGRSet.Difference(oldSGRSet)\n\t\tsecgrouprulesToRemove := oldSGRSet.Difference(newSGRSet)\n\n\t\tlog.Printf(\"[DEBUG] Security group rules to add: %v\", secgrouprulesToAdd)\n\n\t\tlog.Printf(\"[DEBUG] Security groups rules to remove: %v\", 
secgrouprulesToRemove)\n\n\t\tfor _, rawRule := range secgrouprulesToAdd.List() {\n\t\t\tcreateRuleOpts := resourceSecGroupRuleCreateOptsV2(d, rawRule)\n\t\t\trule, err := secgroups.CreateRule(computeClient, createRuleOpts).Extract()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error adding rule to OpenStack security group (%s): %s\", d.Id(), err)\n\t\t\t}\n\t\t\tlog.Printf(\"[DEBUG] Added rule (%s) to OpenStack security group (%s) \", rule.ID, d.Id())\n\t\t}\n\n\t\tfor _, r := range secgrouprulesToRemove.List() {\n\t\t\trule := resourceSecGroupRuleV2(d, r)\n\t\t\terr := secgroups.DeleteRule(computeClient, rule.ID).ExtractErr()\n\t\t\tif err != nil {\n\t\t\t\terrCode, ok := err.(*gophercloud.UnexpectedResponseCodeError)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Errorf(\"Error removing rule (%s) from OpenStack security group (%s): %s\", rule.ID, d.Id(), err)\n\t\t\t\t}\n\t\t\t\tif errCode.Actual == 404 {\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\treturn fmt.Errorf(\"Error removing rule (%s) from OpenStack security group (%s)\", rule.ID, d.Id())\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"[DEBUG] Removed rule (%s) from OpenStack security group (%s): %s\", rule.ID, d.Id(), err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn resourceComputeSecGroupV2Read(d, meta)\n}\n\nfunc resourceComputeSecGroupV2Delete(d *schema.ResourceData, meta interface{}) error {\n\tconfig := meta.(*Config)\n\tcomputeClient, err := config.computeV2Client(d.Get(\"region\").(string))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating OpenStack compute client: %s\", err)\n\t}\n\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"ACTIVE\"},\n\t\tTarget: \"DELETED\",\n\t\tRefresh: SecGroupV2StateRefreshFunc(computeClient, d),\n\t\tTimeout: 10 * time.Minute,\n\t\tDelay: 10 * time.Second,\n\t\tMinTimeout: 3 * time.Second,\n\t}\n\n\t_, err = stateConf.WaitForState()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error deleting OpenStack security group: %s\", err)\n\t}\n\n\td.SetId(\"\")\n\treturn nil\n}\n\nfunc resourceSecGroupRulesV2(d *schema.ResourceData) []secgroups.CreateRuleOpts {\n\trawRules := d.Get(\"rule\").([]interface{})\n\tcreateRuleOptsList := make([]secgroups.CreateRuleOpts, len(rawRules))\n\tfor i, raw := range rawRules {\n\t\trawMap := raw.(map[string]interface{})\n\t\tgroupId := rawMap[\"from_group_id\"].(string)\n\t\tif rawMap[\"self\"].(bool) {\n\t\t\tgroupId = d.Id()\n\t\t}\n\t\tcreateRuleOptsList[i] = secgroups.CreateRuleOpts{\n\t\t\tParentGroupID: d.Id(),\n\t\t\tFromPort: rawMap[\"from_port\"].(int),\n\t\t\tToPort: rawMap[\"to_port\"].(int),\n\t\t\tIPProtocol: rawMap[\"ip_protocol\"].(string),\n\t\t\tCIDR: rawMap[\"cidr\"].(string),\n\t\t\tFromGroupID: groupId,\n\t\t}\n\t}\n\treturn createRuleOptsList\n}\n\nfunc resourceSecGroupRuleCreateOptsV2(d *schema.ResourceData, raw interface{}) secgroups.CreateRuleOpts {\n\trawMap := raw.(map[string]interface{})\n\tgroupId := rawMap[\"from_group_id\"].(string)\n\tif rawMap[\"self\"].(bool) {\n\t\tgroupId = d.Id()\n\t}\n\treturn secgroups.CreateRuleOpts{\n\t\tParentGroupID: d.Id(),\n\t\tFromPort: rawMap[\"from_port\"].(int),\n\t\tToPort: rawMap[\"to_port\"].(int),\n\t\tIPProtocol: rawMap[\"ip_protocol\"].(string),\n\t\tCIDR: rawMap[\"cidr\"].(string),\n\t\tFromGroupID: groupId,\n\t}\n}\n\nfunc resourceSecGroupRuleV2(d *schema.ResourceData, raw interface{}) secgroups.Rule {\n\trawMap := raw.(map[string]interface{})\n\treturn secgroups.Rule{\n\t\tID: rawMap[\"id\"].(string),\n\t\tParentGroupID: d.Id(),\n\t\tFromPort: 
rawMap[\"from_port\"].(int),\n\t\tToPort: rawMap[\"to_port\"].(int),\n\t\tIPProtocol: rawMap[\"ip_protocol\"].(string),\n\t\tIPRange: secgroups.IPRange{CIDR: rawMap[\"cidr\"].(string)},\n\t}\n}\n\nfunc rulesToMap(sgrs []secgroups.Rule) []map[string]interface{} {\n\tsgrMap := make([]map[string]interface{}, len(sgrs))\n\tfor i, sgr := range sgrs {\n\t\tsgrMap[i] = map[string]interface{}{\n\t\t\t\"id\": sgr.ID,\n\t\t\t\"from_port\": sgr.FromPort,\n\t\t\t\"to_port\": sgr.ToPort,\n\t\t\t\"ip_protocol\": sgr.IPProtocol,\n\t\t\t\"cidr\": sgr.IPRange.CIDR,\n\t\t\t\"group\": sgr.Group.Name,\n\t\t}\n\t}\n\treturn sgrMap\n}\n\nfunc secgroupRuleV2Hash(v interface{}) int {\n\tvar buf bytes.Buffer\n\tm := v.(map[string]interface{})\n\tbuf.WriteString(fmt.Sprintf(\"%d-\", m[\"from_port\"].(int)))\n\tbuf.WriteString(fmt.Sprintf(\"%d-\", m[\"to_port\"].(int)))\n\tbuf.WriteString(fmt.Sprintf(\"%s-\", m[\"ip_protocol\"].(string)))\n\tbuf.WriteString(fmt.Sprintf(\"%s-\", m[\"cidr\"].(string)))\n\n\treturn hashcode.String(buf.String())\n}\n\nfunc SecGroupV2StateRefreshFunc(computeClient *gophercloud.ServiceClient, d *schema.ResourceData) resource.StateRefreshFunc {\n\treturn func() (interface{}, string, error) {\n\t\tlog.Printf(\"[DEBUG] Attempting to delete Security Group %s.\\n\", d.Id())\n\n\t\terr := secgroups.Delete(computeClient, d.Id()).ExtractErr()\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\n\t\ts, err := secgroups.Get(computeClient, d.Id()).Extract()\n\t\tif err != nil {\n\t\t\terr = CheckDeleted(d, err, \"Security Group\")\n\t\t\tif err != nil {\n\t\t\t\treturn s, \"\", err\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"[DEBUG] Successfully deleted Security Group %s\", d.Id())\n\t\t\t\treturn s, \"DELETED\", nil\n\t\t\t}\n\t\t}\n\n\t\tlog.Printf(\"[DEBUG] Security Group %s still active.\\n\", d.Id())\n\t\treturn s, \"ACTIVE\", nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport \"testing\"\n\nfunc Test_Parse(t *testing.T) {\n\ttestXml := []byte(`<?xml version=\"1.0\" encoding=\"ISO-8859-1\" standalone=\"yes\"?>\n\t\t<!DOCTYPE GANGLIA_XML [\n\t\t<!ELEMENT GANGLIA_XML (GRID|CLUSTER|HOST)*>\n\t\t<!ATTLIST GANGLIA_XML VERSION CDATA #REQUIRED>\n\t\t<!ATTLIST GANGLIA_XML SOURCE CDATA #REQUIRED>\n\t\t<!ELEMENT GRID (CLUSTER | GRID | HOSTS | METRICS)*>\n\t\t<!ATTLIST GRID NAME CDATA #REQUIRED>\n\t\t<!ATTLIST GRID AUTHORITY CDATA #REQUIRED>\n\t\t<!ATTLIST GRID LOCALTIME CDATA #IMPLIED>\n\t\t<!ELEMENT CLUSTER (HOST | HOSTS | METRICS)*>\n\t\t<!ATTLIST CLUSTER NAME CDATA #REQUIRED>\n\t\t<!ATTLIST CLUSTER OWNER CDATA #IMPLIED>\n\t\t<!ATTLIST CLUSTER LATLONG CDATA #IMPLIED>\n\t\t<!ATTLIST CLUSTER URL CDATA #IMPLIED>\n\t\t<!ATTLIST CLUSTER LOCALTIME CDATA #REQUIRED>\n\t\t<!ELEMENT HOST (METRIC)*>\n\t\t<!ATTLIST HOST NAME CDATA #REQUIRED>\n\t\t<!ATTLIST HOST IP CDATA #REQUIRED>\n\t\t<!ATTLIST HOST LOCATION CDATA #IMPLIED>\n\t\t<!ATTLIST HOST TAGS CDATA #IMPLIED>\n\t\t<!ATTLIST HOST REPORTED CDATA #REQUIRED>\n\t\t<!ATTLIST HOST TN CDATA #IMPLIED>\n\t\t<!ATTLIST HOST TMAX CDATA #IMPLIED>\n\t\t<!ATTLIST HOST DMAX CDATA #IMPLIED>\n\t\t<!ATTLIST HOST GMOND_STARTED CDATA #IMPLIED>\n\t\t<!ELEMENT METRIC (EXTRA_DATA*)>\n\t\t<!ATTLIST METRIC NAME CDATA #REQUIRED>\n\t\t<!ATTLIST METRIC VAL CDATA #REQUIRED>\n\t\t<!ATTLIST METRIC TYPE (string | int8 | uint8 | int16 | uint16 | int32 | uint32 | float | double | timestamp) #REQUIRED>\n\t\t<!ATTLIST METRIC UNITS CDATA #IMPLIED>\n\t\t<!ATTLIST METRIC TN CDATA #IMPLIED>\n\t\t<!ATTLIST METRIC TMAX CDATA #IMPLIED>\n\t\t<!ATTLIST METRIC DMAX CDATA 
#IMPLIED>\n\t\t<!ATTLIST METRIC SLOPE (zero | positive | negative | both | unspecified) #IMPLIED>\n\t\t<!ATTLIST METRIC SOURCE (gmond) 'gmond'>\n\t\t<!ELEMENT EXTRA_DATA (EXTRA_ELEMENT*)>\n\t\t<!ELEMENT EXTRA_ELEMENT EMPTY>\n\t\t<!ATTLIST EXTRA_ELEMENT NAME CDATA #REQUIRED>\n\t\t<!ATTLIST EXTRA_ELEMENT VAL CDATA #REQUIRED>\n\t\t<!ELEMENT HOSTS EMPTY>\n\t\t<!ATTLIST HOSTS UP CDATA #REQUIRED>\n\t\t<!ATTLIST HOSTS DOWN CDATA #REQUIRED>\n\t\t<!ATTLIST HOSTS SOURCE (gmond | gmetad) #REQUIRED>\n\t\t<!ELEMENT METRICS (EXTRA_DATA*)>\n\t\t<!ATTLIST METRICS NAME CDATA #REQUIRED>\n\t\t<!ATTLIST METRICS SUM CDATA #REQUIRED>\n\t\t<!ATTLIST METRICS NUM CDATA #REQUIRED>\n\t\t<!ATTLIST METRICS TYPE (string | int8 | uint8 | int16 | uint16 | int32 | uint32 | float | double | timestamp) #REQUIRED>\n\t\t<!ATTLIST METRICS UNITS CDATA #IMPLIED>\n\t\t<!ATTLIST METRICS SLOPE (zero | positive | negative | both | unspecified) #IMPLIED>\n\t\t<!ATTLIST METRICS SOURCE (gmond) 'gmond'>\n\t\t]>\n\t\t<GANGLIA_XML VERSION=\"3.6.0\" SOURCE=\"gmetad\">\n\t\t<GRID NAME=\"unspecified\" AUTHORITY=\"http:\/\/monitor.example.com\/ganglia\/\" LOCALTIME=\"1436989284\">\n\t\t<CLUSTER NAME=\"cluster-example.com\" LOCALTIME=\"1436989282\" OWNER=\"example\" LATLONG=\"100\" URL=\"200\">\n\t\t<HOST NAME=\"example.com\" IP=\"127.0.0.1\" REPORTED=\"1436989274\" TN=\"9\" TMAX=\"20\" DMAX=\"0\" LOCATION=\"unspecified\" GMOND_STARTED=\"1411930720\" TAGS=\"unspecified\">\n\t\t<METRIC NAME=\"disk_free\" VAL=\"1106.528\" TYPE=\"double\" UNITS=\"GB\" TN=\"117\" TMAX=\"180\" DMAX=\"0\" SLOPE=\"both\" SOURCE=\"gmond\">\n\t\t<EXTRA_DATA>\n\t\t<EXTRA_ELEMENT NAME=\"GROUP\" VAL=\"disk\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"DESC\" VAL=\"Total free disk space\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"TITLE\" VAL=\"Disk Space Available\"\/>\n\t\t<\/EXTRA_DATA>\n\t\t<\/METRIC>\n\t\t<METRIC NAME=\"bytes_out\" VAL=\"7167.18\" TYPE=\"float\" UNITS=\"bytes\/sec\" TN=\"277\" TMAX=\"300\" DMAX=\"0\" SLOPE=\"both\" SOURCE=\"gmond\">\n\t\t<EXTRA_DATA>\n\t\t<EXTRA_ELEMENT NAME=\"GROUP\" VAL=\"network\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"DESC\" VAL=\"Number of bytes out per second\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"TITLE\" VAL=\"Bytes Sent\"\/>\n\t\t<\/EXTRA_DATA>\n\t\t<\/METRIC>\n\t\t<METRIC NAME=\"disk_free\" VAL=\"945.966\" TYPE=\"double\" UNITS=\"GB\" TN=\"80\" TMAX=\"180\" DMAX=\"0\" SLOPE=\"both\" SOURCE=\"gmond\">\n\t\t<EXTRA_DATA>\n\t\t<EXTRA_ELEMENT NAME=\"GROUP\" VAL=\"disk\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"DESC\" VAL=\"Total free disk space\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"TITLE\" VAL=\"Disk Space Available\"\/>\n\t\t<\/EXTRA_DATA>\n\t\t<\/METRIC>\n\t\t<METRIC NAME=\"swap_total\" VAL=\"2102460\" TYPE=\"float\" UNITS=\"KB\" TN=\"21\" TMAX=\"1200\" DMAX=\"0\" SLOPE=\"zero\" SOURCE=\"gmond\">\n\t\t<EXTRA_DATA>\n\t\t<EXTRA_ELEMENT NAME=\"GROUP\" VAL=\"memory\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"DESC\" VAL=\"Total amount of swap space displayed in KBs\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"TITLE\" VAL=\"Swap Space Total\"\/>\n\t\t<\/EXTRA_DATA>\n\t\t<\/METRIC>\n\t\t<METRIC NAME=\"part_max_used\" VAL=\"60.5\" TYPE=\"float\" UNITS=\"%\" TN=\"80\" TMAX=\"180\" DMAX=\"0\" SLOPE=\"both\" SOURCE=\"gmond\">\n\t\t<EXTRA_DATA>\n\t\t<EXTRA_ELEMENT NAME=\"GROUP\" VAL=\"disk\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"DESC\" VAL=\"Maximum percent used for all partitions\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"TITLE\" VAL=\"Maximum Disk Space Used\"\/>\n\t\t<\/EXTRA_DATA>\n\t\t<\/METRIC>\n\t\t<\/HOST>\n\t\t<\/CLUSTER>\n\t\t<\/GRID>\n\t\t<\/GANGLIA_XML>`)\n\tresXml := ParseXML(testXml)\n\tif resXml.Version != \"3.6.0\" 
{\n\t\tt.Error(\"Wrong Version\")\n\t}\n\tif resXml.Source != \"gmetad\" {\n\t\tt.Error(\"Wrong source\")\n\t}\n\tif len(resXml.Grids) != 1 {\n\t\tt.Error(\"Wrong number of grids in the cluster\")\n\t}\n\tgrid := resXml.Grids[0]\n\tif grid.Name != \"unspecified\" {\n\t\tt.Error(\"Wrong name of the grid\")\n\t}\n\tif grid.Authority != \"http:\/\/monitor.example.com\/ganglia\/\" {\n\t\tt.Error(\"Wrong Authority of the grid\")\n\t}\n}\n<commit_msg>imroved tests<commit_after>package api\n\nimport \"testing\"\n\nvar (\n\ttestXml = []byte(`<?xml version=\"1.0\" encoding=\"ISO-8859-1\" standalone=\"yes\"?>\n\t\t<!DOCTYPE GANGLIA_XML [\n\t\t<!ELEMENT GANGLIA_XML (GRID|CLUSTER|HOST)*>\n\t\t<!ATTLIST GANGLIA_XML VERSION CDATA #REQUIRED>\n\t\t<!ATTLIST GANGLIA_XML SOURCE CDATA #REQUIRED>\n\t\t<!ELEMENT GRID (CLUSTER | GRID | HOSTS | METRICS)*>\n\t\t<!ATTLIST GRID NAME CDATA #REQUIRED>\n\t\t<!ATTLIST GRID AUTHORITY CDATA #REQUIRED>\n\t\t<!ATTLIST GRID LOCALTIME CDATA #IMPLIED>\n\t\t<!ELEMENT CLUSTER (HOST | HOSTS | METRICS)*>\n\t\t<!ATTLIST CLUSTER NAME CDATA #REQUIRED>\n\t\t<!ATTLIST CLUSTER OWNER CDATA #IMPLIED>\n\t\t<!ATTLIST CLUSTER LATLONG CDATA #IMPLIED>\n\t\t<!ATTLIST CLUSTER URL CDATA #IMPLIED>\n\t\t<!ATTLIST CLUSTER LOCALTIME CDATA #REQUIRED>\n\t\t<!ELEMENT HOST (METRIC)*>\n\t\t<!ATTLIST HOST NAME CDATA #REQUIRED>\n\t\t<!ATTLIST HOST IP CDATA #REQUIRED>\n\t\t<!ATTLIST HOST LOCATION CDATA #IMPLIED>\n\t\t<!ATTLIST HOST TAGS CDATA #IMPLIED>\n\t\t<!ATTLIST HOST REPORTED CDATA #REQUIRED>\n\t\t<!ATTLIST HOST TN CDATA #IMPLIED>\n\t\t<!ATTLIST HOST TMAX CDATA #IMPLIED>\n\t\t<!ATTLIST HOST DMAX CDATA #IMPLIED>\n\t\t<!ATTLIST HOST GMOND_STARTED CDATA #IMPLIED>\n\t\t<!ELEMENT METRIC (EXTRA_DATA*)>\n\t\t<!ATTLIST METRIC NAME CDATA #REQUIRED>\n\t\t<!ATTLIST METRIC VAL CDATA #REQUIRED>\n\t\t<!ATTLIST METRIC TYPE (string | int8 | uint8 | int16 | uint16 | int32 | uint32 | float | double | timestamp) #REQUIRED>\n\t\t<!ATTLIST METRIC UNITS CDATA #IMPLIED>\n\t\t<!ATTLIST METRIC TN CDATA #IMPLIED>\n\t\t<!ATTLIST METRIC TMAX CDATA #IMPLIED>\n\t\t<!ATTLIST METRIC DMAX CDATA #IMPLIED>\n\t\t<!ATTLIST METRIC SLOPE (zero | positive | negative | both | unspecified) #IMPLIED>\n\t\t<!ATTLIST METRIC SOURCE (gmond) 'gmond'>\n\t\t<!ELEMENT EXTRA_DATA (EXTRA_ELEMENT*)>\n\t\t<!ELEMENT EXTRA_ELEMENT EMPTY>\n\t\t<!ATTLIST EXTRA_ELEMENT NAME CDATA #REQUIRED>\n\t\t<!ATTLIST EXTRA_ELEMENT VAL CDATA #REQUIRED>\n\t\t<!ELEMENT HOSTS EMPTY>\n\t\t<!ATTLIST HOSTS UP CDATA #REQUIRED>\n\t\t<!ATTLIST HOSTS DOWN CDATA #REQUIRED>\n\t\t<!ATTLIST HOSTS SOURCE (gmond | gmetad) #REQUIRED>\n\t\t<!ELEMENT METRICS (EXTRA_DATA*)>\n\t\t<!ATTLIST METRICS NAME CDATA #REQUIRED>\n\t\t<!ATTLIST METRICS SUM CDATA #REQUIRED>\n\t\t<!ATTLIST METRICS NUM CDATA #REQUIRED>\n\t\t<!ATTLIST METRICS TYPE (string | int8 | uint8 | int16 | uint16 | int32 | uint32 | float | double | timestamp) #REQUIRED>\n\t\t<!ATTLIST METRICS UNITS CDATA #IMPLIED>\n\t\t<!ATTLIST METRICS SLOPE (zero | positive | negative | both | unspecified) #IMPLIED>\n\t\t<!ATTLIST METRICS SOURCE (gmond) 'gmond'>\n\t\t]>\n\t\t<GANGLIA_XML VERSION=\"3.6.0\" SOURCE=\"gmetad\">\n\t\t<GRID NAME=\"unspecified\" AUTHORITY=\"http:\/\/monitor.example.com\/ganglia\/\" LOCALTIME=\"1436989284\">\n\t\t<CLUSTER NAME=\"cluster-example.com\" LOCALTIME=\"1436989282\" OWNER=\"example\" LATLONG=\"100\" URL=\"200\">\n\t\t<HOST NAME=\"example.com\" IP=\"127.0.0.1\" REPORTED=\"1436989274\" TN=\"9\" TMAX=\"20\" DMAX=\"0\" LOCATION=\"unspecified\" GMOND_STARTED=\"1411930720\" TAGS=\"unspecified\">\n\t\t<METRIC 
NAME=\"disk_free\" VAL=\"1106.528\" TYPE=\"double\" UNITS=\"GB\" TN=\"117\" TMAX=\"180\" DMAX=\"0\" SLOPE=\"both\" SOURCE=\"gmond\">\n\t\t<EXTRA_DATA>\n\t\t<EXTRA_ELEMENT NAME=\"GROUP\" VAL=\"disk\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"DESC\" VAL=\"Total free disk space\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"TITLE\" VAL=\"Disk Space Available\"\/>\n\t\t<\/EXTRA_DATA>\n\t\t<\/METRIC>\n\t\t<METRIC NAME=\"bytes_out\" VAL=\"7167.18\" TYPE=\"float\" UNITS=\"bytes\/sec\" TN=\"277\" TMAX=\"300\" DMAX=\"0\" SLOPE=\"both\" SOURCE=\"gmond\">\n\t\t<EXTRA_DATA>\n\t\t<EXTRA_ELEMENT NAME=\"GROUP\" VAL=\"network\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"DESC\" VAL=\"Number of bytes out per second\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"TITLE\" VAL=\"Bytes Sent\"\/>\n\t\t<\/EXTRA_DATA>\n\t\t<\/METRIC>\n\t\t<METRIC NAME=\"disk_free\" VAL=\"945.966\" TYPE=\"double\" UNITS=\"GB\" TN=\"80\" TMAX=\"180\" DMAX=\"0\" SLOPE=\"both\" SOURCE=\"gmond\">\n\t\t<EXTRA_DATA>\n\t\t<EXTRA_ELEMENT NAME=\"GROUP\" VAL=\"disk\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"DESC\" VAL=\"Total free disk space\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"TITLE\" VAL=\"Disk Space Available\"\/>\n\t\t<\/EXTRA_DATA>\n\t\t<\/METRIC>\n\t\t<METRIC NAME=\"swap_total\" VAL=\"2102460\" TYPE=\"float\" UNITS=\"KB\" TN=\"21\" TMAX=\"1200\" DMAX=\"0\" SLOPE=\"zero\" SOURCE=\"gmond\">\n\t\t<EXTRA_DATA>\n\t\t<EXTRA_ELEMENT NAME=\"GROUP\" VAL=\"memory\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"DESC\" VAL=\"Total amount of swap space displayed in KBs\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"TITLE\" VAL=\"Swap Space Total\"\/>\n\t\t<\/EXTRA_DATA>\n\t\t<\/METRIC>\n\t\t<METRIC NAME=\"part_max_used\" VAL=\"60.5\" TYPE=\"float\" UNITS=\"%\" TN=\"80\" TMAX=\"180\" DMAX=\"0\" SLOPE=\"both\" SOURCE=\"gmond\">\n\t\t<EXTRA_DATA>\n\t\t<EXTRA_ELEMENT NAME=\"GROUP\" VAL=\"disk\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"DESC\" VAL=\"Maximum percent used for all partitions\"\/>\n\t\t<EXTRA_ELEMENT NAME=\"TITLE\" VAL=\"Maximum Disk Space Used\"\/>\n\t\t<\/EXTRA_DATA>\n\t\t<\/METRIC>\n\t\t<\/HOST>\n\t\t<\/CLUSTER>\n\t\t<\/GRID>\n\t\t<\/GANGLIA_XML>`)\n)\n\nfunc Test_ParseXML(t *testing.T) {\n\tresXml := ParseXML(testXml)\n\tif resXml.Version != \"3.6.0\" {\n\t\tt.Error(\"Wrong Version\")\n\t}\n\tif resXml.Source != \"gmetad\" {\n\t\tt.Error(\"Wrong source\")\n\t}\n\tif len(resXml.Grids) != 1 {\n\t\tt.Error(\"Wrong number of grids in the cluster\")\n\t}\n\tgrid := resXml.Grids[0]\n\tif grid.Name != \"unspecified\" {\n\t\tt.Error(\"Wrong name of the grid\")\n\t}\n\tif grid.Authority != \"http:\/\/monitor.example.com\/ganglia\/\" {\n\t\tt.Error(\"Wrong Authority of the grid\")\n\t}\n}\n\nfunc Test_Parse(t *testing.T) {\n\n}\n<|endoftext|>"} {"text":"<commit_before>package intcode\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com\/bewuethr\/advent-of-code\/go\/convert\"\n\t\"github.com\/bewuethr\/advent-of-code\/go\/log\"\n)\n\nconst (\n\t\/\/ Instructions\n\tadd = 1\n\tmult = 2\n\tinput = 3\n\toutput = 4\n\tjumpIfTrue = 5\n\tjumpIfFalse = 6\n\tlessThan = 7\n\tequals = 8\n\thalt = 99\n\n\t\/\/ Modes\n\tpositionMode = 0\n\timmediateMode = 1\n)\n\n\/\/ nargs maps instructions to the number of arguments they use.\nvar nargs = map[int]int{\n\tadd: 3,\n\tmult: 3,\n\tinput: 1,\n\toutput: 1,\n\tjumpIfTrue: 2,\n\tjumpIfFalse: 2,\n\tlessThan: 3,\n\tequals: 3,\n\thalt: 0,\n}\n\n\/\/ methodMap is a map from instructions to the corresponding Computer methods.\nvar methodMap = map[int]func(*Computer, []int){\n\tadd: (*Computer).add,\n\tmult: (*Computer).mult,\n\tinput: (*Computer).input,\n\toutput: (*Computer).output,\n\tjumpIfTrue: 
(*Computer).jumpIfTrue,\n\tjumpIfFalse: (*Computer).jumpIfFalse,\n\tlessThan: (*Computer).lessThan,\n\tequals: (*Computer).equals,\n}\n\n\/\/ Computer is an opcode computer.\ntype Computer struct {\n\topcodes []int\n\tinputVals []int\n\tinstrPtr int\n}\n\n\/\/ NewComputer returns an opcode computer with its memory initialized to opcodes.\nfunc NewComputer(opcodes []int) *Computer {\n\treturn &Computer{\n\t\topcodes: opcodes,\n\t\tinstrPtr: 0,\n\t}\n}\n\n\/\/ RunProgram executes the program in the memory of the computer.\nfunc (c *Computer) RunProgram(inputVals ...int) error {\n\tc.inputVals = inputVals\n\tfor {\n\t\tcode, params := c.parseInstruction(c.opcodes[c.instrPtr])\n\n\t\tif code == halt {\n\t\t\treturn nil\n\t\t}\n\n\t\tmethodMap[code](c, params)\n\t}\n}\n\nfunc (c *Computer) add(params []int) {\n\tc.opcodes[c.opcodes[c.instrPtr+3]] = params[0] + params[1]\n\tc.instrPtr += nargs[add] + 1\n}\n\nfunc (c *Computer) mult(params []int) {\n\tc.opcodes[c.opcodes[c.instrPtr+3]] = params[0] * params[1]\n\tc.instrPtr += nargs[mult] + 1\n}\n\nfunc (c *Computer) input(params []int) {\n\tc.opcodes[c.opcodes[c.instrPtr+1]] = c.inputVals[0]\n\tc.inputVals = c.inputVals[1:]\n\tc.instrPtr += nargs[input] + 1\n}\n\nfunc (c *Computer) output(params []int) {\n\tfmt.Println(params[0])\n\tc.instrPtr += nargs[output] + 1\n}\n\nfunc (c *Computer) jumpIfTrue(params []int) {\n\tif params[0] != 0 {\n\t\tc.instrPtr = params[1]\n\t} else {\n\t\tc.instrPtr += nargs[jumpIfTrue] + 1\n\t}\n}\n\nfunc (c *Computer) jumpIfFalse(params []int) {\n\tif params[0] == 0 {\n\t\tc.instrPtr = params[1]\n\t} else {\n\t\tc.instrPtr += nargs[jumpIfFalse] + 1\n\t}\n}\n\nfunc (c *Computer) lessThan(params []int) {\n\tif params[0] < params[1] {\n\t\tc.opcodes[c.opcodes[c.instrPtr+3]] = 1\n\t} else {\n\t\tc.opcodes[c.opcodes[c.instrPtr+3]] = 0\n\t}\n\tc.instrPtr += nargs[lessThan] + 1\n}\n\nfunc (c *Computer) equals(params []int) {\n\tif params[0] == params[1] {\n\t\tc.opcodes[c.opcodes[c.instrPtr+3]] = 1\n\t} else {\n\t\tc.opcodes[c.opcodes[c.instrPtr+3]] = 0\n\t}\n\tc.instrPtr += nargs[equals] + 1\n}\n\n\/\/ parseInstruction reads a value from memory and extracts the opcode as well\n\/\/ as the parameter values for the instruction, taking the parameter mode into\n\/\/ account.\nfunc (c *Computer) parseInstruction(val int) (code int, params []int) {\n\tcode = val % 100\n\tvar modes []int\n\tif valStr := strconv.Itoa(val); len(valStr) > 2 {\n\t\tvalStr = valStr[:len(valStr)-2]\n\n\t\tvar modesStr []string\n\t\tfor _, m := range valStr {\n\t\t\tmodesStr = append([]string{string(m)}, modesStr...)\n\t\t}\n\n\t\tvar err error\n\t\tmodes, err = convert.StrSliceToInt(modesStr)\n\t\tif err != nil {\n\t\t\tlog.Die(\"converting modes to int\", err)\n\t\t}\n\t}\n\n\tfor len(modes) < nargs[code] {\n\t\tmodes = append(modes, 0)\n\t}\n\n\treturn code, c.getParams(modes)\n}\n\n\/\/ getParams takes a slice of parameter modes and returns the corresponding\n\/\/ parameter values based on the current value of the instruction pointer.\nfunc (c *Computer) getParams(modes []int) []int {\n\tvar params []int\n\n\tfor i := 0; i < len(modes); i++ {\n\t\tvar param int\n\t\tif modes[i] == immediateMode {\n\t\t\tparam = c.opcodes[c.instrPtr+i+1]\n\t\t} else {\n\t\t\tparam = c.opcodes[c.opcodes[c.instrPtr+i+1]]\n\t\t}\n\t\tparams = append(params, param)\n\t}\n\n\treturn params\n}\n<commit_msg>Remove log dependency from intcode package<commit_after>\/\/ Package intcode implements an intcode computer.\npackage intcode\n\nimport 
(\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com\/bewuethr\/advent-of-code\/go\/convert\"\n)\n\nconst (\n\t\/\/ Instructions\n\tadd = 1\n\tmult = 2\n\tinput = 3\n\toutput = 4\n\tjumpIfTrue = 5\n\tjumpIfFalse = 6\n\tlessThan = 7\n\tequals = 8\n\thalt = 99\n\n\t\/\/ Modes\n\tpositionMode = 0\n\timmediateMode = 1\n)\n\n\/\/ nargs maps instructions to the number of arguments they use.\nvar nargs = map[int]int{\n\tadd: 3,\n\tmult: 3,\n\tinput: 1,\n\toutput: 1,\n\tjumpIfTrue: 2,\n\tjumpIfFalse: 2,\n\tlessThan: 3,\n\tequals: 3,\n\thalt: 0,\n}\n\n\/\/ methodMap is a map from instructions to the corresponding Computer methods.\nvar methodMap = map[int]func(*Computer, []int){\n\tadd: (*Computer).add,\n\tmult: (*Computer).mult,\n\tinput: (*Computer).input,\n\toutput: (*Computer).output,\n\tjumpIfTrue: (*Computer).jumpIfTrue,\n\tjumpIfFalse: (*Computer).jumpIfFalse,\n\tlessThan: (*Computer).lessThan,\n\tequals: (*Computer).equals,\n}\n\n\/\/ Computer is an opcode computer.\ntype Computer struct {\n\topcodes []int\n\tinputVals []int\n\tinstrPtr int\n}\n\n\/\/ NewComputer returns an opcode computer with its memory initalized to opcodes.\nfunc NewComputer(opcodes []int) *Computer {\n\treturn &Computer{\n\t\topcodes: opcodes,\n\t\tinstrPtr: 0,\n\t}\n}\n\n\/\/ RunProgram executes the program in the memory of the computer.\nfunc (c *Computer) RunProgram(inputVals ...int) error {\n\tc.inputVals = inputVals\n\tfor {\n\t\tcode, params, err := c.parseInstruction(c.opcodes[c.instrPtr])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif code == halt {\n\t\t\treturn nil\n\t\t}\n\n\t\tmethodMap[code](c, params)\n\t}\n}\n\nfunc (c *Computer) add(params []int) {\n\tc.opcodes[c.opcodes[c.instrPtr+3]] = params[0] + params[1]\n\tc.instrPtr += nargs[add] + 1\n}\n\nfunc (c *Computer) mult(params []int) {\n\tc.opcodes[c.opcodes[c.instrPtr+3]] = params[0] * params[1]\n\tc.instrPtr += nargs[mult] + 1\n}\n\nfunc (c *Computer) input(params []int) {\n\tc.opcodes[c.opcodes[c.instrPtr+1]] = c.inputVals[0]\n\tc.inputVals = c.inputVals[1:]\n\tc.instrPtr += nargs[input] + 1\n}\n\nfunc (c *Computer) output(params []int) {\n\tfmt.Println(params[0])\n\tc.instrPtr += nargs[output] + 1\n}\n\nfunc (c *Computer) jumpIfTrue(params []int) {\n\tif params[0] != 0 {\n\t\tc.instrPtr = params[1]\n\t} else {\n\t\tc.instrPtr += nargs[jumpIfTrue] + 1\n\t}\n}\n\nfunc (c *Computer) jumpIfFalse(params []int) {\n\tif params[0] == 0 {\n\t\tc.instrPtr = params[1]\n\t} else {\n\t\tc.instrPtr += nargs[jumpIfFalse] + 1\n\t}\n}\n\nfunc (c *Computer) lessThan(params []int) {\n\tif params[0] < params[1] {\n\t\tc.opcodes[c.opcodes[c.instrPtr+3]] = 1\n\t} else {\n\t\tc.opcodes[c.opcodes[c.instrPtr+3]] = 0\n\t}\n\tc.instrPtr += nargs[lessThan] + 1\n}\n\nfunc (c *Computer) equals(params []int) {\n\tif params[0] == params[1] {\n\t\tc.opcodes[c.opcodes[c.instrPtr+3]] = 1\n\t} else {\n\t\tc.opcodes[c.opcodes[c.instrPtr+3]] = 0\n\t}\n\tc.instrPtr += nargs[equals] + 1\n}\n\n\/\/ parseInstruction reads a value from memory and extracts the opcode as well\n\/\/ as the parameter values for the instruction, taking the parameter mode into\n\/\/ account.\nfunc (c *Computer) parseInstruction(val int) (code int, params []int, err error) {\n\tcode = val % 100\n\tvar modes []int\n\tif valStr := strconv.Itoa(val); len(valStr) > 2 {\n\t\tvalStr = valStr[:len(valStr)-2]\n\n\t\tvar modesStr []string\n\t\tfor _, m := range valStr {\n\t\t\tmodesStr = append([]string{string(m)}, modesStr...)\n\t\t}\n\n\t\tvar err error\n\t\tmodes, err = convert.StrSliceToInt(modesStr)\n\t\tif err != 
nil {\n\t\t\treturn 0, nil, fmt.Errorf(\"converting modes %v to int: %w\", modesStr, err)\n\t\t}\n\t}\n\n\tfor len(modes) < nargs[code] {\n\t\tmodes = append(modes, 0)\n\t}\n\n\treturn code, c.getParams(modes), nil\n}\n\n\/\/ getParams takes a slice of parameter modes and returns the corresponding\n\/\/ parameter values based on the current value of the instruction pointer.\nfunc (c *Computer) getParams(modes []int) []int {\n\tvar params []int\n\n\tfor i := 0; i < len(modes); i++ {\n\t\tvar param int\n\t\tif modes[i] == immediateMode {\n\t\t\tparam = c.opcodes[c.instrPtr+i+1]\n\t\t} else {\n\t\t\tparam = c.opcodes[c.opcodes[c.instrPtr+i+1]]\n\t\t}\n\t\tparams = append(params, param)\n\t}\n\n\treturn params\n}\n<|endoftext|>"} {"text":"<commit_before>package paddlecloud\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/PaddlePaddle\/cloud\/go\/utils\/restclient\"\n\t\"github.com\/google\/subcommands\"\n)\n\n\/\/ GetCommand exports get subcommand for fetching status\ntype GetCommand struct {\n\ta bool\n}\n\n\/\/ Name is subcommands name\nfunc (*GetCommand) Name() string { return \"get\" }\n\n\/\/ Synopsis is subcommands synopsis\nfunc (*GetCommand) Synopsis() string { return \"Print resources\" }\n\n\/\/ Usage is subcommands usage\nfunc (*GetCommand) Usage() string {\n\treturn `get [jobs|workers|registry [jobname]|quota]:\n\tPrint resources.\n`\n}\n\n\/\/ SetFlags registers subcommands flags\nfunc (p *GetCommand) SetFlags(f *flag.FlagSet) {\n\tf.BoolVar(&p.a, \"a\", false, \"Get all resources.\")\n}\n\n\/\/ Execute get command\nfunc (p *GetCommand) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {\n\tif f.NArg() < 1 || f.NArg() > 2 {\n\t\tf.Usage()\n\t\treturn subcommands.ExitFailure\n\t}\n\tif f.Arg(0) == \"jobs\" {\n\t\tjobs()\n\t} else if f.Arg(0) == \"quota\" {\n\t\tquota()\n\t} else if f.Arg(0) == \"registry\" {\n\t\tregistry()\n\t} else if f.Arg(0) == \"workers\" {\n\t\tif f.NArg() != 2 {\n\t\t\tf.Usage()\n\t\t\treturn subcommands.ExitFailure\n\t\t}\n\t\tworkers(f.Arg(1))\n\t} else {\n\t\tf.Usage()\n\t\treturn subcommands.ExitFailure\n\t}\n\n\treturn subcommands.ExitSuccess\n}\n\nfunc workers(jobname string) error {\n\tqueryMap := url.Values{}\n\tqueryMap.Add(\"jobname\", jobname)\n\trespBody, err := restclient.GetCall(Config.ActiveConfig.Endpoint+\"\/api\/v1\/workers\/\", queryMap)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error getting workers: %v\\n\", err)\n\t\treturn err\n\t}\n\tvar respObj interface{}\n\terr = json.Unmarshal(respBody, &respObj)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"bad server return: %s\", respBody)\n\t\treturn err\n\t}\n\n\tw := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)\n\n\tfmt.Fprintln(w, \"NAME\\tSTATUS\\tSTART\\tEXIT_CODE\\tMSG\\t\")\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error parsing: %s\", err)\n\t\treturn err\n\t}\n\tfor _, item := range respObj.(map[string]interface{})[\"items\"].([]interface{}) {\n\t\tvar exitCode, msg interface{}\n\t\tterminateState := item.(map[string]interface{})[\"status\"].(map[string]interface{})[\"container_statuses\"].([]interface{})[0].(map[string]interface{})[\"state\"].(map[string]interface{})[\"terminated\"]\n\n\t\tif terminateState != nil {\n\t\t\texitCode = terminateState.(map[string]interface{})[\"exit_code\"]\n\t\t\tmsg = terminateState.(map[string]interface{})[\"message\"]\n\t\t}\n\n\t\tfmt.Fprintf(w, 
\"%s\\t%s\\t%v\\t%v\\t%v\\t\\n\",\n\t\t\titem.(map[string]interface{})[\"metadata\"].(map[string]interface{})[\"name\"].(string),\n\t\t\titem.(map[string]interface{})[\"status\"].(map[string]interface{})[\"phase\"].(string),\n\t\t\titem.(map[string]interface{})[\"status\"].(map[string]interface{})[\"start_time\"],\n\t\t\texitCode, msg)\n\t}\n\tw.Flush()\n\treturn nil\n}\nfunc registry() error {\n\trespBody, err := restclient.GetCall(Config.ActiveConfig.Endpoint+\"\/api\/v1\/registry\/\", nil)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"err getting registry secret: %v\\n\", err)\n\t\treturn err\n\t}\n\tvar respObj interface{}\n\terr = json.Unmarshal(respBody, &respObj)\n\tif err != nil {\n\t\treturn err\n\t}\n\titems := respObj.(map[string]interface{})[\"msg\"].(map[string]interface{})[\"items\"].([]interface{})\n\tw := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)\n\tif len(items) >= 0 {\n\t\tfmt.Fprintf(w, \"ID\\tNAME\\tDATA\\n\")\n\t}\n\tidx := 0\n\tfor _, r := range items {\n\t\tmetadata := r.(map[string]interface{})[\"metadata\"].(map[string]interface{})\n\t\tname := RegistryName(metadata[\"name\"].(string))\n\t\tif len(name) != 0 {\n\t\t\tcTime := metadata[\"creation_timestamp\"].(string)\n\t\t\tfmt.Fprintf(w, \"%d\\t%s\\t%s\\n\", idx, name, cTime)\n\t\t\tidx++\n\t\t}\n\t}\n\tw.Flush()\n\treturn err\n}\n\nfunc jobs() error {\n\t\/\/ NOTE: a job include pserver replicaset and a trainers job, display them\n\t\/\/ get pserver replicaset\n\t\/\/ \"status\": {\n\t\/\/ \"available_replicas\": 1,\n\t\/\/ \"conditions\": null,\n\t\/\/ \"fully_labeled_replicas\": 1,\n\t\/\/ \"observed_generation\": 1,\n\t\/\/ \"ready_replicas\": 1,\n\t\/\/ \"replicas\": 1\n\tvar respObj interface{}\n\n\trespBody, err := restclient.GetCall(Config.ActiveConfig.Endpoint+\"\/api\/v1\/pservers\/\", nil)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error getting pservers: %v\\n\", err)\n\t\treturn err\n\t}\n\terr = json.Unmarshal(respBody, &respObj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpserverItems := respObj.(map[string]interface{})[\"items\"].([]interface{})\n\n\t\/\/ get kubernetes jobs info\n\trespBody, err = restclient.GetCall(Config.ActiveConfig.Endpoint+\"\/api\/v1\/jobs\/\", nil)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error getting jobs: %v\\n\", err)\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(respBody, &respObj)\n\tif err != nil {\n\t\treturn err\n\t}\n\titems := respObj.(map[string]interface{})[\"items\"].([]interface{})\n\n\tw := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)\n\tif len(items) >= 0 {\n\t\tfmt.Fprintf(w, \"NAME\\tACTIVE\\tSUCC\\tFAIL\\tSTART\\tCOMP\\tPS_READY\\tPS_TOTAL\\t\\n\")\n\t}\n\tfor _, j := range items {\n\t\tjobnameTrainer := j.(map[string]interface{})[\"metadata\"].(map[string]interface{})[\"name\"].(string)\n\t\tjobnameParts := strings.Split(jobnameTrainer, \"-\")\n\t\tjobname := strings.Join(jobnameParts[0:len(jobnameParts)-1], \"-\")\n\t\t\/\/ get info for job pservers\n\t\tvar psrsname string\n\t\tvar readyReplicas, replicas interface{}\n\t\tfor _, psrs := range pserverItems {\n\t\t\tpsrsname = psrs.(map[string]interface{})[\"metadata\"].(map[string]interface{})[\"name\"].(string)\n\t\t\tif psrsname == jobname+\"-pserver\" {\n\t\t\t\treadyReplicas = psrs.(map[string]interface{})[\"status\"].(map[string]interface{})[\"ready_replicas\"]\n\t\t\t\treplicas = psrs.(map[string]interface{})[\"status\"].(map[string]interface{})[\"replicas\"]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tfmt.Fprintf(w, 
\"%s\\t%v\\t%v\\t%v\\t%v\\t%v\\t%v\\t%v\\t\\n\",\n\t\t\tjobname,\n\t\t\tj.(map[string]interface{})[\"status\"].(map[string]interface{})[\"active\"],\n\t\t\tj.(map[string]interface{})[\"status\"].(map[string]interface{})[\"succeeded\"],\n\t\t\tj.(map[string]interface{})[\"status\"].(map[string]interface{})[\"failed\"],\n\t\t\tj.(map[string]interface{})[\"status\"].(map[string]interface{})[\"start_time\"],\n\t\t\tj.(map[string]interface{})[\"status\"].(map[string]interface{})[\"completion_time\"],\n\t\t\treadyReplicas, replicas)\n\t}\n\tw.Flush()\n\n\treturn err\n}\n\nfunc quota() error {\n\trespBody, err := restclient.GetCall(Config.ActiveConfig.Endpoint+\"\/api\/v1\/quota\/\", nil)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error getting quota: %v\\n\", err)\n\t\treturn err\n\t}\n\tvar respObj interface{}\n\terr = json.Unmarshal(respBody, &respObj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tw := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)\n\tfmt.Fprintf(w, \"RESOURCE\\tLIMIT\\t\\n\")\n\tfor _, item := range respObj.(map[string]interface{})[\"items\"].([]interface{}) {\n\t\tfmt.Fprintf(w, \"-----\\t-----\\t\\n\")\n\t\thardLimits := item.(map[string]interface{})[\"status\"].(map[string]interface{})[\"hard\"].(map[string]interface{})\n\t\tfor k, v := range hardLimits {\n\t\t\tfmt.Fprintf(w, \"%s\\t%s\\t\\n\", k, v.(string))\n\t\t}\n\t}\n\tw.Flush()\n\treturn nil\n}\n<commit_msg>Format quota print (#205)<commit_after>package paddlecloud\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/PaddlePaddle\/cloud\/go\/utils\/restclient\"\n\t\"github.com\/google\/subcommands\"\n)\n\n\/\/ GetCommand exports get subcommand for fetching status\ntype GetCommand struct {\n\ta bool\n}\n\n\/\/ Name is subcommands name\nfunc (*GetCommand) Name() string { return \"get\" }\n\n\/\/ Synopsis is subcommands synopsis\nfunc (*GetCommand) Synopsis() string { return \"Print resources\" }\n\n\/\/ Usage is subcommands usage\nfunc (*GetCommand) Usage() string {\n\treturn `get [jobs|workers|registry [jobname]|quota]:\n\tPrint resources.\n`\n}\n\n\/\/ SetFlags registers subcommands flags\nfunc (p *GetCommand) SetFlags(f *flag.FlagSet) {\n\tf.BoolVar(&p.a, \"a\", false, \"Get all resources.\")\n}\n\n\/\/ Execute get command\nfunc (p *GetCommand) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {\n\tif f.NArg() < 1 || f.NArg() > 2 {\n\t\tf.Usage()\n\t\treturn subcommands.ExitFailure\n\t}\n\tif f.Arg(0) == \"jobs\" {\n\t\tjobs()\n\t} else if f.Arg(0) == \"quota\" {\n\t\tquota()\n\t} else if f.Arg(0) == \"registry\" {\n\t\tregistry()\n\t} else if f.Arg(0) == \"workers\" {\n\t\tif f.NArg() != 2 {\n\t\t\tf.Usage()\n\t\t\treturn subcommands.ExitFailure\n\t\t}\n\t\tworkers(f.Arg(1))\n\t} else {\n\t\tf.Usage()\n\t\treturn subcommands.ExitFailure\n\t}\n\n\treturn subcommands.ExitSuccess\n}\n\nfunc workers(jobname string) error {\n\tqueryMap := url.Values{}\n\tqueryMap.Add(\"jobname\", jobname)\n\trespBody, err := restclient.GetCall(Config.ActiveConfig.Endpoint+\"\/api\/v1\/workers\/\", queryMap)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error getting workers: %v\\n\", err)\n\t\treturn err\n\t}\n\tvar respObj interface{}\n\terr = json.Unmarshal(respBody, &respObj)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"bad server return: %s\", respBody)\n\t\treturn err\n\t}\n\n\tw := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)\n\n\tfmt.Fprintln(w, 
\"NAME\\tSTATUS\\tSTART\\tEXIT_CODE\\tMSG\\t\")\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error parsing: %s\", err)\n\t\treturn err\n\t}\n\tfor _, item := range respObj.(map[string]interface{})[\"items\"].([]interface{}) {\n\t\tvar exitCode, msg interface{}\n\t\tterminateState := item.(map[string]interface{})[\"status\"].(map[string]interface{})[\"container_statuses\"].([]interface{})[0].(map[string]interface{})[\"state\"].(map[string]interface{})[\"terminated\"]\n\n\t\tif terminateState != nil {\n\t\t\texitCode = terminateState.(map[string]interface{})[\"exit_code\"]\n\t\t\tmsg = terminateState.(map[string]interface{})[\"message\"]\n\t\t}\n\n\t\tfmt.Fprintf(w, \"%s\\t%s\\t%v\\t%v\\t%v\\t\\n\",\n\t\t\titem.(map[string]interface{})[\"metadata\"].(map[string]interface{})[\"name\"].(string),\n\t\t\titem.(map[string]interface{})[\"status\"].(map[string]interface{})[\"phase\"].(string),\n\t\t\titem.(map[string]interface{})[\"status\"].(map[string]interface{})[\"start_time\"],\n\t\t\texitCode, msg)\n\t}\n\tw.Flush()\n\treturn nil\n}\nfunc registry() error {\n\trespBody, err := restclient.GetCall(Config.ActiveConfig.Endpoint+\"\/api\/v1\/registry\/\", nil)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"err getting registry secret: %v\\n\", err)\n\t\treturn err\n\t}\n\tvar respObj interface{}\n\terr = json.Unmarshal(respBody, &respObj)\n\tif err != nil {\n\t\treturn err\n\t}\n\titems := respObj.(map[string]interface{})[\"msg\"].(map[string]interface{})[\"items\"].([]interface{})\n\tw := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)\n\tif len(items) >= 0 {\n\t\tfmt.Fprintf(w, \"ID\\tNAME\\tDATA\\n\")\n\t}\n\tidx := 0\n\tfor _, r := range items {\n\t\tmetadata := r.(map[string]interface{})[\"metadata\"].(map[string]interface{})\n\t\tname := RegistryName(metadata[\"name\"].(string))\n\t\tif len(name) != 0 {\n\t\t\tcTime := metadata[\"creation_timestamp\"].(string)\n\t\t\tfmt.Fprintf(w, \"%d\\t%s\\t%s\\n\", idx, name, cTime)\n\t\t\tidx++\n\t\t}\n\t}\n\tw.Flush()\n\treturn err\n}\n\nfunc jobs() error {\n\t\/\/ NOTE: a job include pserver replicaset and a trainers job, display them\n\t\/\/ get pserver replicaset\n\t\/\/ \"status\": {\n\t\/\/ \"available_replicas\": 1,\n\t\/\/ \"conditions\": null,\n\t\/\/ \"fully_labeled_replicas\": 1,\n\t\/\/ \"observed_generation\": 1,\n\t\/\/ \"ready_replicas\": 1,\n\t\/\/ \"replicas\": 1\n\tvar respObj interface{}\n\n\trespBody, err := restclient.GetCall(Config.ActiveConfig.Endpoint+\"\/api\/v1\/pservers\/\", nil)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error getting pservers: %v\\n\", err)\n\t\treturn err\n\t}\n\terr = json.Unmarshal(respBody, &respObj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpserverItems := respObj.(map[string]interface{})[\"items\"].([]interface{})\n\n\t\/\/ get kubernetes jobs info\n\trespBody, err = restclient.GetCall(Config.ActiveConfig.Endpoint+\"\/api\/v1\/jobs\/\", nil)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error getting jobs: %v\\n\", err)\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(respBody, &respObj)\n\tif err != nil {\n\t\treturn err\n\t}\n\titems := respObj.(map[string]interface{})[\"items\"].([]interface{})\n\n\tw := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)\n\tif len(items) >= 0 {\n\t\tfmt.Fprintf(w, \"NAME\\tACTIVE\\tSUCC\\tFAIL\\tSTART\\tCOMP\\tPS_READY\\tPS_TOTAL\\t\\n\")\n\t}\n\tfor _, j := range items {\n\t\tjobnameTrainer := j.(map[string]interface{})[\"metadata\"].(map[string]interface{})[\"name\"].(string)\n\t\tjobnameParts := strings.Split(jobnameTrainer, \"-\")\n\t\tjobname := 
strings.Join(jobnameParts[0:len(jobnameParts)-1], \"-\")\n\t\t\/\/ get info for job pservers\n\t\tvar psrsname string\n\t\tvar readyReplicas, replicas interface{}\n\t\tfor _, psrs := range pserverItems {\n\t\t\tpsrsname = psrs.(map[string]interface{})[\"metadata\"].(map[string]interface{})[\"name\"].(string)\n\t\t\tif psrsname == jobname+\"-pserver\" {\n\t\t\t\treadyReplicas = psrs.(map[string]interface{})[\"status\"].(map[string]interface{})[\"ready_replicas\"]\n\t\t\t\treplicas = psrs.(map[string]interface{})[\"status\"].(map[string]interface{})[\"replicas\"]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tfmt.Fprintf(w, \"%s\\t%v\\t%v\\t%v\\t%v\\t%v\\t%v\\t%v\\t\\n\",\n\t\t\tjobname,\n\t\t\tj.(map[string]interface{})[\"status\"].(map[string]interface{})[\"active\"],\n\t\t\tj.(map[string]interface{})[\"status\"].(map[string]interface{})[\"succeeded\"],\n\t\t\tj.(map[string]interface{})[\"status\"].(map[string]interface{})[\"failed\"],\n\t\t\tj.(map[string]interface{})[\"status\"].(map[string]interface{})[\"start_time\"],\n\t\t\tj.(map[string]interface{})[\"status\"].(map[string]interface{})[\"completion_time\"],\n\t\t\treadyReplicas, replicas)\n\t}\n\tw.Flush()\n\n\treturn err\n}\n\nfunc quota() error {\n\trespBody, err := restclient.GetCall(Config.ActiveConfig.Endpoint+\"\/api\/v1\/quota\/\", nil)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error getting quota: %v\\n\", err)\n\t\treturn err\n\t}\n\tvar respObj interface{}\n\terr = json.Unmarshal(respBody, &respObj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tw := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)\n\tfmt.Fprintf(w, \"RESOURCE\\tLIMIT\\t\\n\")\n\tfor _, item := range respObj.(map[string]interface{})[\"items\"].([]interface{}) {\n\t\tfmt.Fprintf(w, \"-----\\t-----\\t\\n\")\n\t\thardLimits := item.(map[string]interface{})[\"status\"].(map[string]interface{})[\"hard\"].(map[string]interface{})\n\t\tfmt.Fprintf(w, \"Memory\\t%s\\t\\n\", hardLimits[\"requests.memory\"])\n\t\tfmt.Fprintf(w, \"CPU\\t%s\\t\\n\", hardLimits[\"requests.cpu\"])\n\t\tfmt.Fprintf(w, \"GPU\\t%s\\t\\n\", hardLimits[\"alpha.kubernetes.io\/nvidia-gpu\"])\n\t\tfmt.Fprintf(w, \"MaxParallism\\t%s\\t\\n\", hardLimits[\"pods\"])\n\t}\n\tw.Flush()\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package update\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"os\"\n\t\"path\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestPullImage(t *testing.T) {\n\t\/\/ pull is OK\n\tmsgStream1 := `{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}`\n\t\/\/ pull errored\n\tmsgStream2 := `{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"error\": \"something bad happened\"}`\n\t\/\/ msg stream is cut off\n\tmsgStream3 := `{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"ba`\n\n\ttestImageRepo := \"quay.io\/protonet\/dummy\"\n\n\tvar testHandler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path != \"\/images\/create\" {\n\t\t\thttp.Error(w, \"Not found.\", http.StatusNotFound)\n\t\t}\n\n\t\t\/\/ https:\/\/docs.docker.com\/engine\/reference\/api\/docker_remote_api_v1.22\/#\/create-an-image\n\t\tquery := r.URL.Query()\n\t\tfromImage := query.Get(\"fromImage\")\n\t\ttag := 
query.Get(\"tag\")\n\n\t\tif fromImage == \"\" || tag == \"\" {\n\t\t\thttp.Error(w, \"Missing parameter 'fromImage' or 'tag'\", http.StatusBadRequest)\n\t\t}\n\n\t\tif fromImage != testImageRepo {\n\t\t\thttp.Error(w, \"Not found.\", http.StatusNotFound)\n\t\t}\n\n\t\tswitch tag {\n\t\tcase \"v1\":\n\t\t\t_, err := w.Write([]byte(msgStream1))\n\t\t\tassert.Nil(t, err)\n\t\tcase \"v2\":\n\t\t\t_, err := w.Write([]byte(msgStream2))\n\t\t\tassert.Nil(t, err)\n\t\tcase \"v3\":\n\t\t\t_, err := w.Write([]byte(msgStream3))\n\t\t\tassert.Nil(t, err)\n\t\tdefault:\n\t\t\thttp.Error(w, \"Not found.\", http.StatusNotFound)\n\t\t}\n\t}\n\n\tsrv := httptest.NewServer(testHandler)\n\n\tos.Setenv(\"DOCKER_HOST\", srv.URL)\n\tdefer os.Setenv(\"DOCKER_HOST\", \"\")\n\n\t\/\/ pull is OK\n\terr := pullImage(testImageRepo, \"v1\")\n\tassert.Nil(t, err)\n\n\t\/\/ pull errored\n\terr = pullImage(testImageRepo, \"v2\")\n\tassert.EqualError(t, err, \"Docker error: something bad happened\")\n\n\t\/\/ msg stream is cut off\n\terr = pullImage(testImageRepo, \"v3\")\n\tassert.Equal(t, err, io.ErrUnexpectedEOF)\n}\n\n\/\/ TestExportDockerImage tests the functionality of streaming the data\n\/\/ from a image's rootfs by exporting a pristine container\nfunc TestExportDockerImage(t *testing.T) {\n\ttestContainerID := \"b99a1defb349\"\n\ttestTARBase64 := \"H4sICJJWblgAA2FyY2hpdmUudGFyAO2VUQ7CIAxA+fYUvcFaCuw86Fhi5tS4mez4wrbol0uWCGrG++lPQ0sfpLYQ0UHEUmsYo5kiSjXFGSBGZqVlyQRIUkkpQMdvTYh719ubb6WpLq09d83xTZ5Pq+uFc+Z7POOfYIt99Bewwj+jUcG\/UZz9pyD4ryLX8PMwSi34J\/36\/0Z6\/4xEAjByXyMb9+8G215PDno39LtvN5NJzuHn9n857n\/O+z8JLkGNNfufiIN\/jVqAXJr3p9i4\/0wms10eO0nUugAQAAA=\"\n\ttestTARBase64Buffer := bytes.NewBufferString(testTARBase64)\n\ttestTARBuffer := bytes.NewBuffer([]byte{})\n\n\tdec := base64.NewDecoder(base64.StdEncoding, testTARBase64Buffer)\n\tgunzip, err := gzip.NewReader(dec)\n\tassert.Nil(t, err)\n\t_, err = io.Copy(testTARBuffer, gunzip)\n\tassert.Nil(t, err)\n\n\tvar testHandler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch r.Method {\n\t\tcase \"POST\":\n\t\t\tif r.URL.Path == \"\/containers\/create\" {\n\t\t\t\tresponse := struct {\n\t\t\t\t\tID string `json:\"Id\"`\n\t\t\t\t\tWarnings []string\n\t\t\t\t}{ID: testContainerID}\n\t\t\t\tjson.NewEncoder(w).Encode(&response)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbreak\n\t\tcase \"GET\":\n\t\t\tif r.URL.Path == fmt.Sprintf(\"\/containers\/%s\/export\", testContainerID) {\n\t\t\t\tw.Header().Add(\"Content-Type\", \"application\/octet-stream\")\n\t\t\t\tw.Write(testTARBuffer.Bytes())\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbreak\n\t\tcase \"DELETE\":\n\t\t\tif r.URL.Path == fmt.Sprintf(\"\/containers\/%s\", testContainerID) {\n\t\t\t\tw.WriteHeader(http.StatusNoContent)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\thttp.Error(w, \"Not found.\", http.StatusNotFound)\n\t\tassert.Fail(t, \"test tried to access a wrong path\", \"%s %+v\", r.Method, r.URL)\n\t}\n\n\tsrv := httptest.NewServer(testHandler)\n\n\tos.Setenv(\"DOCKER_HOST\", srv.URL)\n\tos.Setenv(\"DOCKER_API_VERSION\", \"1.22\")\n\tdefer os.Setenv(\"DOCKER_HOST\", \"\")\n\tdefer os.Setenv(\"DOCKER_API_VERSION\", \"\")\n\n\t\/\/ direct export to buffer\n\timageBuf := bytes.NewBuffer([]byte{})\n\terr = exportDockerImage(\"repository\", \"tag\", imageBuf)\n\tassert.Nil(t, err)\n\tassert.EqualValues(t, testTARBuffer.Bytes(), imageBuf.Bytes())\n\n\t\/\/ export through pipe\n\timageBuf2 := bytes.NewBuffer([]byte{})\n\tpipeReader, pipeWriter := io.Pipe()\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tvar 
extractErr error\n\tgo func() {\n\t\textractErr = exportDockerImage(\"repository\", \"tag\", bufio.NewWriter(pipeWriter))\n\t\t\/\/ needs to be closed, or io.Copy from the other end will be stuck forever\n\t\tpipeWriter.Close()\n\t\twg.Done()\n\t}()\n\n\tio.Copy(bufio.NewWriter(imageBuf2), bufio.NewReader(pipeReader))\n\twg.Wait()\n}\n\nfunc TestExtractDockerImage(t *testing.T) {\n\ttestContainerID := \"b99a1defb349\"\n\ttestTARBase64 := \"H4sICJJWblgAA2FyY2hpdmUudGFyAO2VUQ7CIAxA+fYUvcFaCuw86Fhi5tS4mez4wrbol0uWCGrG++lPQ0sfpLYQ0UHEUmsYo5kiSjXFGSBGZqVlyQRIUkkpQMdvTYh719ubb6WpLq09d83xTZ5Pq+uFc+Z7POOfYIt99Bewwj+jUcG\/UZz9pyD4ryLX8PMwSi34J\/36\/0Z6\/4xEAjByXyMb9+8G215PDno39LtvN5NJzuHn9n857n\/O+z8JLkGNNfufiIN\/jVqAXJr3p9i4\/0wms10eO0nUugAQAAA=\"\n\ttestTARBase64Buffer := bytes.NewBufferString(testTARBase64)\n\ttestTARBuffer := bytes.NewBuffer([]byte{})\n\n\tdec := base64.NewDecoder(base64.StdEncoding, testTARBase64Buffer)\n\tgunzip, err := gzip.NewReader(dec)\n\tassert.Nil(t, err)\n\t_, err = io.Copy(testTARBuffer, gunzip)\n\tassert.Nil(t, err)\n\n\tvar testHandler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch r.Method {\n\t\tcase \"POST\":\n\t\t\tif r.URL.Path == \"\/containers\/create\" {\n\t\t\t\tresponse := struct {\n\t\t\t\t\tID string `json:\"Id\"`\n\t\t\t\t\tWarnings []string\n\t\t\t\t}{ID: testContainerID}\n\t\t\t\tjson.NewEncoder(w).Encode(&response)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbreak\n\t\tcase \"GET\":\n\t\t\tif r.URL.Path == fmt.Sprintf(\"\/containers\/%s\/export\", testContainerID) {\n\t\t\t\tw.Header().Add(\"Content-Type\", \"application\/octet-stream\")\n\t\t\t\tw.Write(testTARBuffer.Bytes())\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbreak\n\t\tcase \"DELETE\":\n\t\t\tif r.URL.Path == fmt.Sprintf(\"\/containers\/%s\", testContainerID) {\n\t\t\t\tw.WriteHeader(http.StatusNoContent)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\thttp.Error(w, \"Not found.\", http.StatusNotFound)\n\t\tassert.Fail(t, \"test tried to access a wrong path\", \"%s %+v\", r.Method, r.URL)\n\t}\n\n\tsrv := httptest.NewServer(testHandler)\n\n\tos.Setenv(\"DOCKER_HOST\", srv.URL)\n\tos.Setenv(\"DOCKER_API_VERSION\", \"1.22\")\n\tdefer os.Setenv(\"DOCKER_HOST\", \"\")\n\tdefer os.Setenv(\"DOCKER_API_VERSION\", \"\")\n\n\ttempDir, err := ioutil.TempDir(\"\", \"platconf-unittest-\")\n\tassert.Nil(t, err)\n\tdefer os.RemoveAll(tempDir)\n\n\terr = extractDockerImage(\"repository\", \"tag\", tempDir)\n\tassert.Nil(t, err)\n\n\trootInfo, err := ioutil.ReadDir(tempDir)\n\tassert.Nil(t, err)\n\tassert.Len(t, rootInfo, 3)\n\n\t\/\/ 'a'\n\tassert.Equal(t, \"a\", rootInfo[0].Name())\n\tassert.True(t, rootInfo[0].IsDir())\n\taInfo, err := ioutil.ReadDir(path.Join(tempDir, \"a\"))\n\tassert.Nil(t, err)\n\tassert.Len(t, aInfo, 1)\n\n\t\/\/ 'a\/b'\n\tassert.Equal(t, \"b\", aInfo[0].Name())\n\tassert.True(t, aInfo[0].IsDir())\n\tabInfo, err := ioutil.ReadDir(path.Join(tempDir, \"a\", \"b\"))\n\tassert.Nil(t, err)\n\tassert.Len(t, abInfo, 1)\n\n\t\/\/ 'a\/b\/d'\n\tassert.Equal(t, \"d\", abInfo[0].Name())\n\tassert.False(t, abInfo[0].IsDir())\n\tdContent, err := ioutil.ReadFile(path.Join(tempDir, \"a\", \"b\", \"d\"))\n\tassert.Nil(t, err)\n\tassert.Equal(t, \"example text\\n\", string(dContent))\n\n\t\/\/ 'c'\n\tassert.Equal(t, \"c\", rootInfo[1].Name())\n\tassert.True(t, rootInfo[1].IsDir())\n\tcInfo, err := ioutil.ReadDir(path.Join(tempDir, \"c\"))\n\tassert.Nil(t, err)\n\tassert.Len(t, cInfo, 0)\n\n\t\/\/ 'e'\n\tassert.Equal(t, \"e\", rootInfo[2].Name())\n\tassert.Equal(t, \"Lrwxr-xr-x\", 
rootInfo[2].Mode().String())\n}\n<commit_msg>fix checking symlink status in TestExtractDockerImage<commit_after>package update\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"os\"\n\t\"path\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestPullImage(t *testing.T) {\n\t\/\/ pull is OK\n\tmsgStream1 := `{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}`\n\t\/\/ pull errored\n\tmsgStream2 := `{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"error\": \"something bad happened\"}`\n\t\/\/ msg stream is cut off\n\tmsgStream3 := `{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"bar\"}{\"foo\": \"ba`\n\n\ttestImageRepo := \"quay.io\/protonet\/dummy\"\n\n\tvar testHandler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path != \"\/images\/create\" {\n\t\t\thttp.Error(w, \"Not found.\", http.StatusNotFound)\n\t\t}\n\n\t\t\/\/ https:\/\/docs.docker.com\/engine\/reference\/api\/docker_remote_api_v1.22\/#\/create-an-image\n\t\tquery := r.URL.Query()\n\t\tfromImage := query.Get(\"fromImage\")\n\t\ttag := query.Get(\"tag\")\n\n\t\tif fromImage == \"\" || tag == \"\" {\n\t\t\thttp.Error(w, \"Missing parameter 'fromImage' or 'tag'\", http.StatusBadRequest)\n\t\t}\n\n\t\tif fromImage != testImageRepo {\n\t\t\thttp.Error(w, \"Not found.\", http.StatusNotFound)\n\t\t}\n\n\t\tswitch tag {\n\t\tcase \"v1\":\n\t\t\t_, err := w.Write([]byte(msgStream1))\n\t\t\tassert.Nil(t, err)\n\t\tcase \"v2\":\n\t\t\t_, err := w.Write([]byte(msgStream2))\n\t\t\tassert.Nil(t, err)\n\t\tcase \"v3\":\n\t\t\t_, err := w.Write([]byte(msgStream3))\n\t\t\tassert.Nil(t, err)\n\t\tdefault:\n\t\t\thttp.Error(w, \"Not found.\", http.StatusNotFound)\n\t\t}\n\t}\n\n\tsrv := httptest.NewServer(testHandler)\n\n\tos.Setenv(\"DOCKER_HOST\", srv.URL)\n\tdefer os.Setenv(\"DOCKER_HOST\", \"\")\n\n\t\/\/ pull is OK\n\terr := pullImage(testImageRepo, \"v1\")\n\tassert.Nil(t, err)\n\n\t\/\/ pull errored\n\terr = pullImage(testImageRepo, \"v2\")\n\tassert.EqualError(t, err, \"Docker error: something bad happened\")\n\n\t\/\/ msg stream is cut off\n\terr = pullImage(testImageRepo, \"v3\")\n\tassert.Equal(t, err, io.ErrUnexpectedEOF)\n}\n\n\/\/ TestExportDockerImage tests the functionality of streaming the data\n\/\/ from a image's rootfs by exporting a pristine container\nfunc TestExportDockerImage(t *testing.T) {\n\ttestContainerID := \"b99a1defb349\"\n\ttestTARBase64 := \"H4sICJJWblgAA2FyY2hpdmUudGFyAO2VUQ7CIAxA+fYUvcFaCuw86Fhi5tS4mez4wrbol0uWCGrG++lPQ0sfpLYQ0UHEUmsYo5kiSjXFGSBGZqVlyQRIUkkpQMdvTYh719ubb6WpLq09d83xTZ5Pq+uFc+Z7POOfYIt99Bewwj+jUcG\/UZz9pyD4ryLX8PMwSi34J\/36\/0Z6\/4xEAjByXyMb9+8G215PDno39LtvN5NJzuHn9n857n\/O+z8JLkGNNfufiIN\/jVqAXJr3p9i4\/0wms10eO0nUugAQAAA=\"\n\ttestTARBase64Buffer := bytes.NewBufferString(testTARBase64)\n\ttestTARBuffer := bytes.NewBuffer([]byte{})\n\n\tdec := base64.NewDecoder(base64.StdEncoding, testTARBase64Buffer)\n\tgunzip, err := gzip.NewReader(dec)\n\tassert.Nil(t, err)\n\t_, err = io.Copy(testTARBuffer, gunzip)\n\tassert.Nil(t, err)\n\n\tvar testHandler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch r.Method {\n\t\tcase \"POST\":\n\t\t\tif r.URL.Path == \"\/containers\/create\" 
{\n\t\t\t\tresponse := struct {\n\t\t\t\t\tID string `json:\"Id\"`\n\t\t\t\t\tWarnings []string\n\t\t\t\t}{ID: testContainerID}\n\t\t\t\tjson.NewEncoder(w).Encode(&response)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbreak\n\t\tcase \"GET\":\n\t\t\tif r.URL.Path == fmt.Sprintf(\"\/containers\/%s\/export\", testContainerID) {\n\t\t\t\tw.Header().Add(\"Content-Type\", \"application\/octet-stream\")\n\t\t\t\tw.Write(testTARBuffer.Bytes())\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbreak\n\t\tcase \"DELETE\":\n\t\t\tif r.URL.Path == fmt.Sprintf(\"\/containers\/%s\", testContainerID) {\n\t\t\t\tw.WriteHeader(http.StatusNoContent)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\thttp.Error(w, \"Not found.\", http.StatusNotFound)\n\t\tassert.Fail(t, \"test tried to access a wrong path\", \"%s %+v\", r.Method, r.URL)\n\t}\n\n\tsrv := httptest.NewServer(testHandler)\n\n\tos.Setenv(\"DOCKER_HOST\", srv.URL)\n\tos.Setenv(\"DOCKER_API_VERSION\", \"1.22\")\n\tdefer os.Setenv(\"DOCKER_HOST\", \"\")\n\tdefer os.Setenv(\"DOCKER_API_VERSION\", \"\")\n\n\t\/\/ direct export to buffer\n\timageBuf := bytes.NewBuffer([]byte{})\n\terr = exportDockerImage(\"repository\", \"tag\", imageBuf)\n\tassert.Nil(t, err)\n\tassert.EqualValues(t, testTARBuffer.Bytes(), imageBuf.Bytes())\n\n\t\/\/ export through pipe\n\timageBuf2 := bytes.NewBuffer([]byte{})\n\tpipeReader, pipeWriter := io.Pipe()\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tvar extractErr error\n\tgo func() {\n\t\textractErr = exportDockerImage(\"repository\", \"tag\", bufio.NewWriter(pipeWriter))\n\t\t\/\/ needs to be closed, or io.Copy from the other end will be stuck forever\n\t\tpipeWriter.Close()\n\t\twg.Done()\n\t}()\n\n\tio.Copy(bufio.NewWriter(imageBuf2), bufio.NewReader(pipeReader))\n\twg.Wait()\n}\n\nfunc TestExtractDockerImage(t *testing.T) {\n\ttestContainerID := \"b99a1defb349\"\n\ttestTARBase64 := \"H4sICJJWblgAA2FyY2hpdmUudGFyAO2VUQ7CIAxA+fYUvcFaCuw86Fhi5tS4mez4wrbol0uWCGrG++lPQ0sfpLYQ0UHEUmsYo5kiSjXFGSBGZqVlyQRIUkkpQMdvTYh719ubb6WpLq09d83xTZ5Pq+uFc+Z7POOfYIt99Bewwj+jUcG\/UZz9pyD4ryLX8PMwSi34J\/36\/0Z6\/4xEAjByXyMb9+8G215PDno39LtvN5NJzuHn9n857n\/O+z8JLkGNNfufiIN\/jVqAXJr3p9i4\/0wms10eO0nUugAQAAA=\"\n\ttestTARBase64Buffer := bytes.NewBufferString(testTARBase64)\n\ttestTARBuffer := bytes.NewBuffer([]byte{})\n\n\tdec := base64.NewDecoder(base64.StdEncoding, testTARBase64Buffer)\n\tgunzip, err := gzip.NewReader(dec)\n\tassert.Nil(t, err)\n\t_, err = io.Copy(testTARBuffer, gunzip)\n\tassert.Nil(t, err)\n\n\tvar testHandler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch r.Method {\n\t\tcase \"POST\":\n\t\t\tif r.URL.Path == \"\/containers\/create\" {\n\t\t\t\tresponse := struct {\n\t\t\t\t\tID string `json:\"Id\"`\n\t\t\t\t\tWarnings []string\n\t\t\t\t}{ID: testContainerID}\n\t\t\t\tjson.NewEncoder(w).Encode(&response)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbreak\n\t\tcase \"GET\":\n\t\t\tif r.URL.Path == fmt.Sprintf(\"\/containers\/%s\/export\", testContainerID) {\n\t\t\t\tw.Header().Add(\"Content-Type\", \"application\/octet-stream\")\n\t\t\t\tw.Write(testTARBuffer.Bytes())\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbreak\n\t\tcase \"DELETE\":\n\t\t\tif r.URL.Path == fmt.Sprintf(\"\/containers\/%s\", testContainerID) {\n\t\t\t\tw.WriteHeader(http.StatusNoContent)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\thttp.Error(w, \"Not found.\", http.StatusNotFound)\n\t\tassert.Fail(t, \"test tried to access a wrong path\", \"%s %+v\", r.Method, r.URL)\n\t}\n\n\tsrv := httptest.NewServer(testHandler)\n\n\tos.Setenv(\"DOCKER_HOST\", 
srv.URL)\n\tos.Setenv(\"DOCKER_API_VERSION\", \"1.22\")\n\tdefer os.Setenv(\"DOCKER_HOST\", \"\")\n\tdefer os.Setenv(\"DOCKER_API_VERSION\", \"\")\n\n\ttempDir, err := ioutil.TempDir(\"\", \"platconf-unittest-\")\n\tassert.Nil(t, err)\n\tdefer os.RemoveAll(tempDir)\n\n\terr = extractDockerImage(\"repository\", \"tag\", tempDir)\n\tassert.Nil(t, err)\n\n\trootInfo, err := ioutil.ReadDir(tempDir)\n\tassert.Nil(t, err)\n\tassert.Len(t, rootInfo, 3)\n\n\t\/\/ 'a'\n\tassert.Equal(t, \"a\", rootInfo[0].Name())\n\tassert.True(t, rootInfo[0].IsDir())\n\taInfo, err := ioutil.ReadDir(path.Join(tempDir, \"a\"))\n\tassert.Nil(t, err)\n\tassert.Len(t, aInfo, 1)\n\n\t\/\/ 'a\/b'\n\tassert.Equal(t, \"b\", aInfo[0].Name())\n\tassert.True(t, aInfo[0].IsDir())\n\tabInfo, err := ioutil.ReadDir(path.Join(tempDir, \"a\", \"b\"))\n\tassert.Nil(t, err)\n\tassert.Len(t, abInfo, 1)\n\n\t\/\/ 'a\/b\/d'\n\tassert.Equal(t, \"d\", abInfo[0].Name())\n\tassert.False(t, abInfo[0].IsDir())\n\tdContent, err := ioutil.ReadFile(path.Join(tempDir, \"a\", \"b\", \"d\"))\n\tassert.Nil(t, err)\n\tassert.Equal(t, \"example text\\n\", string(dContent))\n\n\t\/\/ 'c'\n\tassert.Equal(t, \"c\", rootInfo[1].Name())\n\tassert.True(t, rootInfo[1].IsDir())\n\tcInfo, err := ioutil.ReadDir(path.Join(tempDir, \"c\"))\n\tassert.Nil(t, err)\n\tassert.Len(t, cInfo, 0)\n\n\t\/\/ 'e'\n\tassert.Equal(t, \"e\", rootInfo[2].Name())\n\tassert.NotZero(t, rootInfo[2].Mode()&os.ModeSymlink, \"the file 'e' is not a symlink\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright IBM Corp. 2016 All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\t\t http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage api\n\nimport (\n\t\"time\"\n\n\t\"github.com\/hyperledger\/fabric\/gossip\/common\"\n)\n\n\/\/ SecurityAdvisor defines an external auxiliary object\n\/\/ that provides security and identity related capabilities\ntype SecurityAdvisor interface {\n\t\/\/ IsInMyOrg returns whether the given peer's certificate represents\n\t\/\/ a peer in the invoker's organization\n\tIsInMyOrg(PeerIdentityType) bool\n\n\t\/\/ Verify verifies a JoinChannelMessage, returns nil on success,\n\t\/\/ and an error on failure\n\tVerify(JoinChannelMessage) error\n}\n\n\/\/ ChannelNotifier is implemented by the gossip component and is used for the peer\n\/\/ layer to notify the gossip component of a JoinChannel event\ntype ChannelNotifier interface {\n\tJoinChannel(joinMsg JoinChannelMessage, chainID common.ChainID)\n}\n\n\/\/ JoinChannelMessage is the message that asserts a creation or mutation\n\/\/ of a channel's membership list, and is the message that is gossipped\n\/\/ among the peers\ntype JoinChannelMessage interface {\n\n\t\/\/ GetTimestamp returns the timestamp of the message's creation\n\tGetTimestamp() time.Time\n\n\t\/\/ Members returns all the peers that are in the channel\n\tMembers() []ChannelMember\n}\n\n\/\/ ChannelMember is a peer's certificate and endpoint (host:port)\ntype ChannelMember struct {\n\tCert PeerIdentityType \/\/ PeerIdentityType defines the certificate of the remote peer\n\tHost 
string \/\/ Host is the hostname\/ip address of the remote peer\n\tPort int \/\/ Port is the port the remote peer is listening on\n}\n<commit_msg>FAB-1018 MultiChannel API fabric<-->gossip<commit_after>\/*\nCopyright IBM Corp. 2016 All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\t\t http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage api\n\nimport (\n\t\"time\"\n\n\t\"github.com\/hyperledger\/fabric\/gossip\/common\"\n)\n\n\/\/ SecurityAdvisor defines an external auxiliary object\n\/\/ that provides security and identity related capabilities\ntype SecurityAdvisor interface {\n\t\/\/ OrgByPeerIdentity returns the OrgIdentityType\n\t\/\/ of a given peer identity\n\tOrgByPeerIdentity(PeerIdentityType) OrgIdentityType\n\n\t\/\/ Verify verifies a JoinChannelMessage, returns nil on success,\n\t\/\/ and an error on failure\n\tVerify(JoinChannelMessage) error\n\n\n}\n\n\/\/ ChannelNotifier is implemented by the gossip component and is used for the peer\n\/\/ layer to notify the gossip component of a JoinChannel event\ntype ChannelNotifier interface {\n\tJoinChannel(joinMsg JoinChannelMessage, chainID common.ChainID)\n}\n\n\/\/ JoinChannelMessage is the message that asserts a creation or mutation\n\/\/ of a channel's membership list, and is the message that is gossipped\n\/\/ among the peers\ntype JoinChannelMessage interface {\n\n\t\/\/ GetTimestamp returns the timestamp of the message's creation\n\tGetTimestamp() time.Time\n\n\t\/\/ AnchorPeers returns all the anchor peers that are in the channel\n\tAnchorPeers() []AnchorPeer\n}\n\n\/\/ AnchorPeer is an anchor peer's certificate and endpoint (host:port)\ntype AnchorPeer struct {\n\tCert PeerIdentityType \/\/ Cert defines the certificate of the remote peer\n\tHost string \/\/ Host is the hostname\/ip address of the remote peer\n\tPort int \/\/ Port is the port the remote peer is listening on\n}\n\n\/\/ OrgIdentityType defines the identity of an organization\ntype OrgIdentityType []byte\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file implements export filtering of an AST.\n\npackage doc\n\nimport (\n\t\"go\/ast\"\n\t\"go\/token\"\n)\n\n\/\/ filterIdentList removes unexported names from list in place\n\/\/ and returns the resulting list.\n\/\/\nfunc filterIdentList(list []*ast.Ident, blankOk bool) []*ast.Ident {\n\tj := 0\n\tfor _, x := range list {\n\t\tif ast.IsExported(x.Name) || (blankOk && x.Name == \"_\") {\n\t\t\tlist[j] = x\n\t\t\tj++\n\t\t}\n\t}\n\treturn list[0:j]\n}\n\n\/\/ removeErrorField removes anonymous fields named \"error\" from an interface.\n\/\/ This is called when \"error\" has been determined to be a local name,\n\/\/ not the predeclared type.\n\/\/\nfunc removeErrorField(ityp *ast.InterfaceType) {\n\tlist := ityp.Methods.List \/\/ we know that ityp.Methods != nil\n\tj := 0\n\tfor _, field := range list {\n\t\tkeepField := true\n\t\tif n := len(field.Names); n == 0 {\n\t\t\t\/\/ anonymous field\n\t\t\tif fname, _ := baseTypeName(field.Type); fname == \"error\" {\n\t\t\t\tkeepField = false\n\t\t\t}\n\t\t}\n\t\tif keepField {\n\t\t\tlist[j] = field\n\t\t\tj++\n\t\t}\n\t}\n\tif j < len(list) {\n\t\tityp.Incomplete = true\n\t}\n\tityp.Methods.List = list[0:j]\n}\n\n\/\/ filterFieldList removes unexported fields (field names) from the field list\n\/\/ in place and returns true if fields were removed. Anonymous fields are\n\/\/ recorded with the parent type. filterType is called with the types of\n\/\/ all remaining fields.\n\/\/\nfunc (r *reader) filterFieldList(parent *namedType, fields *ast.FieldList, ityp *ast.InterfaceType) (removedFields bool) {\n\tif fields == nil {\n\t\treturn\n\t}\n\tlist := fields.List\n\tj := 0\n\tfor _, field := range list {\n\t\tkeepField := false\n\t\tif n := len(field.Names); n == 0 {\n\t\t\t\/\/ anonymous field\n\t\t\tfname := r.recordAnonymousField(parent, field.Type)\n\t\t\tif ast.IsExported(fname) {\n\t\t\t\tkeepField = true\n\t\t\t} else if ityp != nil && fname == \"error\" {\n\t\t\t\t\/\/ possibly the predeclared error interface; keep\n\t\t\t\t\/\/ it for now but remember this interface so that\n\t\t\t\t\/\/ it can be fixed if error is also defined locally\n\t\t\t\tkeepField = true\n\t\t\t\tr.remember(ityp)\n\t\t\t}\n\t\t} else {\n\t\t\tfield.Names = filterIdentList(field.Names, false)\n\t\t\tif len(field.Names) < n {\n\t\t\t\tremovedFields = true\n\t\t\t}\n\t\t\tif len(field.Names) > 0 {\n\t\t\t\tkeepField = true\n\t\t\t}\n\t\t}\n\t\tif keepField {\n\t\t\tr.filterType(nil, field.Type)\n\t\t\tlist[j] = field\n\t\t\tj++\n\t\t}\n\t}\n\tif j < len(list) {\n\t\tremovedFields = true\n\t}\n\tfields.List = list[0:j]\n\treturn\n}\n\n\/\/ filterParamList applies filterType to each parameter type in fields.\n\/\/\nfunc (r *reader) filterParamList(fields *ast.FieldList) {\n\tif fields != nil {\n\t\tfor _, f := range fields.List {\n\t\t\tr.filterType(nil, f.Type)\n\t\t}\n\t}\n}\n\n\/\/ filterType strips any unexported struct fields or method types from typ\n\/\/ in place. 
If fields (or methods) have been removed, the corresponding\n\/\/ struct or interface type has the Incomplete field set to true.\n\/\/\nfunc (r *reader) filterType(parent *namedType, typ ast.Expr) {\n\tswitch t := typ.(type) {\n\tcase *ast.Ident:\n\t\t\/\/ nothing to do\n\tcase *ast.ParenExpr:\n\t\tr.filterType(nil, t.X)\n\tcase *ast.ArrayType:\n\t\tr.filterType(nil, t.Elt)\n\tcase *ast.StructType:\n\t\tif r.filterFieldList(parent, t.Fields, nil) {\n\t\t\tt.Incomplete = true\n\t\t}\n\tcase *ast.FuncType:\n\t\tr.filterParamList(t.Params)\n\t\tr.filterParamList(t.Results)\n\tcase *ast.InterfaceType:\n\t\tif r.filterFieldList(parent, t.Methods, t) {\n\t\t\tt.Incomplete = true\n\t\t}\n\tcase *ast.MapType:\n\t\tr.filterType(nil, t.Key)\n\t\tr.filterType(nil, t.Value)\n\tcase *ast.ChanType:\n\t\tr.filterType(nil, t.Value)\n\t}\n}\n\nfunc (r *reader) filterSpec(spec ast.Spec, tok token.Token) bool {\n\tswitch s := spec.(type) {\n\tcase *ast.ImportSpec:\n\t\t\/\/ always keep imports so we can collect them\n\t\treturn true\n\tcase *ast.ValueSpec:\n\t\ts.Names = filterIdentList(s.Names, tok == token.CONST)\n\t\tif len(s.Names) > 0 {\n\t\t\tr.filterType(nil, s.Type)\n\t\t\treturn true\n\t\t}\n\tcase *ast.TypeSpec:\n\t\tif name := s.Name.Name; ast.IsExported(name) {\n\t\t\tr.filterType(r.lookupType(s.Name.Name), s.Type)\n\t\t\treturn true\n\t\t} else if name == \"error\" {\n\t\t\t\/\/ special case: remember that error is declared locally\n\t\t\tr.errorDecl = true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (r *reader) filterSpecList(list []ast.Spec, tok token.Token) []ast.Spec {\n\tj := 0\n\tfor _, s := range list {\n\t\tif r.filterSpec(s, tok) {\n\t\t\tlist[j] = s\n\t\t\tj++\n\t\t}\n\t}\n\treturn list[0:j]\n}\n\nfunc (r *reader) filterDecl(decl ast.Decl) bool {\n\tswitch d := decl.(type) {\n\tcase *ast.GenDecl:\n\t\td.Specs = r.filterSpecList(d.Specs, d.Tok)\n\t\treturn len(d.Specs) > 0\n\tcase *ast.FuncDecl:\n\t\t\/\/ ok to filter these methods early because any\n\t\t\/\/ conflicting method will be filtered here, too -\n\t\t\/\/ thus, removing these methods early will not lead\n\t\t\/\/ to the false removal of possible conflicts\n\t\treturn ast.IsExported(d.Name.Name)\n\t}\n\treturn false\n}\n\n\/\/ fileExports removes unexported declarations from src in place.\n\/\/\nfunc (r *reader) fileExports(src *ast.File) {\n\tj := 0\n\tfor _, d := range src.Decls {\n\t\tif r.filterDecl(d) {\n\t\t\tsrc.Decls[j] = d\n\t\t\tj++\n\t\t}\n\t}\n\tsrc.Decls = src.Decls[0:j]\n}\n<commit_msg>go\/doc: document rationale for recent change<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file implements export filtering of an AST.\n\npackage doc\n\nimport (\n\t\"go\/ast\"\n\t\"go\/token\"\n)\n\n\/\/ filterIdentList removes unexported names from list in place\n\/\/ and returns the resulting list. 
If blankOk is set, blank\n\/\/ identifiers are considered exported names.\n\/\/\nfunc filterIdentList(list []*ast.Ident, blankOk bool) []*ast.Ident {\n\tj := 0\n\tfor _, x := range list {\n\t\tif ast.IsExported(x.Name) || (blankOk && x.Name == \"_\") {\n\t\t\tlist[j] = x\n\t\t\tj++\n\t\t}\n\t}\n\treturn list[0:j]\n}\n\n\/\/ removeErrorField removes anonymous fields named \"error\" from an interface.\n\/\/ This is called when \"error\" has been determined to be a local name,\n\/\/ not the predeclared type.\n\/\/\nfunc removeErrorField(ityp *ast.InterfaceType) {\n\tlist := ityp.Methods.List \/\/ we know that ityp.Methods != nil\n\tj := 0\n\tfor _, field := range list {\n\t\tkeepField := true\n\t\tif n := len(field.Names); n == 0 {\n\t\t\t\/\/ anonymous field\n\t\t\tif fname, _ := baseTypeName(field.Type); fname == \"error\" {\n\t\t\t\tkeepField = false\n\t\t\t}\n\t\t}\n\t\tif keepField {\n\t\t\tlist[j] = field\n\t\t\tj++\n\t\t}\n\t}\n\tif j < len(list) {\n\t\tityp.Incomplete = true\n\t}\n\tityp.Methods.List = list[0:j]\n}\n\n\/\/ filterFieldList removes unexported fields (field names) from the field list\n\/\/ in place and returns true if fields were removed. Anonymous fields are\n\/\/ recorded with the parent type. filterType is called with the types of\n\/\/ all remaining fields.\n\/\/\nfunc (r *reader) filterFieldList(parent *namedType, fields *ast.FieldList, ityp *ast.InterfaceType) (removedFields bool) {\n\tif fields == nil {\n\t\treturn\n\t}\n\tlist := fields.List\n\tj := 0\n\tfor _, field := range list {\n\t\tkeepField := false\n\t\tif n := len(field.Names); n == 0 {\n\t\t\t\/\/ anonymous field\n\t\t\tfname := r.recordAnonymousField(parent, field.Type)\n\t\t\tif ast.IsExported(fname) {\n\t\t\t\tkeepField = true\n\t\t\t} else if ityp != nil && fname == \"error\" {\n\t\t\t\t\/\/ possibly the predeclared error interface; keep\n\t\t\t\t\/\/ it for now but remember this interface so that\n\t\t\t\t\/\/ it can be fixed if error is also defined locally\n\t\t\t\tkeepField = true\n\t\t\t\tr.remember(ityp)\n\t\t\t}\n\t\t} else {\n\t\t\tfield.Names = filterIdentList(field.Names, false)\n\t\t\tif len(field.Names) < n {\n\t\t\t\tremovedFields = true\n\t\t\t}\n\t\t\tif len(field.Names) > 0 {\n\t\t\t\tkeepField = true\n\t\t\t}\n\t\t}\n\t\tif keepField {\n\t\t\tr.filterType(nil, field.Type)\n\t\t\tlist[j] = field\n\t\t\tj++\n\t\t}\n\t}\n\tif j < len(list) {\n\t\tremovedFields = true\n\t}\n\tfields.List = list[0:j]\n\treturn\n}\n\n\/\/ filterParamList applies filterType to each parameter type in fields.\n\/\/\nfunc (r *reader) filterParamList(fields *ast.FieldList) {\n\tif fields != nil {\n\t\tfor _, f := range fields.List {\n\t\t\tr.filterType(nil, f.Type)\n\t\t}\n\t}\n}\n\n\/\/ filterType strips any unexported struct fields or method types from typ\n\/\/ in place. 
If fields (or methods) have been removed, the corresponding\n\/\/ struct or interface type has the Incomplete field set to true.\n\/\/\nfunc (r *reader) filterType(parent *namedType, typ ast.Expr) {\n\tswitch t := typ.(type) {\n\tcase *ast.Ident:\n\t\t\/\/ nothing to do\n\tcase *ast.ParenExpr:\n\t\tr.filterType(nil, t.X)\n\tcase *ast.ArrayType:\n\t\tr.filterType(nil, t.Elt)\n\tcase *ast.StructType:\n\t\tif r.filterFieldList(parent, t.Fields, nil) {\n\t\t\tt.Incomplete = true\n\t\t}\n\tcase *ast.FuncType:\n\t\tr.filterParamList(t.Params)\n\t\tr.filterParamList(t.Results)\n\tcase *ast.InterfaceType:\n\t\tif r.filterFieldList(parent, t.Methods, t) {\n\t\t\tt.Incomplete = true\n\t\t}\n\tcase *ast.MapType:\n\t\tr.filterType(nil, t.Key)\n\t\tr.filterType(nil, t.Value)\n\tcase *ast.ChanType:\n\t\tr.filterType(nil, t.Value)\n\t}\n}\n\nfunc (r *reader) filterSpec(spec ast.Spec, tok token.Token) bool {\n\tswitch s := spec.(type) {\n\tcase *ast.ImportSpec:\n\t\t\/\/ always keep imports so we can collect them\n\t\treturn true\n\tcase *ast.ValueSpec:\n\t\t\/\/ special case: consider blank constants as exported\n\t\t\/\/ (work-around for issue 5397)\n\t\ts.Names = filterIdentList(s.Names, tok == token.CONST)\n\t\tif len(s.Names) > 0 {\n\t\t\tr.filterType(nil, s.Type)\n\t\t\treturn true\n\t\t}\n\tcase *ast.TypeSpec:\n\t\tif name := s.Name.Name; ast.IsExported(name) {\n\t\t\tr.filterType(r.lookupType(s.Name.Name), s.Type)\n\t\t\treturn true\n\t\t} else if name == \"error\" {\n\t\t\t\/\/ special case: remember that error is declared locally\n\t\t\tr.errorDecl = true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (r *reader) filterSpecList(list []ast.Spec, tok token.Token) []ast.Spec {\n\tj := 0\n\tfor _, s := range list {\n\t\tif r.filterSpec(s, tok) {\n\t\t\tlist[j] = s\n\t\t\tj++\n\t\t}\n\t}\n\treturn list[0:j]\n}\n\nfunc (r *reader) filterDecl(decl ast.Decl) bool {\n\tswitch d := decl.(type) {\n\tcase *ast.GenDecl:\n\t\td.Specs = r.filterSpecList(d.Specs, d.Tok)\n\t\treturn len(d.Specs) > 0\n\tcase *ast.FuncDecl:\n\t\t\/\/ ok to filter these methods early because any\n\t\t\/\/ conflicting method will be filtered here, too -\n\t\t\/\/ thus, removing these methods early will not lead\n\t\t\/\/ to the false removal of possible conflicts\n\t\treturn ast.IsExported(d.Name.Name)\n\t}\n\treturn false\n}\n\n\/\/ fileExports removes unexported declarations from src in place.\n\/\/\nfunc (r *reader) fileExports(src *ast.File) {\n\tj := 0\n\tfor _, d := range src.Decls {\n\t\tif r.filterDecl(d) {\n\t\t\tsrc.Decls[j] = d\n\t\t\tj++\n\t\t}\n\t}\n\tsrc.Decls = src.Decls[0:j]\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Primitives are basic shapes which can be directly drawn to screen.\n *\n *\/\n\npackage graphics\n\nimport \"image\/color\"\nimport \"github.com\/banthar\/Go-SDL\/sdl\"\n\n\/\/ A Primitive is a basic shape which can be drawn directly by the artist.\ntype Primitive interface {\n\tdraw(s *sdl.Surface)\n}\n\n\/\/ A Point is as it sounds, a single point in space.\ntype Point struct {\n\tx, y int\n\tc color.Color\n}\n\n\/\/ Points are drawn by setting a single corresponding pixel.\nfunc (p Point) draw(s *sdl.Surface) {\n\tsafeSet(s, p.x, p.y, p.c)\n}\n\n\/\/ A Rectangle is... 
a rectangle.\ntype Rectangle struct {\n\tx, y int16\n\tw, h uint16\n\tc color.Color\n}\n\n\/\/ Rectangles are drawn by directly calling FillRect on the surface.\nfunc (r Rectangle) draw(s *sdl.Surface) {\n\tformat := sdl.GetVideoInfo().Vfmt\n\tcolor := sdl.ColorFromGoColor(r.c)\n\tcolorVal := sdl.MapRGB(format, color.R, color.G, color.B)\n\ts.FillRect(&sdl.Rect{r.x, r.y, r.w, r.h}, colorVal)\n}\n\n\/\/ Circles are, you guessed it. Circles.\ntype Circle struct {\n\tx, y int16 \/\/ Location on screen\n\tr uint16 \/\/ Radius\n\tb int \/\/ Border thickness. For now only controls if there IS a border or not, not actually it's thickness.\n\tc color.Color \/\/ Color\n}\n\n\/\/ Circles may be filled or not.\nfunc (c Circle) draw(s *sdl.Surface) {\n\tif c.b == 0 {\n\t\tdrawFilledCircle(c.x, c.y, c.r, c.c, s)\n\t} else {\n\t\tdrawOutlineCircle(c.x, c.y, c.r, c.c, s)\n\t}\n}\n\n\/\/ drawFilledCircle uses the integer midpoint circle algorithm to draw a filled\n\/\/ circle to the given surface.\nfunc drawFilledCircle(x0, y0 int16, r uint16, c color.Color, s *sdl.Surface) {\n\tformat := sdl.GetVideoInfo().Vfmt\n\tcolor := sdl.ColorFromGoColor(c)\n\tcolorVal := sdl.MapRGB(format, color.R, color.G, color.B)\n\n\tx := int16(r)\n\ty := int16(0)\n\te := 1 - x\n\n\tfor x >= y {\n\t\ts.FillRect(&sdl.Rect{-x + x0, y + y0, uint16(2 * x), 1}, colorVal)\n\t\ts.FillRect(&sdl.Rect{-x + x0, -y + y0, uint16(2 * x), 1}, colorVal)\n\t\ts.FillRect(&sdl.Rect{-y + x0, x + y0, uint16(2 * y), 1}, colorVal)\n\t\ts.FillRect(&sdl.Rect{-y + x0, -x + y0, uint16(2 * y), 1}, colorVal)\n\n\t\ty++\n\n\t\tif e < 0 {\n\t\t\te += 2*y + 1\n\t\t} else {\n\t\t\tx--\n\t\t\te += 2 * (y - x + 1)\n\t\t}\n\t}\n}\n\n\/\/ drawOutlineCircle uses the integer midpoint circle algorithm to draw the outline\n\/\/ of a circle (1 px thick) to the given surface.\nfunc drawOutlineCircle(x0, y0 int16, r uint16, c color.Color, s *sdl.Surface) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tcolor := sdl.ColorFromGoColor(c)\n\n\tx := int16(r)\n\ty := int16(0)\n\te := 1 - x\n\n\tfor x >= y {\n\t\tsafeSet(s, int(x+x0), int(y+y0), color)\n\t\tsafeSet(s, int(x+x0), int(-y+y0), color)\n\t\tsafeSet(s, int(-x+x0), int(y+y0), color)\n\t\tsafeSet(s, int(-x+x0), int(-y+y0), color)\n\t\tsafeSet(s, int(y+x0), int(x+y0), color)\n\t\tsafeSet(s, int(y+x0), int(-x+y0), color)\n\t\tsafeSet(s, int(-y+x0), int(x+y0), color)\n\t\tsafeSet(s, int(-y+x0), int(-x+y0), color)\n\n\t\ty++\n\n\t\tif e < 0 {\n\t\t\te += 2*y + 1\n\t\t} else {\n\t\t\tx--\n\t\t\te += 2 * (y - x + 1)\n\t\t}\n\t}\n}\n\nfunc safeSet(s *sdl.Surface, x, y int, c sdl.Color) {\n\tif x >= 0 && y >= 0 && x < int(s.W) && y < int(s.H) {\n\t\ts.Set(x, y, c)\n\t}\n}\n<commit_msg>Fixed bug in primitive drawing. Was using wrong type of color.<commit_after>\/*\n * Primitives are basic shapes which can be directly drawn to screen.\n *\n *\/\n\npackage graphics\n\nimport \"image\/color\"\nimport \"github.com\/banthar\/Go-SDL\/sdl\"\n\n\/\/ A Primitive is a basic shape which can be drawn directly by the artist.\ntype Primitive interface {\n\tdraw(s *sdl.Surface)\n}\n\n\/\/ A Point is as it sounds, a single point in space.\ntype Point struct {\n\tx, y int\n\tc color.Color\n}\n\n\/\/ Points are drawn by setting a single corresponding pixel.\nfunc (p Point) draw(s *sdl.Surface) {\n\tcolor := sdl.ColorFromGoColor(p.c)\n\tsafeSet(s, p.x, p.y, color)\n}\n\n\/\/ A Rectangle is... 
a rectangle.\ntype Rectangle struct {\n\tx, y int16\n\tw, h uint16\n\tc color.Color\n}\n\n\/\/ Rectangles are drawn by directly calling FillRect on the surface.\nfunc (r Rectangle) draw(s *sdl.Surface) {\n\tformat := sdl.GetVideoInfo().Vfmt\n\tcolor := sdl.ColorFromGoColor(r.c)\n\tcolorVal := sdl.MapRGB(format, color.R, color.G, color.B)\n\ts.FillRect(&sdl.Rect{r.x, r.y, r.w, r.h}, colorVal)\n}\n\n\/\/ Circles are, you guessed it. Circles.\ntype Circle struct {\n\tx, y int16 \/\/ Location on screen\n\tr uint16 \/\/ Radius\n\tb int \/\/ Border thickness. For now only controls if there IS a border or not, not actually it's thickness.\n\tc color.Color \/\/ Color\n}\n\n\/\/ Circles may be filled or not.\nfunc (c Circle) draw(s *sdl.Surface) {\n\tif c.b == 0 {\n\t\tdrawFilledCircle(c.x, c.y, c.r, c.c, s)\n\t} else {\n\t\tdrawOutlineCircle(c.x, c.y, c.r, c.c, s)\n\t}\n}\n\n\/\/ drawFilledCircle uses the integer midpoint circle algorithm to draw a filled\n\/\/ circle to the given surface.\nfunc drawFilledCircle(x0, y0 int16, r uint16, c color.Color, s *sdl.Surface) {\n\tformat := sdl.GetVideoInfo().Vfmt\n\tcolor := sdl.ColorFromGoColor(c)\n\tcolorVal := sdl.MapRGB(format, color.R, color.G, color.B)\n\n\tx := int16(r)\n\ty := int16(0)\n\te := 1 - x\n\n\tfor x >= y {\n\t\ts.FillRect(&sdl.Rect{-x + x0, y + y0, uint16(2 * x), 1}, colorVal)\n\t\ts.FillRect(&sdl.Rect{-x + x0, -y + y0, uint16(2 * x), 1}, colorVal)\n\t\ts.FillRect(&sdl.Rect{-y + x0, x + y0, uint16(2 * y), 1}, colorVal)\n\t\ts.FillRect(&sdl.Rect{-y + x0, -x + y0, uint16(2 * y), 1}, colorVal)\n\n\t\ty++\n\n\t\tif e < 0 {\n\t\t\te += 2*y + 1\n\t\t} else {\n\t\t\tx--\n\t\t\te += 2 * (y - x + 1)\n\t\t}\n\t}\n}\n\n\/\/ drawOutlineCircle uses the integer midpoint circle algorithm to draw the outline\n\/\/ of a circle (1 px thick) to the given surface.\nfunc drawOutlineCircle(x0, y0 int16, r uint16, c color.Color, s *sdl.Surface) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tcolor := sdl.ColorFromGoColor(c)\n\n\tx := int16(r)\n\ty := int16(0)\n\te := 1 - x\n\n\tfor x >= y {\n\t\tsafeSet(s, int(x+x0), int(y+y0), color)\n\t\tsafeSet(s, int(x+x0), int(-y+y0), color)\n\t\tsafeSet(s, int(-x+x0), int(y+y0), color)\n\t\tsafeSet(s, int(-x+x0), int(-y+y0), color)\n\t\tsafeSet(s, int(y+x0), int(x+y0), color)\n\t\tsafeSet(s, int(y+x0), int(-x+y0), color)\n\t\tsafeSet(s, int(-y+x0), int(x+y0), color)\n\t\tsafeSet(s, int(-y+x0), int(-x+y0), color)\n\n\t\ty++\n\n\t\tif e < 0 {\n\t\t\te += 2*y + 1\n\t\t} else {\n\t\t\tx--\n\t\t\te += 2 * (y - x + 1)\n\t\t}\n\t}\n}\n\nfunc safeSet(s *sdl.Surface, x, y int, c sdl.Color) {\n\tif x >= 0 && y >= 0 && x < int(s.W) && y < int(s.H) {\n\t\ts.Set(x, y, c)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package goftp\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/default listening port of the FTP server\nconst (\n\tFTP_SERVER_DEFAULT_LISTENING_PORT = 21\n)\n\nconst (\n\tFC_REQUEST_SUFFIX string = \"\\r\\n\"\n\tFC_RESPONSE_BUFFER int = 1024\n)\n\nconst (\n\tFC_RESP_CODE_ENTER_PASSIVE_MODE int = 227\n)\n\n\/\/commands for interacting with the FTP server; the prefix FC stands for Ftp Command\nconst (\n\tFC_USER string = \"USER\" \/\/USER login_name\n\tFC_PASS string = \"PASS\" \/\/PASS login_pass\n\tFC_ACCT string = \"ACCT\" \/\/ACCT account_name\n\tFC_QUIT string = \"QUIT\" \/\/QUIT\n\tFC_PWD string = \"PWD\" \/\/PWD\n\tFC_CWD string = \"CWD\" \/\/CWD remote_dir\n\tFC_LIST string = \"LIST\" \/\/LIST remote_dir\n\tFC_PASV string = \"PASV\" \/\/PASV\n)\n\ntype GoFtpClientCmd struct 
{\n\tName string\n\tParams []string\n\tConnected bool\n\n\tDefaultLocalWorkDir string\n\tLocalWorkDir string\n\tUsername string\n\n\tFtpConn net.Conn\n\n\tGoFtpClientHelp\n}\n\nfunc (this *GoFtpClientCmd) welcome() {\n\tvar data []byte = make([]byte, 1024)\n\treadCount, err := this.FtpConn.Read(data)\n\tif err == nil {\n\t\tfmt.Print(string(data[:readCount]))\n\t\t\/\/prompt for the login name\n\t\tvar remoteAddr = this.FtpConn.RemoteAddr().String()\n\t\tvar portIndex = strings.LastIndex(remoteAddr, \":\")\n\t\tfmt.Printf(\"Name (%s:%s):\", remoteAddr[:portIndex], this.Username)\n\t\tvar username string\n\t\tfmt.Scanln(&username)\n\t\tthis.sendCmdRequest([]string{FC_USER, username})\n\t\tthis.recvCmdResponse()\n\t\t\/\/prompt for the login password\n\t\tfmt.Print(\"Password:\")\n\t\tvar password string\n\t\tfmt.Scanln(&password)\n\t\tthis.sendCmdRequest([]string{FC_PASS, password})\n\t\tthis.recvCmdResponse()\n\t} else {\n\t\tfmt.Println(err)\n\t}\n}\n\nfunc (this *GoFtpClientCmd) sendCmdRequest(ftpParams []string) {\n\tif this.Connected {\n\t\tvar sendData = fmt.Sprint(strings.Join(ftpParams, \" \"), FC_REQUEST_SUFFIX)\n\t\tthis.FtpConn.Write([]byte(sendData))\n\t} else {\n\t\tfmt.Println(\"Not connected.\")\n\t}\n}\n\nfunc (this *GoFtpClientCmd) recvCmdResponse() (recvData string) {\n\tif this.Connected {\n\t\tvar recvBytes []byte = make([]byte, FC_RESPONSE_BUFFER)\n\t\treadCount, err := this.FtpConn.Read(recvBytes)\n\t\tif err == nil {\n\t\t\trecvData = string(recvBytes[:readCount])\n\t\t\tfmt.Print(string(recvData))\n\t\t} else {\n\t\t\tfmt.Println(\"ftp:\", err)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (this *GoFtpClientCmd) open() {\n\tif this.Connected {\n\t\tvar remoteAddr = this.FtpConn.RemoteAddr().String()\n\t\tvar portIndex = strings.LastIndex(remoteAddr, \":\")\n\t\tfmt.Println(\"Already connected to \", remoteAddr[:portIndex], \", use close first.\")\n\t} else {\n\t\tvar paramCount = len(this.Params)\n\t\tvar ftpHost string\n\t\tvar ftpPort int\n\t\tif paramCount == 0 {\n\t\t\tfmt.Print(\"(To) \")\n\t\t\tcmdReader := bufio.NewReader(os.Stdin)\n\t\t\tcmdStr, err := cmdReader.ReadString('\\n')\n\t\t\tcmdStr = strings.Trim(cmdStr, \"\\r\\n\")\n\t\t\tif err == nil && cmdStr != \"\" {\n\t\t\t\tcmdParts := strings.Fields(cmdStr)\n\t\t\t\tcmdPartCount := len(cmdParts)\n\t\t\t\tif cmdPartCount == 1 {\n\t\t\t\t\tftpHost = cmdParts[0]\n\t\t\t\t\tftpPort = FTP_SERVER_DEFAULT_LISTENING_PORT\n\t\t\t\t} else if cmdPartCount == 2 {\n\t\t\t\t\tftpHost = cmdParts[0]\n\t\t\t\t\tport, err := strconv.Atoi(cmdParts[1])\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tthis.cmdUsage(this.Name)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tftpPort = port\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tthis.cmdUsage(this.Name)\n\t\t\t}\n\t\t} else if paramCount == 1 {\n\t\t\tftpHost = this.Params[0]\n\t\t\tftpPort = FTP_SERVER_DEFAULT_LISTENING_PORT\n\t\t} else if paramCount == 2 {\n\t\t\tftpHost = this.Params[0]\n\t\t\tport, err := strconv.Atoi(this.Params[1])\n\t\t\tif err != nil {\n\t\t\t\tthis.cmdUsage(this.Name)\n\t\t\t} else {\n\t\t\t\tftpPort = port\n\t\t\t}\n\t\t}\n\n\t\t\/\/establish the FTP connection\n\t\tif ftpHost != \"\" {\n\t\t\tips, lookupErr := net.LookupIP(ftpHost)\n\t\t\tif lookupErr != nil {\n\t\t\t\tfmt.Println(\"ftp: Can't lookup host `\", ftpHost, \"'\")\n\t\t\t} else {\n\t\t\t\tvar port = strconv.Itoa(ftpPort)\n\t\t\t\tfor _, ip := range ips {\n\t\t\t\t\tconn, connErr := net.DialTimeout(\"tcp\", net.JoinHostPort(ip.String(), port),\n\t\t\t\t\t\ttime.Duration(DIAL_FTP_SERVER_TIMEOUT_SECONDS)*time.Second)\n\t\t\t\t\tif connErr != nil 
{\n\t\t\t\t\t\tfmt.Println(\"Trying \", ip, \"...\")\n\t\t\t\t\t\tfmt.Println(\"ftp:\", connErr.Error())\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfmt.Println(\"Connected to\", ip, \".\")\n\t\t\t\t\t\tvar sysUser, _ = user.Current()\n\t\t\t\t\t\tthis.FtpConn = conn\n\t\t\t\t\t\tthis.Connected = true\n\t\t\t\t\t\tthis.Username = sysUser.Username\n\t\t\t\t\t\tthis.DefaultLocalWorkDir = sysUser.HomeDir\n\t\t\t\t\t\tthis.LocalWorkDir = sysUser.HomeDir\n\n\t\t\t\t\t\tthis.welcome()\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (this *GoFtpClientCmd) parseCmdResponse(respData string) (ftpRespCode int, err error) {\n\tvar recvDataParts = strings.Fields(respData)\n\tif len(recvDataParts) > 0 {\n\t\tftpRespCode, err = strconv.Atoi(recvDataParts[0])\n\t}\n\treturn\n}\n\nfunc (this *GoFtpClientCmd) lcd() {\n\tvar paramCount = len(this.Params)\n\tif paramCount == 0 || paramCount == 1 {\n\t\tif paramCount == 0 {\n\t\t\tthis.LocalWorkDir = this.DefaultLocalWorkDir\n\t\t\tfmt.Println(\"Local directory now:\", this.LocalWorkDir)\n\t\t} else {\n\t\t\tvar path = this.Params[0]\n\t\t\tif !filepath.IsAbs(path) {\n\t\t\t\tpath = filepath.Join(this.DefaultLocalWorkDir, path)\n\t\t\t}\n\t\t\tfiInfo, err := os.Stat(path)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"ftp:\", err.Error())\n\t\t\t} else {\n\t\t\t\tif fiInfo.IsDir() {\n\t\t\t\t\tthis.LocalWorkDir = path\n\t\t\t\t\tfmt.Println(\"Local directory now:\", path)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"ftp: Can't chdir `\", path, \"': No such file or directory\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tthis.cmdUsage(this.Name)\n\t}\n}\n\nfunc (this *GoFtpClientCmd) user() {\n\tvar paramCount = len(this.Params)\n\tvar username string\n\tvar password string\n\tvar account string\n\tif paramCount == 0 {\n\t\tfmt.Print(\"Username:\")\n\t\tfmt.Scanln(&username)\n\t\tusername = strings.Trim(username, \"\\r\\n\")\n\t\tif username == \"\" {\n\t\t\tthis.cmdUsage(this.Name)\n\t\t} else {\n\t\t\tthis.sendCmdRequest([]string{FC_USER, username})\n\t\t\tthis.recvCmdResponse()\n\t\t\tfmt.Print(\"Password:\")\n\t\t\tfmt.Scanln(&password)\n\t\t\tthis.sendCmdRequest([]string{FC_PASS, password})\n\t\t\tthis.recvCmdResponse()\n\t\t}\n\t} else if paramCount == 1 {\n\t\tusername = this.Params[0]\n\t\tthis.sendCmdRequest([]string{FC_USER, username})\n\t\tthis.recvCmdResponse()\n\t\tfmt.Print(\"Password:\")\n\t\tfmt.Scanln(&password)\n\t\tthis.sendCmdRequest([]string{FC_PASS, password})\n\t\tthis.recvCmdResponse()\n\t} else if paramCount == 2 {\n\t\tusername = this.Params[0]\n\t\tpassword = this.Params[1]\n\t\tthis.sendCmdRequest([]string{FC_USER, username})\n\t\tthis.recvCmdResponse()\n\t\tthis.sendCmdRequest([]string{FC_PASS, password})\n\t\tthis.recvCmdResponse()\n\t} else if paramCount == 3 {\n\t\tusername = this.Params[0]\n\t\tpassword = this.Params[1]\n\t\taccount = this.Params[2]\n\t\tthis.sendCmdRequest([]string{FC_USER, username})\n\t\tthis.recvCmdResponse()\n\t\tthis.sendCmdRequest([]string{FC_PASS, password})\n\t\tthis.recvCmdResponse()\n\t\tthis.sendCmdRequest([]string{FC_ACCT, account})\n\t\tthis.recvCmdResponse()\n\t}\n\n}\n\nfunc (this *GoFtpClientCmd) pwd() {\n\tthis.sendCmdRequest([]string{FC_PWD})\n\tthis.recvCmdResponse()\n}\n\nfunc (this *GoFtpClientCmd) cwd() {\n\tvar paramCount = len(this.Params)\n\tif paramCount == 0 {\n\t\tfmt.Println(\"(remote-directory)\")\n\t\tvar remoteDir string\n\t\tfmt.Scanln(&remoteDir)\n\t\tif remoteDir != \"\" {\n\t\t\tthis.sendCmdRequest([]string{FC_CWD, remoteDir})\n\t\t\tthis.recvCmdResponse()\n\t\t} 
else {\n\t\t\tthis.cmdUsage(this.Name)\n\t\t}\n\t} else if paramCount > 1 {\n\t\tthis.cmdUsage(this.Name)\n\t} else {\n\t\tthis.sendCmdRequest([]string{FC_CWD, this.Params[0]})\n\t\tthis.recvCmdResponse()\n\t}\n}\n\nfunc (this *GoFtpClientCmd) ls() {\n\tvar paramCount = len(this.Params)\n\tif paramCount >= 0 && paramCount <= 2 {\n\t\tvar resultOutputFile string\n\t\tvar remoteDir string\n\t\tif paramCount == 1 {\n\t\t\tremoteDir = this.Params[0]\n\t\t} else if paramCount == 2 {\n\t\t\tremoteDir = this.Params[0]\n\t\t\tresultOutputFile = this.Params[1]\n\t\t}\n\t\tvar outputFile *os.File\n\t\tvar err error\n\t\tif resultOutputFile != \"\" {\n\t\t\tif !filepath.IsAbs(resultOutputFile) {\n\t\t\t\tresultOutputFile = filepath.Join(this.LocalWorkDir, resultOutputFile)\n\t\t\t}\n\t\t\toutputFile, err = os.Create(resultOutputFile)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"ftp: Can't access `\", resultOutputFile, \"': No such file or directory\")\n\t\t\t}\n\t\t}\n\n\t\tif err == nil {\n\t\t\tpasvHost, pasvPort, ftpRespCode, errPasv := this.pasv()\n\t\t\tif errPasv == nil {\n\t\t\t\tif pasvHost != \"\" && ftpRespCode == FC_RESP_CODE_ENTER_PASSIVE_MODE {\n\t\t\t\t\tthis.sendCmdRequest([]string{FC_LIST, remoteDir})\n\t\t\t\t\tthis.recvCmdResponse()\n\t\t\t\t\tvar pasvRespData = this.getPasvData(pasvHost, pasvPort)\n\t\t\t\t\tif outputFile != nil {\n\t\t\t\t\t\tvar bWriter = bufio.NewWriter(outputFile)\n\t\t\t\t\t\tbWriter.WriteString(string(pasvRespData))\n\t\t\t\t\t\tbWriter.Flush()\n\t\t\t\t\t\toutputFile.Close()\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfmt.Print(string(pasvRespData))\n\t\t\t\t\t}\n\t\t\t\t\tthis.recvCmdResponse()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tthis.cmdUsage(this.Name)\n\t}\n}\n\nfunc (this *GoFtpClientCmd) pasv() (pasvHost string, pasvPort int, ftpRespCode int, err error) {\n\tif this.Connected {\n\t\tthis.sendCmdRequest([]string{FC_PASV})\n\t\tvar recvData = this.recvCmdResponse()\n\t\tvar startIndex = strings.Index(recvData, \"(\")\n\t\tvar endIndex = strings.LastIndex(recvData, \")\")\n\t\tif startIndex == -1 || endIndex == -1 {\n\t\t\terr = errors.New(\"ftp: PASV command failed.\")\n\t\t} else {\n\t\t\tvar pasvDataStr = recvData[startIndex+1 : endIndex]\n\t\t\tvar pasvDataParts = strings.Split(pasvDataStr, \",\")\n\t\t\tpasvHost = strings.Join(pasvDataParts[:4], \".\")\n\t\t\tvar p1, p2 int\n\t\t\tp1, err = strconv.Atoi(pasvDataParts[4])\n\t\t\tp2, err = strconv.Atoi(pasvDataParts[5])\n\t\t\tpasvPort = p1*256 + p2\n\n\t\t\tftpRespCode, err = this.parseCmdResponse(recvData)\n\t\t}\n\t} else {\n\t\tfmt.Println(\"Not connected.\")\n\t}\n\treturn\n}\n\nfunc (this *GoFtpClientCmd) getPasvData(pasvHost string, pasvPort int) (pasvRespData []byte) {\n\tpasvConn, pasvConnErr := net.DialTimeout(\"tcp\", net.JoinHostPort(pasvHost, strconv.Itoa(pasvPort)),\n\t\ttime.Duration(DIAL_FTP_SERVER_TIMEOUT_SECONDS)*time.Second)\n\tif pasvConnErr != nil {\n\t\tfmt.Println(pasvConnErr.Error())\n\t} else {\n\t\tvar bReader = bufio.NewReader(pasvConn)\n\t\tpasvRespData = make([]byte, 0)\n\t\tfor {\n\t\t\tline, err := bReader.ReadBytes('\\n')\n\t\t\tpasvRespData = append(pasvRespData, []byte(line)...)\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tpasvConn.Close()\n\t}\n\treturn\n}\n\nfunc (this *GoFtpClientCmd) disconnect() {\n\tthis.close()\n}\n\nfunc (this *GoFtpClientCmd) close() {\n\tif this.FtpConn != nil {\n\t\tthis.sendCmdRequest([]string{FC_QUIT})\n\t\tthis.recvCmdResponse()\n\n\t\tthis.FtpConn = nil\n\t\tthis.Name = \"\"\n\t\tthis.Params = nil\n\t\tthis.Connected = 
false\n\t}\n}\n<commit_msg>Run the code here that receives replies on the command link in a separate goroutine<commit_after>package goftp\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/default listening port of the FTP server\nconst (\n\tFTP_SERVER_DEFAULT_LISTENING_PORT = 21\n)\n\nconst (\n\tFC_REQUEST_SUFFIX string = \"\\r\\n\"\n\tFC_RESPONSE_BUFFER int = 1024\n)\n\nconst (\n\tFC_RESP_CODE_ENTER_PASSIVE_MODE int = 227\n)\n\n\/\/commands for interacting with the FTP server; the prefix FC stands for Ftp Command\nconst (\n\tFC_USER string = \"USER\" \/\/USER login_name\n\tFC_PASS string = \"PASS\" \/\/PASS login_pass\n\tFC_ACCT string = \"ACCT\" \/\/ACCT account_name\n\tFC_QUIT string = \"QUIT\" \/\/QUIT\n\tFC_PWD string = \"PWD\" \/\/PWD\n\tFC_CWD string = \"CWD\" \/\/CWD remote_dir\n\tFC_LIST string = \"LIST\" \/\/LIST remote_dir\n\tFC_PASV string = \"PASV\" \/\/PASV\n)\n\ntype GoFtpClientCmd struct {\n\tName string\n\tParams []string\n\tConnected bool\n\n\tDefaultLocalWorkDir string\n\tLocalWorkDir string\n\tUsername string\n\n\tFtpConn net.Conn\n\n\tGoFtpClientHelp\n}\n\nfunc (this *GoFtpClientCmd) welcome() {\n\tvar data []byte = make([]byte, 1024)\n\treadCount, err := this.FtpConn.Read(data)\n\tif err == nil {\n\t\tfmt.Print(string(data[:readCount]))\n\t\t\/\/prompt for the login name\n\t\tvar remoteAddr = this.FtpConn.RemoteAddr().String()\n\t\tvar portIndex = strings.LastIndex(remoteAddr, \":\")\n\t\tfmt.Printf(\"Name (%s:%s):\", remoteAddr[:portIndex], this.Username)\n\t\tvar username string\n\t\tfmt.Scanln(&username)\n\t\tthis.sendCmdRequest([]string{FC_USER, username})\n\t\tthis.recvCmdResponse()\n\t\t\/\/prompt for the login password\n\t\tfmt.Print(\"Password:\")\n\t\tvar password string\n\t\tfmt.Scanln(&password)\n\t\tthis.sendCmdRequest([]string{FC_PASS, password})\n\t\tthis.recvCmdResponse()\n\t} else {\n\t\tfmt.Println(err)\n\t}\n}\n\nfunc (this *GoFtpClientCmd) sendCmdRequest(ftpParams []string) {\n\tif this.Connected {\n\t\tvar sendData = fmt.Sprint(strings.Join(ftpParams, \" \"), FC_REQUEST_SUFFIX)\n\t\tthis.FtpConn.Write([]byte(sendData))\n\t} else {\n\t\tfmt.Println(\"Not connected.\")\n\t}\n}\n\nfunc (this *GoFtpClientCmd) recvCmdResponse() (recvData string) {\n\tif this.Connected {\n\t\tvar recvBytes []byte = make([]byte, FC_RESPONSE_BUFFER)\n\t\treadCount, err := this.FtpConn.Read(recvBytes)\n\t\tif err == nil {\n\t\t\trecvData = string(recvBytes[:readCount])\n\t\t\tfmt.Print(string(recvData))\n\t\t} else {\n\t\t\tfmt.Println(\"ftp:\", err)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (this *GoFtpClientCmd) open() {\n\tif this.Connected {\n\t\tvar remoteAddr = this.FtpConn.RemoteAddr().String()\n\t\tvar portIndex = strings.LastIndex(remoteAddr, \":\")\n\t\tfmt.Println(\"Already connected to \", remoteAddr[:portIndex], \", use close first.\")\n\t} else {\n\t\tvar paramCount = len(this.Params)\n\t\tvar ftpHost string\n\t\tvar ftpPort int\n\t\tif paramCount == 0 {\n\t\t\tfmt.Print(\"(To) \")\n\t\t\tcmdReader := bufio.NewReader(os.Stdin)\n\t\t\tcmdStr, err := cmdReader.ReadString('\\n')\n\t\t\tcmdStr = strings.Trim(cmdStr, \"\\r\\n\")\n\t\t\tif err == nil && cmdStr != \"\" {\n\t\t\t\tcmdParts := strings.Fields(cmdStr)\n\t\t\t\tcmdPartCount := len(cmdParts)\n\t\t\t\tif cmdPartCount == 1 {\n\t\t\t\t\tftpHost = cmdParts[0]\n\t\t\t\t\tftpPort = FTP_SERVER_DEFAULT_LISTENING_PORT\n\t\t\t\t} else if cmdPartCount == 2 {\n\t\t\t\t\tftpHost = cmdParts[0]\n\t\t\t\t\tport, err := strconv.Atoi(cmdParts[1])\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tthis.cmdUsage(this.Name)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tftpPort = 
port\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tthis.cmdUsage(this.Name)\n\t\t\t}\n\t\t} else if paramCount == 1 {\n\t\t\tftpHost = this.Params[0]\n\t\t\tftpPort = FTP_SERVER_DEFAULT_LISTENING_PORT\n\t\t} else if paramCount == 2 {\n\t\t\tftpHost = this.Params[0]\n\t\t\tport, err := strconv.Atoi(this.Params[1])\n\t\t\tif err != nil {\n\t\t\t\tthis.cmdUsage(this.Name)\n\t\t\t} else {\n\t\t\t\tftpPort = port\n\t\t\t}\n\t\t}\n\n\t\t\/\/establish the FTP connection\n\t\tif ftpHost != \"\" {\n\t\t\tips, lookupErr := net.LookupIP(ftpHost)\n\t\t\tif lookupErr != nil {\n\t\t\t\tfmt.Println(\"ftp: Can't lookup host `\", ftpHost, \"'\")\n\t\t\t} else {\n\t\t\t\tvar port = strconv.Itoa(ftpPort)\n\t\t\t\tfor _, ip := range ips {\n\t\t\t\t\tconn, connErr := net.DialTimeout(\"tcp\", net.JoinHostPort(ip.String(), port),\n\t\t\t\t\t\ttime.Duration(DIAL_FTP_SERVER_TIMEOUT_SECONDS)*time.Second)\n\t\t\t\t\tif connErr != nil {\n\t\t\t\t\t\tfmt.Println(\"Trying \", ip, \"...\")\n\t\t\t\t\t\tfmt.Println(\"ftp:\", connErr.Error())\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfmt.Println(\"Connected to\", ip, \".\")\n\t\t\t\t\t\tvar sysUser, _ = user.Current()\n\t\t\t\t\t\tthis.FtpConn = conn\n\t\t\t\t\t\tthis.Connected = true\n\t\t\t\t\t\tthis.Username = sysUser.Username\n\t\t\t\t\t\tthis.DefaultLocalWorkDir = sysUser.HomeDir\n\t\t\t\t\t\tthis.LocalWorkDir = sysUser.HomeDir\n\n\t\t\t\t\t\tthis.welcome()\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (this *GoFtpClientCmd) parseCmdResponse(respData string) (ftpRespCode int, err error) {\n\tvar recvDataParts = strings.Fields(respData)\n\tif len(recvDataParts) > 0 {\n\t\tftpRespCode, err = strconv.Atoi(recvDataParts[0])\n\t}\n\treturn\n}\n\nfunc (this *GoFtpClientCmd) lcd() {\n\tvar paramCount = len(this.Params)\n\tif paramCount == 0 || paramCount == 1 {\n\t\tif paramCount == 0 {\n\t\t\tthis.LocalWorkDir = this.DefaultLocalWorkDir\n\t\t\tfmt.Println(\"Local directory now:\", this.LocalWorkDir)\n\t\t} else {\n\t\t\tvar path = this.Params[0]\n\t\t\tif !filepath.IsAbs(path) {\n\t\t\t\tpath = filepath.Join(this.DefaultLocalWorkDir, path)\n\t\t\t}\n\t\t\tfiInfo, err := os.Stat(path)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"ftp:\", err.Error())\n\t\t\t} else {\n\t\t\t\tif fiInfo.IsDir() {\n\t\t\t\t\tthis.LocalWorkDir = path\n\t\t\t\t\tfmt.Println(\"Local directory now:\", path)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"ftp: Can't chdir `\", path, \"': No such file or directory\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tthis.cmdUsage(this.Name)\n\t}\n}\n\nfunc (this *GoFtpClientCmd) user() {\n\tvar paramCount = len(this.Params)\n\tvar username string\n\tvar password string\n\tvar account string\n\tif paramCount == 0 {\n\t\tfmt.Print(\"Username:\")\n\t\tfmt.Scanln(&username)\n\t\tusername = strings.Trim(username, \"\\r\\n\")\n\t\tif username == \"\" {\n\t\t\tthis.cmdUsage(this.Name)\n\t\t} else {\n\t\t\tthis.sendCmdRequest([]string{FC_USER, username})\n\t\t\tthis.recvCmdResponse()\n\t\t\tfmt.Print(\"Password:\")\n\t\t\tfmt.Scanln(&password)\n\t\t\tthis.sendCmdRequest([]string{FC_PASS, password})\n\t\t\tthis.recvCmdResponse()\n\t\t}\n\t} else if paramCount == 1 {\n\t\tusername = this.Params[0]\n\t\tthis.sendCmdRequest([]string{FC_USER, username})\n\t\tthis.recvCmdResponse()\n\t\tfmt.Print(\"Password:\")\n\t\tfmt.Scanln(&password)\n\t\tthis.sendCmdRequest([]string{FC_PASS, password})\n\t\tthis.recvCmdResponse()\n\t} else if paramCount == 2 {\n\t\tusername = this.Params[0]\n\t\tpassword = this.Params[1]\n\t\tthis.sendCmdRequest([]string{FC_USER, 
username})\n\t\tthis.recvCmdResponse()\n\t\tthis.sendCmdRequest([]string{FC_PASS, password})\n\t\tthis.recvCmdResponse()\n\t} else if paramCount == 3 {\n\t\tusername = this.Params[0]\n\t\tpassword = this.Params[1]\n\t\taccount = this.Params[2]\n\t\tthis.sendCmdRequest([]string{FC_USER, username})\n\t\tthis.recvCmdResponse()\n\t\tthis.sendCmdRequest([]string{FC_PASS, password})\n\t\tthis.recvCmdResponse()\n\t\tthis.sendCmdRequest([]string{FC_ACCT, account})\n\t\tthis.recvCmdResponse()\n\t}\n\n}\n\nfunc (this *GoFtpClientCmd) pwd() {\n\tthis.sendCmdRequest([]string{FC_PWD})\n\tthis.recvCmdResponse()\n}\n\nfunc (this *GoFtpClientCmd) cwd() {\n\tvar paramCount = len(this.Params)\n\tif paramCount == 0 {\n\t\tfmt.Println(\"(remote-directory)\")\n\t\tvar remoteDir string\n\t\tfmt.Scanln(&remoteDir)\n\t\tif remoteDir != \"\" {\n\t\t\tthis.sendCmdRequest([]string{FC_CWD, remoteDir})\n\t\t\tthis.recvCmdResponse()\n\t\t} else {\n\t\t\tthis.cmdUsage(this.Name)\n\t\t}\n\t} else if paramCount > 1 {\n\t\tthis.cmdUsage(this.Name)\n\t} else {\n\t\tthis.sendCmdRequest([]string{FC_CWD, this.Params[0]})\n\t\tthis.recvCmdResponse()\n\t}\n}\n\nfunc (this *GoFtpClientCmd) ls() {\n\tvar paramCount = len(this.Params)\n\tif paramCount >= 0 && paramCount <= 2 {\n\t\tvar resultOutputFile string\n\t\tvar remoteDir string\n\t\tif paramCount == 1 {\n\t\t\tremoteDir = this.Params[0]\n\t\t} else if paramCount == 2 {\n\t\t\tremoteDir = this.Params[0]\n\t\t\tresultOutputFile = this.Params[1]\n\t\t}\n\t\tvar outputFile *os.File\n\t\tvar err error\n\t\tif resultOutputFile != \"\" {\n\t\t\tif !filepath.IsAbs(resultOutputFile) {\n\t\t\t\tresultOutputFile = filepath.Join(this.LocalWorkDir, resultOutputFile)\n\t\t\t}\n\t\t\toutputFile, err = os.Create(resultOutputFile)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"ftp: Can't access `\", resultOutputFile, \"': No such file or directory\")\n\t\t\t}\n\t\t}\n\n\t\tif err == nil {\n\t\t\tpasvHost, pasvPort, ftpRespCode, errPasv := this.pasv()\n\t\t\tif errPasv == nil {\n\t\t\t\tif pasvHost != \"\" && ftpRespCode == FC_RESP_CODE_ENTER_PASSIVE_MODE {\n\t\t\t\t\tthis.sendCmdRequest([]string{FC_LIST, remoteDir})\n\t\t\t\t\tgo this.recvCmdResponse()\n\t\t\t\t\tvar pasvRespData = this.getPasvData(pasvHost, pasvPort)\n\t\t\t\t\tif outputFile != nil {\n\t\t\t\t\t\tvar bWriter = bufio.NewWriter(outputFile)\n\t\t\t\t\t\tbWriter.WriteString(string(pasvRespData))\n\t\t\t\t\t\tbWriter.Flush()\n\t\t\t\t\t\toutputFile.Close()\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfmt.Print(string(pasvRespData))\n\t\t\t\t\t}\n\t\t\t\t\tthis.recvCmdResponse()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tthis.cmdUsage(this.Name)\n\t}\n}\n\nfunc (this *GoFtpClientCmd) pasv() (pasvHost string, pasvPort int, ftpRespCode int, err error) {\n\tif this.Connected {\n\t\tthis.sendCmdRequest([]string{FC_PASV})\n\t\tvar recvData = this.recvCmdResponse()\n\t\tvar startIndex = strings.Index(recvData, \"(\")\n\t\tvar endIndex = strings.LastIndex(recvData, \")\")\n\t\tif startIndex == -1 || endIndex == -1 {\n\t\t\terr = errors.New(\"ftp: PASV command failed.\")\n\t\t} else {\n\t\t\tvar pasvDataStr = recvData[startIndex+1 : endIndex]\n\t\t\tvar pasvDataParts = strings.Split(pasvDataStr, \",\")\n\t\t\tpasvHost = strings.Join(pasvDataParts[:4], \".\")\n\t\t\tvar p1, p2 int\n\t\t\tp1, err = strconv.Atoi(pasvDataParts[4])\n\t\t\tp2, err = strconv.Atoi(pasvDataParts[5])\n\t\t\tpasvPort = p1*256 + p2\n\n\t\t\tftpRespCode, err = this.parseCmdResponse(recvData)\n\t\t}\n\t} else {\n\t\tfmt.Println(\"Not connected.\")\n\t}\n\treturn\n}\n\nfunc (this 
*GoFtpClientCmd) getPasvData(pasvHost string, pasvPort int) (pasvRespData []byte) {\n\tpasvConn, pasvConnErr := net.DialTimeout(\"tcp\", net.JoinHostPort(pasvHost, strconv.Itoa(pasvPort)),\n\t\ttime.Duration(DIAL_FTP_SERVER_TIMEOUT_SECONDS)*time.Second)\n\tif pasvConnErr != nil {\n\t\tfmt.Println(pasvConnErr.Error())\n\t} else {\n\t\tvar bReader = bufio.NewReader(pasvConn)\n\t\tpasvRespData = make([]byte, 0)\n\t\tfor {\n\t\t\tline, err := bReader.ReadBytes('\\n')\n\t\t\tpasvRespData = append(pasvRespData, []byte(line)...)\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tpasvConn.Close()\n\t}\n\treturn\n}\n\nfunc (this *GoFtpClientCmd) disconnect() {\n\tthis.close()\n}\n\nfunc (this *GoFtpClientCmd) close() {\n\tif this.FtpConn != nil {\n\t\tthis.sendCmdRequest([]string{FC_QUIT})\n\t\tthis.recvCmdResponse()\n\n\t\tthis.FtpConn = nil\n\t\tthis.Name = \"\"\n\t\tthis.Params = nil\n\t\tthis.Connected = false\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package util\n\nimport \"testing\"\n\n\/\/ Test if converting to radians is more than zero\nfunc TestToRadians(t *testing.T) {\n\ttestObjects := []struct {\n\t\tInput float64\n\t\tExpected float64\n\t}{\n\t\t{1.2, 0.0},\n\t\t{1.4, 0.0},\n\t\t{0.1, 0.0},\n\t\t{0.0, 0.0},\n\t}\n\n\tfor _, test := range testObjects {\n\t\tactual := toRadians(test.Input)\n\t\tif actual < test.Expected {\n\t\t\tt.Errorf(\"Input = %v, Error actual = %v , expected = %v\\n\", test.Input, actual, test.Expected)\n\t\t}\n\t}\n}\n\n\/\/ Test Distance see if two point distance calculation is more than zero\nfunc TestDistance(t *testing.T) {\n\ttestObjects := []struct {\n\t\tLat1 float64\n\t\tLon1 float64\n\t\tLat2 float64\n\t\tLon2 float64\n\t\tExpected float64\n\t}{\n\t\t{Lat1: -6.8915208, Lon1: 107.6100268, Lat2: 6.8937359, Lon2: 107.6083563, Expected: 0.0},\n\t}\n\n\tfor _, test := range testObjects {\n\t\tactual := Distance(test.Lat1, test.Lon1, test.Lat2, test.Lon2)\n\n\t\tif actual < test.Expected {\n\t\t\tt.Errorf(\"Error actual = %v, Expect more than zero = $v\", actual, test.Expected)\n\t\t}\n\t}\n}\n<commit_msg>fix argument for error<commit_after>package util\n\nimport \"testing\"\n\n\/\/ Test if converting to radians is more than zero\nfunc TestToRadians(t *testing.T) {\n\ttestObjects := []struct {\n\t\tInput float64\n\t\tExpected float64\n\t}{\n\t\t{1.2, 0.0},\n\t\t{1.4, 0.0},\n\t\t{0.1, 0.0},\n\t\t{0.0, 0.0},\n\t}\n\n\tfor _, test := range testObjects {\n\t\tactual := toRadians(test.Input)\n\t\tif actual < test.Expected {\n\t\t\tt.Errorf(\"Input = %v, Error actual = %v , expected = %v\\n\", test.Input, actual, test.Expected)\n\t\t}\n\t}\n}\n\n\/\/ Test Distance see if two point distance calculation is more than zero\nfunc TestDistance(t *testing.T) {\n\ttestObjects := []struct {\n\t\tLat1 float64\n\t\tLon1 float64\n\t\tLat2 float64\n\t\tLon2 float64\n\t\tExpected float64\n\t}{\n\t\t{Lat1: -6.8915208, Lon1: 107.6100268, Lat2: 6.8937359, Lon2: 107.6083563, Expected: 0.0},\n\t}\n\n\tfor _, test := range testObjects {\n\t\tactual := Distance(test.Lat1, test.Lon1, test.Lat2, test.Lon2)\n\n\t\tif actual < test.Expected {\n\t\t\tt.Errorf(\"Error actual = %v, Expect more than zero = %v\\n\", actual, test.Expected)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2021 The TestGrid Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by 
applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage gcs\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"cloud.google.com\/go\/storage\"\n\t\"google.golang.org\/api\/iterator\"\n)\n\nvar (\n\t_ Client = &localClient{} \/\/ Ensure this implements interface\n)\n\ntype localIterator struct {\n\tfiles []os.FileInfo\n\tdir string\n\tindex int\n}\n\nfunc convertIsNotExistsErr(err error) error {\n\tif os.IsNotExist(err) {\n\t\treturn storage.ErrObjectNotExist\n\t}\n\treturn err\n}\n\n\/\/ See https:\/\/en.wikipedia.org\/wiki\/File_URI_scheme#How_many_slashes\nvar fileRegex = regexp.MustCompile(`file:\\\/+`)\n\nfunc cleanFilepath(path Path) string {\n\tp := fileRegex.ReplaceAllString(path.String(), \"\/\")\n\t\/\/ TODO(michelle192837): Handle URLs vs. filepaths gracefully.\n\tp, err := url.PathUnescape(p)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn p\n}\n\nfunc (li *localIterator) Next() (*storage.ObjectAttrs, error) {\n\tdefer func() { li.index++ }()\n\tif li.index >= len(li.files) {\n\t\treturn nil, iterator.Done\n\t}\n\tinfo := li.files[li.index]\n\tp, err := NewPath(filepath.Join(li.dir, info.Name()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn objectAttrs(info, *p), nil\n}\n\n\/\/ NewLocalClient returns a GCSUploadClient for the storage.Client.\nfunc NewLocalClient() ConditionalClient {\n\treturn localClient{nil, nil}\n}\n\ntype localClient struct {\n\treadCond *storage.Conditions\n\twriteCond *storage.Conditions\n}\n\nfunc (lc localClient) If(_, _ *storage.Conditions) ConditionalClient {\n\treturn NewLocalClient()\n}\n\nfunc (lc localClient) Copy(ctx context.Context, from, to Path) (*storage.ObjectAttrs, error) {\n\tbuf, err := ioutil.ReadFile(cleanFilepath(from))\n\tif err != nil {\n\t\treturn nil, convertIsNotExistsErr(err)\n\t}\n\treturn lc.Upload(ctx, to, buf, false, \"\")\n}\n\nfunc (lc localClient) Open(ctx context.Context, path Path) (io.ReadCloser, *storage.ReaderObjectAttrs, error) {\n\tr, err := os.Open(cleanFilepath(path))\n\treturn r, &storage.ReaderObjectAttrs{}, err\n}\n\nfunc (lc localClient) Objects(ctx context.Context, path Path, delimiter, startOffset string) Iterator {\n\tp := cleanFilepath(path)\n\tif !strings.HasSuffix(p, \"\/\") {\n\t\tp += \"\/\"\n\t}\n\tfiles, err := ioutil.ReadDir(p)\n\tif err != nil {\n\t\treturn &localIterator{}\n\t}\n\treturn &localIterator{\n\t\tdir: filepath.Dir(p),\n\t\tfiles: files,\n\t}\n}\n\nfunc (lc localClient) Upload(ctx context.Context, path Path, buf []byte, _ bool, _ string) (*storage.ObjectAttrs, error) {\n\terr := ioutil.WriteFile(cleanFilepath(path), buf, 0666)\n\tif err != nil {\n\t\treturn nil, convertIsNotExistsErr(err)\n\t}\n\treturn lc.Stat(ctx, path)\n}\n\nfunc (lc localClient) Stat(ctx context.Context, path Path) (*storage.ObjectAttrs, error) {\n\tinfo, err := os.Stat(cleanFilepath(path))\n\tif err != nil {\n\t\treturn nil, convertIsNotExistsErr(err)\n\t}\n\treturn objectAttrs(info, path), nil\n}\n\nfunc objectAttrs(info os.FileInfo, path Path) *storage.ObjectAttrs {\n\treturn &storage.ObjectAttrs{\n\t\tBucket: path.Bucket(),\n\t\tName: path.Object(),\n\t\tSize: info.Size(),\n\t\tUpdated: info.ModTime(),\n\t}\n}\n<commit_msg>Differentiate between file-does-not-exist 
and other errors.<commit_after>\/*\nCopyright 2021 The TestGrid Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage gcs\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"cloud.google.com\/go\/storage\"\n\t\"google.golang.org\/api\/iterator\"\n)\n\nvar (\n\t_ Client = &localClient{} \/\/ Ensure this implements interface\n)\n\ntype localIterator struct {\n\tfiles []os.FileInfo\n\tdir string\n\tindex int\n}\n\nfunc convertIsNotExistsErr(err error) error {\n\tif os.IsNotExist(err) {\n\t\treturn storage.ErrObjectNotExist\n\t}\n\treturn err\n}\n\n\/\/ See https:\/\/en.wikipedia.org\/wiki\/File_URI_scheme#How_many_slashes\nvar fileRegex = regexp.MustCompile(`file:\\\/+`)\n\nfunc cleanFilepath(path Path) string {\n\tp := fileRegex.ReplaceAllString(path.String(), \"\/\")\n\t\/\/ TODO(michelle192837): Handle URLs vs. filepaths gracefully.\n\tp, err := url.PathUnescape(p)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn p\n}\n\nfunc (li *localIterator) Next() (*storage.ObjectAttrs, error) {\n\tdefer func() { li.index++ }()\n\tif li.index >= len(li.files) {\n\t\treturn nil, iterator.Done\n\t}\n\tinfo := li.files[li.index]\n\tp, err := NewPath(filepath.Join(li.dir, info.Name()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn objectAttrs(info, *p), nil\n}\n\n\/\/ NewLocalClient returns a GCSUploadClient for the storage.Client.\nfunc NewLocalClient() ConditionalClient {\n\treturn localClient{nil, nil}\n}\n\ntype localClient struct {\n\treadCond *storage.Conditions\n\twriteCond *storage.Conditions\n}\n\nfunc (lc localClient) If(_, _ *storage.Conditions) ConditionalClient {\n\treturn NewLocalClient()\n}\n\nfunc (lc localClient) Copy(ctx context.Context, from, to Path) (*storage.ObjectAttrs, error) {\n\tbuf, err := ioutil.ReadFile(cleanFilepath(from))\n\tif err != nil {\n\t\treturn nil, convertIsNotExistsErr(err)\n\t}\n\treturn lc.Upload(ctx, to, buf, false, \"\")\n}\n\nfunc (lc localClient) Open(ctx context.Context, path Path) (io.ReadCloser, *storage.ReaderObjectAttrs, error) {\n\tr, err := os.Open(cleanFilepath(path))\n\treturn r, &storage.ReaderObjectAttrs{}, convertIsNotExistsErr(err)\n}\n\nfunc (lc localClient) Objects(ctx context.Context, path Path, delimiter, startOffset string) Iterator {\n\tp := cleanFilepath(path)\n\tif !strings.HasSuffix(p, \"\/\") {\n\t\tp += \"\/\"\n\t}\n\tfiles, err := ioutil.ReadDir(p)\n\tif err != nil {\n\t\treturn &localIterator{}\n\t}\n\treturn &localIterator{\n\t\tdir: filepath.Dir(p),\n\t\tfiles: files,\n\t}\n}\n\nfunc (lc localClient) Upload(ctx context.Context, path Path, buf []byte, _ bool, _ string) (*storage.ObjectAttrs, error) {\n\terr := ioutil.WriteFile(cleanFilepath(path), buf, 0666)\n\tif err != nil {\n\t\treturn nil, convertIsNotExistsErr(err)\n\t}\n\treturn lc.Stat(ctx, path)\n}\n\nfunc (lc localClient) Stat(ctx context.Context, path Path) (*storage.ObjectAttrs, error) {\n\tinfo, err := os.Stat(cleanFilepath(path))\n\tif err != nil {\n\t\treturn nil, 
convertIsNotExistsErr(err)\n\t}\n\treturn objectAttrs(info, path), nil\n}\n\nfunc objectAttrs(info os.FileInfo, path Path) *storage.ObjectAttrs {\n\treturn &storage.ObjectAttrs{\n\t\tBucket: path.Bucket(),\n\t\tName: path.Object(),\n\t\tSize: info.Size(),\n\t\tUpdated: info.ModTime(),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package shell\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/master_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/types\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/wdclient\"\n\t\"io\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/needle\"\n)\n\nfunc init() {\n\tCommands = append(Commands, &commandVolumeTierMove{})\n}\n\ntype volumeTierMoveJob struct {\n\tsrc pb.ServerAddress\n\tvid needle.VolumeId\n}\n\ntype commandVolumeTierMove struct {\n\tactiveServers sync.Map\n\tqueues map[pb.ServerAddress]chan volumeTierMoveJob\n\t\/\/activeServers map[pb.ServerAddress]struct{}\n\t\/\/activeServersLock sync.Mutex\n\t\/\/activeServersCond *sync.Cond\n}\n\nfunc (c *commandVolumeTierMove) Name() string {\n\treturn \"volume.tier.move\"\n}\n\nfunc (c *commandVolumeTierMove) Help() string {\n\treturn `change a volume from one disk type to another\n\n\tvolume.tier.move -fromDiskType=hdd -toDiskType=ssd [-collectionPattern=\"\"] [-fullPercent=95] [-quietFor=1h]\n\n\tEven if the volume is replicated, only one replica will be changed and the rest replicas will be dropped.\n\tSo \"volume.fix.replication\" and \"volume.balance\" should be followed.\n\n`\n}\n\nfunc (c *commandVolumeTierMove) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {\n\n\ttierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)\n\tcollectionPattern := tierCommand.String(\"collectionPattern\", \"\", \"match with wildcard characters '*' and '?'\")\n\tfullPercentage := tierCommand.Float64(\"fullPercent\", 95, \"the volume reaches the percentage of max volume size\")\n\tquietPeriod := tierCommand.Duration(\"quietFor\", 24*time.Hour, \"select volumes without no writes for this period\")\n\tsource := tierCommand.String(\"fromDiskType\", \"\", \"the source disk type\")\n\ttarget := tierCommand.String(\"toDiskType\", \"\", \"the target disk type\")\n\tlimitWorkers := tierCommand.Int(\"limitWorkers\", 0, \"limit the number of active copying workers\")\n\tapplyChange := tierCommand.Bool(\"force\", false, \"actually apply the changes\")\n\tif err = tierCommand.Parse(args); err != nil {\n\t\treturn nil\n\t}\n\n\tif err = commandEnv.confirmIsLocked(args); err != nil {\n\t\treturn\n\t}\n\n\tfromDiskType := types.ToDiskType(*source)\n\ttoDiskType := types.ToDiskType(*target)\n\n\tif fromDiskType == toDiskType {\n\t\treturn fmt.Errorf(\"source tier %s is the same as target tier %s\", fromDiskType, toDiskType)\n\t}\n\n\t\/\/ collect topology information\n\ttopologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ collect all volumes that should change\n\tvolumeIds, err := collectVolumeIdsForTierChange(commandEnv, topologyInfo, volumeSizeLimitMb, fromDiskType, *collectionPattern, *fullPercentage, *quietPeriod)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf(\"tier move volumes: %v\\n\", volumeIds)\n\n\t_, allLocations := collectVolumeReplicaLocations(topologyInfo)\n\tallLocations = filterLocationsByDiskType(allLocations, 
toDiskType)\n\tkeepDataNodesSorted(allLocations, toDiskType)\n\t\n\tif len(allLocations) > 0 && *limitWorkers > 0 && *limitWorkers < len(allLocations) {\n\t\tallLocations = allLocations[:*limitWorkers]\n\t}\n\n\twg := sync.WaitGroup{}\n\tbufferLen := len(allLocations)\n\tc.queues = make(map[pb.ServerAddress]chan volumeTierMoveJob)\n\t\n\tfor _, dst := range allLocations {\n\t\tdestServerAddress := pb.NewServerAddressFromDataNode(dst.dataNode)\n\t\tc.queues[destServerAddress] = make(chan volumeTierMoveJob, bufferLen)\n\n\t\twg.Add(1)\n\t\tgo func (dst location, jobs <-chan volumeTierMoveJob, applyChanges bool) {\n\t\t\tfor job := range jobs {\n\t\t\t\tfmt.Fprintf(writer, \"moving volume %d from %s to %s with disk type %s ...\\n\", job.vid, job.src, dst.dataNode.Id, toDiskType.ReadableString())\n\t\t\t\tif !applyChanges {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tlocations, found := commandEnv.MasterClient.GetLocations(uint32(job.vid))\n\t\t\t\tif !found {\n\t\t\t\t\tfmt.Printf(\"volume %d not found\", job.vid)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tunlock := c.Lock(job.src)\n\n\t\t\t\tif err := c.doMoveOneVolume(commandEnv, writer, job.vid, toDiskType, locations, job.src, dst); err != nil {\n\t\t\t\t\tfmt.Fprintf(writer, \"move volume %d %s => %s: %v\\n\", job.vid, job.src, dst.dataNode.Id, err)\n\t\t\t\t}\n\t\t\t\tunlock()\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(dst, c.queues[destServerAddress], *applyChange)\n\t}\n\n\tfor _, vid := range volumeIds {\n\t\tif err = c.doVolumeTierMove(commandEnv, writer, vid, toDiskType, allLocations); err != nil {\n\t\t\tfmt.Printf(\"tier move volume %d: %v\\n\", vid, err)\n\t\t}\n\t\tallLocations = rotateDataNodes(allLocations)\n\t}\n\tfor key, _ := range c.queues {\n\t\tclose(c.queues[key])\n\t}\n\n\twg.Wait()\n\n\treturn nil\n}\n\nfunc (c *commandVolumeTierMove) Lock(key pb.ServerAddress) func() {\n\tvalue, _ := c.activeServers.LoadOrStore(key, &sync.Mutex{})\n\tmtx := value.(*sync.Mutex)\n\tmtx.Lock()\n\n\treturn func() { mtx.Unlock() }\n}\n\nfunc filterLocationsByDiskType(dataNodes []location, diskType types.DiskType) (ret []location) {\n\tfor _, loc := range dataNodes {\n\t\t_, found := loc.dataNode.DiskInfos[string(diskType)]\n\t\tif found {\n\t\t\tret = append(ret, loc)\n\t\t}\n\t}\n\treturn\n}\n\nfunc rotateDataNodes(dataNodes []location) []location {\n\tif len(dataNodes) > 0 {\n\t\treturn append(dataNodes[1:], dataNodes[0])\n\t} else {\n\t\treturn dataNodes\n\t}\n}\n\nfunc isOneOf(server string, locations []wdclient.Location) bool {\n\tfor _, loc := range locations {\n\t\tif server == loc.Url {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (c *commandVolumeTierMove) doVolumeTierMove(commandEnv *CommandEnv, writer io.Writer, vid needle.VolumeId, toDiskType types.DiskType, allLocations []location) (err error) {\n\t\/\/ find volume location\n\tlocations, found := commandEnv.MasterClient.GetLocations(uint32(vid))\n\tif !found {\n\t\treturn fmt.Errorf(\"volume %d not found\", vid)\n\t}\n\n\t\/\/ find one server with the most empty volume slots with target disk type\n\thasFoundTarget := false\n\tfn := capacityByFreeVolumeCount(toDiskType)\n\tfor _, dst := range allLocations {\n\t\tif fn(dst.dataNode) > 0 && !hasFoundTarget {\n\t\t\t\/\/ ask the volume server to replicate the volume\n\t\t\tif isOneOf(dst.dataNode.Id, locations) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar sourceVolumeServer pb.ServerAddress\n\t\t\tfor _, loc := range locations {\n\t\t\t\tif loc.Url != dst.dataNode.Id {\n\t\t\t\t\tsourceVolumeServer = 
loc.ServerAddress()\n\t\t\t\t}\n\t\t\t}\n\t\t\tif sourceVolumeServer == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\thasFoundTarget = true\n\n\t\t\t\/\/ adjust volume count\n\t\t\tdst.dataNode.DiskInfos[string(toDiskType)].VolumeCount++\n\n\t\t\tdestServerAddress := pb.NewServerAddressFromDataNode(dst.dataNode)\n\t\t\tc.queues[destServerAddress] <- volumeTierMoveJob{sourceVolumeServer, vid}\n\t\t}\n\t}\n\n\tif !hasFoundTarget {\n\t\tfmt.Fprintf(writer, \"can not find disk type %s for volume %d\\n\", toDiskType.ReadableString(), vid)\n\t}\n\n\treturn nil\n}\n\nfunc (c *commandVolumeTierMove) doMoveOneVolume(commandEnv *CommandEnv, writer io.Writer, vid needle.VolumeId, toDiskType types.DiskType, locations []wdclient.Location, sourceVolumeServer pb.ServerAddress, dst location) (err error) {\n\n\t\/\/ mark all replicas as read only\n\tif err = markVolumeReplicasWritable(commandEnv.option.GrpcDialOption, vid, locations, false); err != nil {\n\t\treturn fmt.Errorf(\"mark volume %d as readonly on %s: %v\", vid, locations[0].Url, err)\n\t}\n\tif err = LiveMoveVolume(commandEnv.option.GrpcDialOption, writer, vid, sourceVolumeServer, pb.NewServerAddressFromDataNode(dst.dataNode), 5*time.Second, toDiskType.ReadableString(), true); err != nil {\n\n\t\t\/\/ mark all replicas as writable\n\t\tif err = markVolumeReplicasWritable(commandEnv.option.GrpcDialOption, vid, locations, true); err != nil {\n\t\t\tglog.Errorf(\"mark volume %d as writable on %s: %v\", vid, locations[0].Url, err)\n\t\t}\n\n\t\treturn fmt.Errorf(\"move volume %d %s => %s : %v\", vid, locations[0].Url, dst.dataNode.Id, err)\n\t}\n\n\t\/\/ adjust volume count\n\tdst.dataNode.DiskInfos[string(toDiskType)].VolumeCount++\n\n\t\/\/ remove the remaining replicas\n\tfor _, loc := range locations {\n\t\tif loc.Url != dst.dataNode.Id && loc.ServerAddress() != sourceVolumeServer {\n\t\t\tif err = deleteVolume(commandEnv.option.GrpcDialOption, vid, loc.ServerAddress()); err != nil {\n\t\t\t\tfmt.Fprintf(writer, \"failed to delete volume %d on %s: %v\\n\", vid, loc.Url, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc collectVolumeIdsForTierChange(commandEnv *CommandEnv, topologyInfo *master_pb.TopologyInfo, volumeSizeLimitMb uint64, sourceTier types.DiskType, collectionPattern string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {\n\n\tquietSeconds := int64(quietPeriod \/ time.Second)\n\tnowUnixSeconds := time.Now().Unix()\n\n\tfmt.Printf(\"collect %s volumes quiet for: %d seconds\\n\", sourceTier, quietSeconds)\n\n\tvidMap := make(map[uint32]bool)\n\teachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {\n\t\tfor _, diskInfo := range dn.DiskInfos {\n\t\t\tfor _, v := range diskInfo.VolumeInfos {\n\t\t\t\t\/\/ check collection name pattern\n\t\t\t\tif collectionPattern != \"\" {\n\t\t\t\t\tmatched, err := filepath.Match(collectionPattern, v.Collection)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tif !matched {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif v.ModifiedAtSecond+quietSeconds < nowUnixSeconds && types.ToDiskType(v.DiskType) == sourceTier {\n\t\t\t\t\tif float64(v.Size) > fullPercentage\/100*float64(volumeSizeLimitMb)*1024*1024 {\n\t\t\t\t\t\tvidMap[v.Id] = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\tfor vid := range vidMap {\n\t\tvids = append(vids, needle.VolumeId(vid))\n\t}\n\n\treturn\n}\n<commit_msg>adjust volume count even when not applying the changes<commit_after>package shell\n\nimport 
(\n\t"flag"\n\t"fmt"\n\t"github.com\/chrislusf\/seaweedfs\/weed\/glog"\n\t"github.com\/chrislusf\/seaweedfs\/weed\/pb"\n\t"github.com\/chrislusf\/seaweedfs\/weed\/pb\/master_pb"\n\t"github.com\/chrislusf\/seaweedfs\/weed\/storage\/types"\n\t"github.com\/chrislusf\/seaweedfs\/weed\/wdclient"\n\t"io"\n\t"path\/filepath"\n\t"sync"\n\t"time"\n\n\t"github.com\/chrislusf\/seaweedfs\/weed\/storage\/needle"\n)\n\nfunc init() {\n\tCommands = append(Commands, &commandVolumeTierMove{})\n}\n\ntype volumeTierMoveJob struct {\n\tsrc pb.ServerAddress\n\tvid needle.VolumeId\n}\n\ntype commandVolumeTierMove struct {\n\tactiveServers sync.Map\n\tqueues map[pb.ServerAddress]chan volumeTierMoveJob\n\t\/\/activeServers map[pb.ServerAddress]struct{}\n\t\/\/activeServersLock sync.Mutex\n\t\/\/activeServersCond *sync.Cond\n}\n\nfunc (c *commandVolumeTierMove) Name() string {\n\treturn "volume.tier.move"\n}\n\nfunc (c *commandVolumeTierMove) Help() string {\n\treturn `change a volume from one disk type to another\n\n\tvolume.tier.move -fromDiskType=hdd -toDiskType=ssd [-collectionPattern=""] [-fullPercent=95] [-quietFor=1h] [-parallelLimit=4]\n\n\tEven if the volume is replicated, only one replica will be changed and the remaining replicas will be dropped.\n\tSo "volume.fix.replication" and "volume.balance" should be followed.\n\n`\n}\n\nfunc (c *commandVolumeTierMove) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {\n\n\ttierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)\n\tcollectionPattern := tierCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'")\n\tfullPercentage := tierCommand.Float64("fullPercent", 95, "the volume reaches the percentage of max volume size")\n\tquietPeriod := tierCommand.Duration("quietFor", 24*time.Hour, "select volumes with no writes for this period")\n\tsource := tierCommand.String("fromDiskType", "", "the source disk type")\n\ttarget := tierCommand.String("toDiskType", "", "the target disk type")\n\tparallelLimit := tierCommand.Int("parallelLimit", 0, "limit the number of parallel copying jobs")\n\tapplyChange := tierCommand.Bool("force", false, "actually apply the changes")\n\tif err = tierCommand.Parse(args); err != nil {\n\t\treturn nil\n\t}\n\n\tif err = commandEnv.confirmIsLocked(args); err != nil {\n\t\treturn\n\t}\n\n\tfromDiskType := types.ToDiskType(*source)\n\ttoDiskType := types.ToDiskType(*target)\n\n\tif fromDiskType == toDiskType {\n\t\treturn fmt.Errorf("source tier %s is the same as target tier %s", fromDiskType, toDiskType)\n\t}\n\n\t\/\/ collect topology information\n\ttopologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ collect all volumes that should change\n\tvolumeIds, err := collectVolumeIdsForTierChange(commandEnv, topologyInfo, volumeSizeLimitMb, fromDiskType, *collectionPattern, *fullPercentage, *quietPeriod)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf("tier move volumes: %v\\n", volumeIds)\n\n\t_, allLocations := collectVolumeReplicaLocations(topologyInfo)\n\tallLocations = filterLocationsByDiskType(allLocations, toDiskType)\n\tkeepDataNodesSorted(allLocations, toDiskType)\n\n\tif len(allLocations) > 0 && *parallelLimit > 0 && *parallelLimit < len(allLocations) {\n\t\tallLocations = allLocations[:*parallelLimit]\n\t}\n\n\twg := sync.WaitGroup{}\n\tbufferLen := len(allLocations)\n\tc.queues = make(map[pb.ServerAddress]chan volumeTierMoveJob)
\n\n\tfor _, dst := range allLocations {\n\t\tdestServerAddress := pb.NewServerAddressFromDataNode(dst.dataNode)\n\t\tc.queues[destServerAddress] = make(chan volumeTierMoveJob, bufferLen)\n\n\t\twg.Add(1)\n\t\tgo func(dst location, jobs <-chan volumeTierMoveJob, applyChanges bool) {\n\t\t\tdefer wg.Done()\n\t\t\tfor job := range jobs {\n\t\t\t\tfmt.Fprintf(writer, "moving volume %d from %s to %s with disk type %s ...\\n", job.vid, job.src, dst.dataNode.Id, toDiskType.ReadableString())\n\n\t\t\t\tlocations, found := commandEnv.MasterClient.GetLocations(uint32(job.vid))\n\t\t\t\tif !found {\n\t\t\t\t\tfmt.Printf("volume %d not found\\n", job.vid)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tunlock := c.Lock(job.src)\n\n\t\t\t\tif err := c.doMoveOneVolume(commandEnv, writer, job.vid, toDiskType, locations, job.src, dst, applyChanges); err != nil {\n\t\t\t\t\tfmt.Fprintf(writer, "move volume %d %s => %s: %v\\n", job.vid, job.src, dst.dataNode.Id, err)\n\t\t\t\t}\n\t\t\t\tunlock()\n\t\t\t}\n\t\t}(dst, c.queues[destServerAddress], *applyChange)\n\t}\n\n\tfor _, vid := range volumeIds {\n\t\tif err = c.doVolumeTierMove(commandEnv, writer, vid, toDiskType, allLocations); err != nil {\n\t\t\tfmt.Printf("tier move volume %d: %v\\n", vid, err)\n\t\t}\n\t\tallLocations = rotateDataNodes(allLocations)\n\t}\n\tfor key := range c.queues {\n\t\tclose(c.queues[key])\n\t}\n\n\twg.Wait()\n\n\treturn nil\n}\n\nfunc (c *commandVolumeTierMove) Lock(key pb.ServerAddress) func() {\n\tvalue, _ := c.activeServers.LoadOrStore(key, &sync.Mutex{})\n\tmtx := value.(*sync.Mutex)\n\tmtx.Lock()\n\n\treturn func() { mtx.Unlock() }\n}\n\nfunc filterLocationsByDiskType(dataNodes []location, diskType types.DiskType) (ret []location) {\n\tfor _, loc := range dataNodes {\n\t\t_, found := loc.dataNode.DiskInfos[string(diskType)]\n\t\tif found {\n\t\t\tret = append(ret, loc)\n\t\t}\n\t}\n\treturn\n}\n\nfunc rotateDataNodes(dataNodes []location) []location {\n\tif len(dataNodes) > 0 {\n\t\treturn append(dataNodes[1:], dataNodes[0])\n\t} else {\n\t\treturn dataNodes\n\t}\n}\n\nfunc isOneOf(server string, locations []wdclient.Location) bool {\n\tfor _, loc := range locations {\n\t\tif server == loc.Url {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (c *commandVolumeTierMove) doVolumeTierMove(commandEnv *CommandEnv, writer io.Writer, vid needle.VolumeId, toDiskType types.DiskType, allLocations []location) (err error) {\n\t\/\/ find volume location\n\tlocations, found := commandEnv.MasterClient.GetLocations(uint32(vid))\n\tif !found {\n\t\treturn fmt.Errorf("volume %d not found", vid)\n\t}\n\n\t\/\/ find one server with the most empty volume slots with target disk type\n\thasFoundTarget := false\n\tfn := capacityByFreeVolumeCount(toDiskType)\n\tfor _, dst := range allLocations {\n\t\tif fn(dst.dataNode) > 0 && !hasFoundTarget {\n\t\t\t\/\/ ask the volume server to replicate the volume\n\t\t\tif isOneOf(dst.dataNode.Id, locations) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar sourceVolumeServer pb.ServerAddress\n\t\t\tfor _, loc := range locations {\n\t\t\t\tif loc.Url != dst.dataNode.Id {\n\t\t\t\t\tsourceVolumeServer = loc.ServerAddress()\n\t\t\t\t}\n\t\t\t}\n\t\t\tif sourceVolumeServer == "" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\thasFoundTarget = true\n\n\t\t\t\/\/ adjust volume count\n\t\t\tdst.dataNode.DiskInfos[string(toDiskType)].VolumeCount++\n\n\t\t\tdestServerAddress := pb.NewServerAddressFromDataNode(dst.dataNode)\n\t\t\tc.queues[destServerAddress] <- volumeTierMoveJob{sourceVolumeServer, 
vid}\n\t\t}\n\t}\n\n\tif !hasFoundTarget {\n\t\tfmt.Fprintf(writer, \"can not find disk type %s for volume %d\\n\", toDiskType.ReadableString(), vid)\n\t}\n\n\treturn nil\n}\n\nfunc (c *commandVolumeTierMove) doMoveOneVolume(commandEnv *CommandEnv, writer io.Writer, vid needle.VolumeId, toDiskType types.DiskType, locations []wdclient.Location, sourceVolumeServer pb.ServerAddress, dst location, applyChanges bool) (err error) {\n\n\t\/\/ mark all replicas as read only\n\tif applyChanges {\n\t\tif err = markVolumeReplicasWritable(commandEnv.option.GrpcDialOption, vid, locations, false); err != nil {\n\t\t\treturn fmt.Errorf(\"mark volume %d as readonly on %s: %v\", vid, locations[0].Url, err)\n\t\t}\n\t\tif err = LiveMoveVolume(commandEnv.option.GrpcDialOption, writer, vid, sourceVolumeServer, pb.NewServerAddressFromDataNode(dst.dataNode), 5*time.Second, toDiskType.ReadableString(), true); err != nil {\n\n\t\t\t\/\/ mark all replicas as writable\n\t\t\tif err = markVolumeReplicasWritable(commandEnv.option.GrpcDialOption, vid, locations, true); err != nil {\n\t\t\t\tglog.Errorf(\"mark volume %d as writable on %s: %v\", vid, locations[0].Url, err)\n\t\t\t}\n\n\t\t\treturn fmt.Errorf(\"move volume %d %s => %s : %v\", vid, locations[0].Url, dst.dataNode.Id, err)\n\t\t}\n\t}\n\n\t\/\/ adjust volume count\n\tdst.dataNode.DiskInfos[string(toDiskType)].VolumeCount++\n\n\t\/\/ remove the remaining replicas\n\tfor _, loc := range locations {\n\t\tif loc.Url != dst.dataNode.Id && loc.ServerAddress() != sourceVolumeServer {\n\t\t\tif applyChanges {\n\t\t\t\tif err = deleteVolume(commandEnv.option.GrpcDialOption, vid, loc.ServerAddress()); err != nil {\n\t\t\t\t\tfmt.Fprintf(writer, \"failed to delete volume %d on %s: %v\\n\", vid, loc.Url, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ reduce volume count? 
Not really necessary since they are \"more\" full and will not be candidates to move to\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc collectVolumeIdsForTierChange(commandEnv *CommandEnv, topologyInfo *master_pb.TopologyInfo, volumeSizeLimitMb uint64, sourceTier types.DiskType, collectionPattern string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {\n\n\tquietSeconds := int64(quietPeriod \/ time.Second)\n\tnowUnixSeconds := time.Now().Unix()\n\n\tfmt.Printf("collect %s volumes quiet for: %d seconds\\n", sourceTier, quietSeconds)\n\n\tvidMap := make(map[uint32]bool)\n\teachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {\n\t\tfor _, diskInfo := range dn.DiskInfos {\n\t\t\tfor _, v := range diskInfo.VolumeInfos {\n\t\t\t\t\/\/ check collection name pattern\n\t\t\t\tif collectionPattern != "" {\n\t\t\t\t\tmatched, err := filepath.Match(collectionPattern, v.Collection)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tif !matched {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif v.ModifiedAtSecond+quietSeconds < nowUnixSeconds && types.ToDiskType(v.DiskType) == sourceTier {\n\t\t\t\t\tif float64(v.Size) > fullPercentage\/100*float64(volumeSizeLimitMb)*1024*1024 {\n\t\t\t\t\t\tvidMap[v.Id] = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\tfor vid := range vidMap {\n\t\tvids = append(vids, needle.VolumeId(vid))\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n*\tThe Traveling Salesman Problem (TSP) is a problem that tries to determine the\n* shortest route that visits a series of cities (visiting each one exactly\n* once), returning to the city of origin.\n*\n*\tUsing a distance matrix to represent an undirected graph.\n*\n* Suppose we have the following graph:\n*\n* 6\n* -------------------\n* | 3 1 |\n* (000)-----(001)-----(002)\n* | \\ \/ | \\ \/ |\n* | \\ 6\/ | \\2 \/ |\n* | \\ \/ | \\ \/ |\n* 2| \/ 8| \/ |5\n* | \/ \\ | \/ \\ |\n* | \/ 3\\ | \/4 \\ |\n* | \/ \\ | \/ \\ |\n* (003)-----(004)-----(005)\n* | 6 4 |\n* -------------------\n* 1\n*\n* Distance Matrix\n* 0 1 2 3 4 5\n* 0 0 3 6 2 3 -\n* 1 3 0 1 6 8 2\n* 2 6 1 0 - 4 5\n* 3 2 6 - 0 6 1\n* 4 3 8 4 6 0 4\n* 5 - 2 5 1 4 0\n*\n* Best solution:\n* 0 - 3 - 5 - 1 - 2 - 4 - 0: 13\n*\/\n\npackage caixeiroviajante\n\nimport "fmt"\n\nconst vertices = 6\nconst infinito = 99999999\n\nvar matrizDistancia [][]int\n\nvar tempSolucao []int \/\/ Temporary solution\nvar melhorSolucao []int \/\/ Best solution\nvar visitados []bool \/\/ Visited vertices\n\nvar valorMelhorSolucao int\nvar valorSolucaoAtual int\n\nfunc caixeiroViajanteAux(x int) {\n\n    \/\/ This is no longer the best solution, so we can stop here\n    if valorSolucaoAtual > valorMelhorSolucao {\n        return\n    }\n\n    \/\/ The temporary solution vector is complete\n    if x == vertices {\n        distancia := matrizDistancia[tempSolucao[x-1]][tempSolucao[0]]\n\n        \/\/ A better solution has been found\n        if distancia < infinito && (valorSolucaoAtual + distancia) < valorMelhorSolucao {\n\n            \/\/ We have a better solution\n            valorMelhorSolucao = valorSolucaoAtual + distancia\n\n            \/\/ Copy the whole vector into the best solution\n            for i := 0; i < vertices; i++ {\n                melhorSolucao[i] = tempSolucao[i]\n            }\n        }\n        return\n    }\n\n    \/\/ Last vertex currently in the temporary solution\n    ultimo := tempSolucao[x-1]\n\n    \/\/ Walk through every column of the distance matrix on the last vertex's row\n    for i := 0; i < vertices; i++ {\n\n
        \/\/ If the position has not been visited yet and the matrix value is less than infinity\n        if visitados[i] == false && matrizDistancia[ultimo][i] < infinito {\n            \/\/ Mark the position as visited\n            visitados[i] = true\n            \/\/ Load the current vertex into the temporary solution\n            tempSolucao[x] = i\n            \/\/ Increase the total distance traveled based on the matrix position\n            valorSolucaoAtual = valorSolucaoAtual + matrizDistancia[ultimo][i]\n            \/\/ Recursively call for the next vertex\n            caixeiroViajanteAux(x+1)\n            \/\/ If not finished yet, decrement the variable that stores the total distance\n            valorSolucaoAtual = valorSolucaoAtual - matrizDistancia[ultimo][i]\n            \/\/ Mark as not visited so it can be reached from another vertex\n            visitados[i] = false\n        }\n    }\n}\n\nfunc caixeiroViajante(posicaoInicial int) {\n    \/\/ Check whether the position is valid\n    if posicaoInicial < vertices {\n        visitados[posicaoInicial] = true \/\/ Mark the first vertex as visited\n        tempSolucao[0] = posicaoInicial \/\/ Put the starting position in the first slot of the temporary solution\n        caixeiroViajanteAux(1) \/\/ Call the traveling salesman helper\n    } else {\n        fmt.Println("Invalid starting vertex")\n    }\n}\n\n\/\/ Initialize the vectors and default values\nfunc inicia() {\n\n    valorMelhorSolucao = infinito\n    valorSolucaoAtual = 0\n\n    for i := 0; i < vertices; i++ {\n        visitados = append(visitados, false)\n        tempSolucao = append(tempSolucao, -1)\n        melhorSolucao = append(melhorSolucao, -1)\n    }\n\n    \/\/ Create the distance matrix\n    linha0 := []int{ 0, 3, 6, 2, 3, infinito}\n    linha1 := []int{ 3, 0, 1, 6, 8, 2}\n    linha2 := []int{ 6, 1, 0, infinito, 4, 5}\n    linha3 := []int{ 2, 6, infinito, 0, 6, 1}\n    linha4 := []int{ 3, 8, 4, 6, 0, 4}\n    linha5 := []int{infinito, 2, 5, 1, 4, 0}\n\n    matrizDistancia = append(matrizDistancia, linha0)\n    matrizDistancia = append(matrizDistancia, linha1)\n    matrizDistancia = append(matrizDistancia, linha2)\n    matrizDistancia = append(matrizDistancia, linha3)\n    matrizDistancia = append(matrizDistancia, linha4)\n    matrizDistancia = append(matrizDistancia, linha5)\n}\n\n\/\/ Temporary main\nfunc Run() {\n    inicia()\n    caixeiroViajante(0)\n\n    fmt.Println("\\nTraveling Salesman")\n    fmt.Println("Shortest path:", valorMelhorSolucao)\n    for i := 0; i < vertices; i++ {\n        fmt.Print(melhorSolucao[i], ", ")\n    }\n    fmt.Print(melhorSolucao[0])\n    fmt.Println("\\n")\n}\n<commit_msg>Indentation in caixeiroviajante.go<commit_after>\/*\n * The Traveling Salesman Problem (TSP) is a problem that tries to determine the\n * shortest route that visits a series of cities (visiting each one exactly\n * once), returning to the city of origin.\n *\n * Using a distance matrix to represent an undirected graph.\n *\n * Suppose we have the following graph:\n *\n * 6\n * -------------------\n * | 3 1 |\n * (000)-----(001)-----(002)\n * | \\ \/ | \\ \/ |\n * | \\ 6\/ | \\2 \/ |\n * | \\ \/ | \\ \/ |\n * 2| \/ 8| \/ |5\n * | \/ \\ | \/ \\ |\n * | \/ 3\\ | \/4 \\ |\n * | \/ \\ | \/ \\ |\n * (003)-----(004)-----(005)\n * | 6 4 |\n * -------------------\n * 1\n *\n * Distance Matrix\n * 0 1 2 3 4 5\n * 0 0 3 6 2 3 -\n * 1 3 0 1 6 8 2\n * 2 6 1 0 - 4 5\n * 3 2 6 - 0 6 1\n * 4 3 8 4 6 0 4\n * 5 - 2 5 1 4 0\n *\n * Best solution:\n * 0 - 3 - 5 - 1 - 2 - 4 - 0: 13\n *\/\n\npackage caixeiroviajante\n\nimport "fmt"\n\nconst vertices = 6\nconst infinito = 99999999\n\nvar matrizDistancia [][]int\n\nvar tempSolucao []int \/\/ Temporary solution\nvar melhorSolucao []int \/\/ Best solution\n
var visitados []bool \/\/ Visited vertices\n\nvar valorMelhorSolucao int\nvar valorSolucaoAtual int\n\nfunc caixeiroViajanteAux(x int) {\n\n    \/\/ This is no longer the best solution, so we can stop here\n    if valorSolucaoAtual > valorMelhorSolucao {\n        return\n    }\n\n    \/\/ The temporary solution vector is complete\n    if x == vertices {\n        distancia := matrizDistancia[tempSolucao[x-1]][tempSolucao[0]]\n\n        \/\/ A better solution has been found\n        if distancia < infinito && (valorSolucaoAtual + distancia) < valorMelhorSolucao {\n\n            \/\/ We have a better solution\n            valorMelhorSolucao = valorSolucaoAtual + distancia\n\n            \/\/ Copy the whole vector into the best solution\n            for i := 0; i < vertices; i++ {\n                melhorSolucao[i] = tempSolucao[i]\n            }\n        }\n        return\n    }\n\n    \/\/ Last vertex currently in the temporary solution\n    ultimo := tempSolucao[x-1]\n\n    \/\/ Walk through every column of the distance matrix on the last vertex's row\n    for i := 0; i < vertices; i++ {\n\n        \/\/ If the position has not been visited yet and the matrix value is less than infinity\n        if visitados[i] == false && matrizDistancia[ultimo][i] < infinito {\n            \/\/ Mark the position as visited\n            visitados[i] = true\n            \/\/ Load the current vertex into the temporary solution\n            tempSolucao[x] = i\n            \/\/ Increase the total distance traveled based on the matrix position\n            valorSolucaoAtual = valorSolucaoAtual + matrizDistancia[ultimo][i]\n            \/\/ Recursively call for the next vertex\n            caixeiroViajanteAux(x+1)\n            \/\/ If not finished yet, decrement the variable that stores the total distance\n            valorSolucaoAtual = valorSolucaoAtual - matrizDistancia[ultimo][i]\n            \/\/ Mark as not visited so it can be reached from another vertex\n            visitados[i] = false\n        }\n    }\n}\n\nfunc caixeiroViajante(posicaoInicial int) {\n    \/\/ Check whether the position is valid\n    if posicaoInicial < vertices {\n        visitados[posicaoInicial] = true \/\/ Mark the first vertex as visited\n        tempSolucao[0] = posicaoInicial \/\/ Put the starting position in the first slot of the temporary solution\n        caixeiroViajanteAux(1) \/\/ Call the traveling salesman helper\n    } else {\n        fmt.Println("Invalid starting vertex")\n    }\n}\n\n\/\/ Initialize the vectors and default values\nfunc inicia() {\n\n    valorMelhorSolucao = infinito\n    valorSolucaoAtual = 0\n\n    for i := 0; i < vertices; i++ {\n        visitados = append(visitados, false)\n        tempSolucao = append(tempSolucao, -1)\n        melhorSolucao = append(melhorSolucao, -1)\n    }\n\n    \/\/ Create the distance matrix\n    linha0 := []int{ 0, 3, 6, 2, 3, infinito}\n    linha1 := []int{ 3, 0, 1, 6, 8, 2}\n    linha2 := []int{ 6, 1, 0, infinito, 4, 5}\n    linha3 := []int{ 2, 6, infinito, 0, 6, 1}\n    linha4 := []int{ 3, 8, 4, 6, 0, 4}\n    linha5 := []int{infinito, 2, 5, 1, 4, 0}\n\n    matrizDistancia = append(matrizDistancia, linha0)\n    matrizDistancia = append(matrizDistancia, linha1)\n    matrizDistancia = append(matrizDistancia, linha2)\n    matrizDistancia = append(matrizDistancia, linha3)\n    matrizDistancia = append(matrizDistancia, linha4)\n    matrizDistancia = append(matrizDistancia, linha5)\n}\n\n\/\/ Function that runs the traveling salesman example\nfunc Run() {\n    inicia()\n    caixeiroViajante(0)\n\n    fmt.Println("\\nTraveling Salesman")\n    fmt.Println("Shortest path:", valorMelhorSolucao)\n    for i := 0; i < vertices; i++ {\n        fmt.Print(melhorSolucao[i], ", ")\n    }\n    fmt.Print(melhorSolucao[0])\n    fmt.Println("\\n")\n}\n<|endoftext|>"} {"text":"<commit_before>package hoverfly_test\n\nimport (
\n\t"net\/http"\n\n\t"github.com\/SpectoLabs\/hoverfly\/functional-tests"\n\t"github.com\/dghubble\/sling"\n\t. "github.com\/onsi\/ginkgo"\n\t. "github.com\/onsi\/gomega"\n)\n\nvar _ = Describe("When I run Hoverfly", func() {\n\n\tvar (\n\t\thoverfly *functional_tests.Hoverfly\n\n\t\tusername = "ft_user"\n\t\tpassword = "ft_password"\n\t)\n\n\tBeforeEach(func() {\n\t\thoverfly = functional_tests.NewHoverfly()\n\t})\n\n\tContext("with auth turned on", func() {\n\n\t\tBeforeEach(func() {\n\t\t\thoverfly.Start("-db", "boltdb", "-add", "-username", username, "-password", password)\n\t\t\thoverfly.Start("-db", "boltdb", "-auth", "true")\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\thoverfly.Stop()\n\t\t})\n\n\t\tIt("should return a 407 when trying to proxy without auth credentials", func() {\n\t\t\tresp := hoverfly.Proxy(sling.New().Get("http:\/\/hoverfly.io"))\n\t\t\tExpect(resp.StatusCode).To(Equal(http.StatusProxyAuthRequired))\n\t\t})\n\n\t\tIt("should return a 502 (no match in simulate mode) when trying to proxy with auth credentials", func() {\n\t\t\tresp := hoverfly.ProxyWithAuth(sling.New().Get("http:\/\/hoverfly.io"), username, password)\n\t\t\tExpect(resp.StatusCode).To(Equal(http.StatusBadGateway))\n\t\t})\n\t})\n})\n<commit_msg>Added a test for incorrect auth credentials<commit_after>package hoverfly_test\n\nimport (\n\t"net\/http"\n\n\t"github.com\/SpectoLabs\/hoverfly\/functional-tests"\n\t"github.com\/dghubble\/sling"\n\t. "github.com\/onsi\/ginkgo"\n\t. "github.com\/onsi\/gomega"\n)\n\nvar _ = Describe("When I run Hoverfly", func() {\n\n\tvar (\n\t\thoverfly *functional_tests.Hoverfly\n\n\t\tusername = "ft_user"\n\t\tpassword = "ft_password"\n\t)\n\n\tBeforeEach(func() {\n\t\thoverfly = functional_tests.NewHoverfly()\n\t})\n\n\tContext("with auth turned on", func() {\n\n\t\tBeforeEach(func() {\n\t\t\thoverfly.Start("-db", "boltdb", "-add", "-username", username, "-password", password)\n\t\t\thoverfly.Start("-db", "boltdb", "-auth", "true")\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\thoverfly.Stop()\n\t\t})\n\n\t\tIt("should return a 407 when trying to proxy without auth credentials", func() {\n\t\t\tresp := hoverfly.Proxy(sling.New().Get("http:\/\/hoverfly.io"))\n\t\t\tExpect(resp.StatusCode).To(Equal(http.StatusProxyAuthRequired))\n\t\t})\n\n\t\tIt("should return a 407 when trying to proxy with incorrect auth credentials", func() {\n\t\t\tresp := hoverfly.ProxyWithAuth(sling.New().Get("http:\/\/hoverfly.io"), "incorrect", "incorrect")\n\t\t\tExpect(resp.StatusCode).To(Equal(http.StatusProxyAuthRequired))\n\t\t})\n\n\t\tIt("should return a 502 (no match in simulate mode) when trying to proxy with auth credentials", func() {\n\t\t\tresp := hoverfly.ProxyWithAuth(sling.New().Get("http:\/\/hoverfly.io"), username, password)\n\t\t\tExpect(resp.StatusCode).To(Equal(http.StatusBadGateway))\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package koding\n\nimport (\n\t"fmt"\n\t"koding\/db\/mongodb"\n\t"koding\/kites\/kloud\/klient"\n\t"strconv"\n\n\t"labix.org\/v2\/mgo"\n\t"labix.org\/v2\/mgo\/bson"\n\n\t"github.com\/koding\/kite"\n\taws "github.com\/koding\/kloud\/api\/amazon"\n\t"github.com\/koding\/kloud\/machinestate"\n\t"github.com\/koding\/kloud\/protocol"\n\t"github.com\/koding\/kloud\/provider\/amazon"\n\t"github.com\/koding\/logging"\n\t"github.com\/mitchellh\/goamz\/ec2"\n)\n\ntype PlanChecker struct {\n\tapi *amazon.AmazonClient
\n\tdb *mongodb.MongoDB\n\tmachine *protocol.Machine\n\tprovider *Provider\n\tkite *kite.Kite\n\tusername string\n\tlog logging.Logger\n}\n\n\/\/ PlanChecker creates and returns a new PlanChecker struct that is responsible\n\/\/ for checking various pieces of information based on a Plan\nfunc (p *Provider) PlanChecker(opts *protocol.Machine) (*PlanChecker, error) {\n\ta, err := p.NewClient(opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tctx := &PlanChecker{\n\t\tapi: a,\n\t\tprovider: p,\n\t\tdb: p.Session,\n\t\tkite: p.Kite,\n\t\tusername: opts.Builder["username"].(string),\n\t\tlog: p.Log,\n\t\tmachine: opts,\n\t}\n\n\treturn ctx, nil\n}\n\n\/\/ Plan returns user's current plan\nfunc (p *PlanChecker) Plan() (Plan, error) {\n\treturn Free, nil\n}\n\nfunc (p *PlanChecker) AllowedInstances(wantInstance InstanceType) error {\n\tplan, err := p.Plan()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tallowedInstances := plan.Limits().AllowedInstances\n\n\tp.log.Info("[%s] checking instance type. want: %s (plan: %s)",\n\t\tp.machine.MachineId, wantInstance, plan)\n\n\tif _, ok := allowedInstances[wantInstance]; ok {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf("not allowed to create instance type: %s", wantInstance)\n}\n\n\/\/ AlwaysOn checks whether the given machine has reached the current plan's\n\/\/ always on limit\nfunc (p *PlanChecker) AlwaysOn() error {\n\tplan, err := p.Plan()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmachineData, ok := p.machine.CurrentData.(*Machine)\n\tif !ok {\n\t\treturn fmt.Errorf("current data is malformed: %v", p.machine.CurrentData)\n\t}\n\n\talwaysOnLimit := plan.Limits().AlwaysOn\n\n\talwaysOnEnabled := false\n\tif has, ok := p.machine.Builder["alwaysOn"]; ok {\n\t\tif alwaysOnEnabled, ok = has.(bool); !ok {\n\t\t\treturn fmt.Errorf("alwaysOn data is malformed %v", has)\n\t\t}\n\t} else {\n\t\t\/\/ it doesn't exist, so give access to continue\n\t\treturn nil\n\t}\n\n\t\/\/ alwaysOn is disabled, so give access\n\tif !alwaysOnEnabled {\n\t\treturn nil\n\t}\n\n\tuser := machineData.Users[0]\n\n\t\/\/ get all machines that belong to this user\n\talwaysOnMachines := 0\n\terr = p.db.Run("jMachines", func(c *mgo.Collection) error {\n\t\talwaysOnMachines, err = c.Find(bson.M{\n\t\t\t"users.id": user.Id,\n\t\t}).Count()\n\n\t\treturn err\n\t})\n\n\t\/\/ if it's something else, just return the error; this needs to be fixed\n\tif err != nil && err != mgo.ErrNotFound {\n\t\treturn err\n\t}\n\n\tp.log.Info("[%s] checking alwaysOn limit. current alwaysOn count: %d (plan limit: %d, plan: %s)",\n\t\tp.machine.MachineId, alwaysOnMachines, alwaysOnLimit, plan)\n\t\/\/ the user has still not reached the limit\n\tif alwaysOnMachines < alwaysOnLimit {\n\t\tp.log.Info("[%s] allowing user '%s'. current alwaysOn count: %d (plan limit: %d, plan: %s)",\n\t\t\tp.machine.MachineId, p.username, alwaysOnMachines, alwaysOnLimit, plan)\n\t\treturn nil\n\t}\n\n\tp.log.Info("[%s] denying user '%s'. 
current alwaysOn count: %d (plan limit: %d, plan: %s)",\n\t\tp.machine.MachineId, p.username, alwaysOnMachines, alwaysOnLimit, plan)\n\treturn fmt.Errorf("total alwaysOn limit has been reached")\n}\n\n\/\/ Timeout checks whether the user has reached the current plan's inactivity timeout.\nfunc (p *PlanChecker) Timeout() error {\n\tplan, err := p.Plan()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ get the timeout from the plan which the user belongs to\n\tplanTimeout := plan.Limits().Timeout\n\n\tmachineData, ok := p.machine.CurrentData.(*Machine)\n\tif !ok {\n\t\treturn fmt.Errorf("current data is malformed: %v", p.machine.CurrentData)\n\t}\n\n\t\/\/ connect and get real time data directly from the machine's klient\n\tklient, err := klient.New(p.kite, machineData.QueryString)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer klient.Close()\n\n\t\/\/ get the usage directly from the klient, which is the most predictable source\n\tusg, err := klient.Usage()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.log.Info("[%s] machine [%s] is inactive for %s (plan limit: %s, plan: %s).",\n\t\tmachineData.Id.Hex(), machineData.IpAddress, usg.InactiveDuration, planTimeout, plan)\n\n\t\/\/ It still has plenty of time to work, do not stop it\n\tif usg.InactiveDuration <= planTimeout {\n\t\treturn nil\n\t}\n\n\tp.log.Info("[%s] machine [%s] has reached current plan limit of %s (plan: %s). Shutting down...",\n\t\tmachineData.Id.Hex(), machineData.IpAddress, usg.InactiveDuration, planTimeout, plan)\n\n\t\/\/ mark our state as stopping so others know what we are doing\n\tp.provider.UpdateState(machineData.Id.Hex(), machinestate.Stopping)\n\n\t\/\/ replace with the real and authenticated username\n\tp.machine.Builder["username"] = klient.Username\n\n\t\/\/ Hasta la vista, baby!\n\terr = p.provider.Stop(p.machine)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update to final state too\n\treturn p.provider.UpdateState(machineData.Id.Hex(), machinestate.Stopped)\n}\n\n\/\/ Total checks whether the user has reached the current plan's limit of having\n\/\/ a total number of machines. It returns an error if the limit is\n\/\/ reached or an unexplained error happened.\nfunc (p *PlanChecker) Total() error {\n\tplan, err := p.Plan()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tallowedMachines := plan.Limits().Total\n\n\tinstances, err := p.userInstances()\n\n\t\/\/ no match, allow creating an instance\n\tif err == aws.ErrNoInstances {\n\t\tp.log.Info("[%s] allowing user '%s'. current machine count: %d (plan limit: %d, plan: %s)",\n\t\t\tp.machine.MachineId, p.username, len(instances), allowedMachines, plan)\n\t\treturn nil\n\t}\n\n\t\/\/ if it's something else, don't allow it until it's solved\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(instances) >= allowedMachines {\n\t\tp.log.Info("[%s] denying user '%s'. current machine count: %d (plan limit: %d, plan: %s)",\n\t\t\tp.machine.MachineId, p.username, len(instances), allowedMachines, plan)\n\n\t\treturn fmt.Errorf("total limit of %d machines has been reached", allowedMachines)\n\t}\n\n\tp.log.Info("[%s] allowing user '%s'. current machine count: %d (plan limit: %d, plan: %s)",\n\t\tp.machine.MachineId, p.username, len(instances), allowedMachines, plan)\n\n\treturn nil\n}\n\n\/\/ Storage checks whether the user has reached the current plan's total\n\/\/ storage limit with the supplied wantStorage information. 
It returns an error if\n\/\/ the limit is reached or an unexplained error happened.\nfunc (p *PlanChecker) Storage(wantStorage int) error {\n\tplan, err := p.Plan()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttotalStorage := plan.Limits().Storage\n\n\tinstances, err := p.userInstances()\n\n\t\/\/ i hate for loops too, but unfortunately the responses are always in the form\n\t\/\/ of slices\n\tcurrentStorage := 0\n\tfor _, instance := range instances {\n\t\tfor _, blockDevice := range instance.BlockDevices {\n\t\t\tvolumes, err := p.api.Client.Volumes([]string{blockDevice.VolumeId}, ec2.NewFilter())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfor _, volume := range volumes.Volumes {\n\t\t\t\tvolumeStorage, err := strconv.Atoi(volume.Size)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tcurrentStorage += volumeStorage\n\t\t\t}\n\t\t}\n\t}\n\n\tp.log.Info("[%s] Checking storage. Current: %dGB. Want: %dGB (plan limit: %dGB, plan: %s)",\n\t\tp.machine.MachineId, currentStorage, wantStorage, totalStorage, plan)\n\n\tif currentStorage+wantStorage > totalStorage {\n\t\treturn fmt.Errorf("total storage limit has been reached. Can use %dGB of %dGB (plan: %s)",\n\t\t\ttotalStorage-currentStorage, totalStorage, plan)\n\t}\n\n\tp.log.Info("[%s] Allowing user '%s'. Current: %dGB. Want: %dGB (plan limit: %dGB, plan: %s)",\n\t\tp.machine.MachineId, p.username, currentStorage, wantStorage, totalStorage, plan)\n\n\t\/\/ allow creating storage\n\treturn nil\n}\n\nfunc (p *PlanChecker) userInstances() ([]ec2.Instance, error) {\n\tfilter := ec2.NewFilter()\n\t\/\/ instances in Amazon have a `koding-user` tag with the username as the\n\t\/\/ value. We can easily find them according to this tag\n\tfilter.Add("tag:koding-user", p.username)\n\tfilter.Add("tag:koding-env", p.kite.Config.Environment)\n\n\t\/\/ Anything except "terminated" and "shutting-down"\n\tfilter.Add("instance-state-name", "pending", "running", "stopping", "stopped")\n\n\treturn p.api.InstancesByFilter(filter)\n}\n<commit_msg>kloud\/plans: now finalize the checker into an interface to make testing easy<commit_after>package koding\n\nimport (\n\t"fmt"\n\t"koding\/db\/mongodb"\n\t"koding\/kites\/kloud\/klient"\n\t"strconv"\n\n\t"labix.org\/v2\/mgo"\n\t"labix.org\/v2\/mgo\/bson"\n\n\t"github.com\/koding\/kite"\n\taws "github.com\/koding\/kloud\/api\/amazon"\n\t"github.com\/koding\/kloud\/machinestate"\n\t"github.com\/koding\/kloud\/protocol"\n\t"github.com\/koding\/kloud\/provider\/amazon"\n\t"github.com\/koding\/logging"\n\t"github.com\/mitchellh\/goamz\/ec2"\n)\n\n\/\/ Checker checks various aspects of a machine. It is used to limit certain\n\/\/ aspects of a machine, such as the total machine count or total storage.\ntype Checker interface {\n\t\/\/ Total checks whether the user has reached the current plan's limit of\n\t\/\/ having a total number of machines. It returns an error if the\n\t\/\/ limit is reached or an unexplained error happened.\n\tTotal() error\n\n\t\/\/ AlwaysOn checks whether the given machine has reached the current plan's\n\t\/\/ always on limit\n\tAlwaysOn() error\n\n\t\/\/ Timeout checks whether the user has reached the current plan's\n\t\/\/ inactivity timeout.\n\tTimeout() error\n\n\t\/\/ Storage checks whether the user has reached the current plan's\n\t\/\/ total storage limit with the supplied wantStorage information. 
It returns an\n\t\/\/ error if the limit is reached or an unexplained error happened.\n\tStorage(wantStorage int) error\n\n\t\/\/ AllowedInstances checks whether the given machine has the permission to\n\t\/\/ create the given instance type\n\tAllowedInstances(wantInstance InstanceType) error\n}\n\ntype PlanChecker struct {\n\tapi *amazon.AmazonClient\n\tdb *mongodb.MongoDB\n\tmachine *protocol.Machine\n\tprovider *Provider\n\tkite *kite.Kite\n\tusername string\n\tlog logging.Logger\n}\n\n\/\/ PlanChecker creates and returns a new Checker interface that is responsible\n\/\/ for checking various pieces of information based on a Plan\nfunc (p *Provider) PlanChecker(opts *protocol.Machine) (Checker, error) {\n\ta, err := p.NewClient(opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tchecker := &PlanChecker{\n\t\tapi: a,\n\t\tprovider: p,\n\t\tdb: p.Session,\n\t\tkite: p.Kite,\n\t\tusername: opts.Builder["username"].(string),\n\t\tlog: p.Log,\n\t\tmachine: opts,\n\t}\n\n\treturn checker, nil\n}\n\n\/\/ Plan returns user's current plan\nfunc (p *PlanChecker) Plan() (Plan, error) {\n\treturn Free, nil\n}\n\nfunc (p *PlanChecker) AllowedInstances(wantInstance InstanceType) error {\n\tplan, err := p.Plan()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tallowedInstances := plan.Limits().AllowedInstances\n\n\tp.log.Info("[%s] checking instance type. want: %s (plan: %s)",\n\t\tp.machine.MachineId, wantInstance, plan)\n\n\tif _, ok := allowedInstances[wantInstance]; ok {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf("not allowed to create instance type: %s", wantInstance)\n}\n\nfunc (p *PlanChecker) AlwaysOn() error {\n\tplan, err := p.Plan()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmachineData, ok := p.machine.CurrentData.(*Machine)\n\tif !ok {\n\t\treturn fmt.Errorf("current data is malformed: %v", p.machine.CurrentData)\n\t}\n\n\talwaysOnLimit := plan.Limits().AlwaysOn\n\n\talwaysOnEnabled := false\n\tif has, ok := p.machine.Builder["alwaysOn"]; ok {\n\t\tif alwaysOnEnabled, ok = has.(bool); !ok {\n\t\t\treturn fmt.Errorf("alwaysOn data is malformed %v", has)\n\t\t}\n\t} else {\n\t\t\/\/ it doesn't exist, so give access to continue\n\t\treturn nil\n\t}\n\n\t\/\/ alwaysOn is disabled, so give access\n\tif !alwaysOnEnabled {\n\t\treturn nil\n\t}\n\n\tuser := machineData.Users[0]\n\n\t\/\/ get all machines that belong to this user\n\talwaysOnMachines := 0\n\terr = p.db.Run("jMachines", func(c *mgo.Collection) error {\n\t\talwaysOnMachines, err = c.Find(bson.M{\n\t\t\t"users.id": user.Id,\n\t\t}).Count()\n\n\t\treturn err\n\t})\n\n\t\/\/ if it's something else, just return the error; this needs to be fixed\n\tif err != nil && err != mgo.ErrNotFound {\n\t\treturn err\n\t}\n\n\tp.log.Info("[%s] checking alwaysOn limit. current alwaysOn count: %d (plan limit: %d, plan: %s)",\n\t\tp.machine.MachineId, alwaysOnMachines, alwaysOnLimit, plan)\n\t\/\/ the user has still not reached the limit\n\tif alwaysOnMachines < alwaysOnLimit {\n\t\tp.log.Info("[%s] allowing user '%s'. current alwaysOn count: %d (plan limit: %d, plan: %s)",\n\t\t\tp.machine.MachineId, p.username, alwaysOnMachines, alwaysOnLimit, plan)\n\t\treturn nil\n\t}\n\n\tp.log.Info("[%s] denying user '%s'. 
current alwaysOn count: %d (plan limit: %d, plan: %s)",\n\t\tp.machine.MachineId, p.username, alwaysOnMachines, alwaysOnLimit, plan)\n\treturn fmt.Errorf("total alwaysOn limit has been reached")\n}\n\nfunc (p *PlanChecker) Timeout() error {\n\tplan, err := p.Plan()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ get the timeout from the plan which the user belongs to\n\tplanTimeout := plan.Limits().Timeout\n\n\tmachineData, ok := p.machine.CurrentData.(*Machine)\n\tif !ok {\n\t\treturn fmt.Errorf("current data is malformed: %v", p.machine.CurrentData)\n\t}\n\n\t\/\/ connect and get real time data directly from the machine's klient\n\tklient, err := klient.New(p.kite, machineData.QueryString)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer klient.Close()\n\n\t\/\/ get the usage directly from the klient, which is the most predictable source\n\tusg, err := klient.Usage()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.log.Info("[%s] machine [%s] is inactive for %s (plan limit: %s, plan: %s).",\n\t\tmachineData.Id.Hex(), machineData.IpAddress, usg.InactiveDuration, planTimeout, plan)\n\n\t\/\/ It still has plenty of time to work, do not stop it\n\tif usg.InactiveDuration <= planTimeout {\n\t\treturn nil\n\t}\n\n\tp.log.Info("[%s] machine [%s] has reached current plan limit of %s (plan: %s). Shutting down...",\n\t\tmachineData.Id.Hex(), machineData.IpAddress, usg.InactiveDuration, planTimeout, plan)\n\n\t\/\/ mark our state as stopping so others know what we are doing\n\tp.provider.UpdateState(machineData.Id.Hex(), machinestate.Stopping)\n\n\t\/\/ replace with the real and authenticated username\n\tp.machine.Builder["username"] = klient.Username\n\n\t\/\/ Hasta la vista, baby!\n\terr = p.provider.Stop(p.machine)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update to final state too\n\treturn p.provider.UpdateState(machineData.Id.Hex(), machinestate.Stopped)\n}\n\nfunc (p *PlanChecker) Total() error {\n\tplan, err := p.Plan()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tallowedMachines := plan.Limits().Total\n\n\tinstances, err := p.userInstances()\n\n\t\/\/ no match, allow creating an instance\n\tif err == aws.ErrNoInstances {\n\t\tp.log.Info("[%s] allowing user '%s'. current machine count: %d (plan limit: %d, plan: %s)",\n\t\t\tp.machine.MachineId, p.username, len(instances), allowedMachines, plan)\n\t\treturn nil\n\t}\n\n\t\/\/ if it's something else, don't allow it until it's solved\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(instances) >= allowedMachines {\n\t\tp.log.Info("[%s] denying user '%s'. current machine count: %d (plan limit: %d, plan: %s)",\n\t\t\tp.machine.MachineId, p.username, len(instances), allowedMachines, plan)\n\n\t\treturn fmt.Errorf("total limit of %d machines has been reached", allowedMachines)\n\t}\n\n\tp.log.Info("[%s] allowing user '%s'. 
current machine count: %d (plan limit: %d, plan: %s)",\n\t\tp.machine.MachineId, p.username, len(instances), allowedMachines, plan)\n\n\treturn nil\n}\n\nfunc (p *PlanChecker) Storage(wantStorage int) error {\n\tplan, err := p.Plan()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttotalStorage := plan.Limits().Storage\n\n\tinstances, err := p.userInstances()\n\n\t\/\/ i hate for loops too, but unfortunately the responses are always in the form\n\t\/\/ of slices\n\tcurrentStorage := 0\n\tfor _, instance := range instances {\n\t\tfor _, blockDevice := range instance.BlockDevices {\n\t\t\tvolumes, err := p.api.Client.Volumes([]string{blockDevice.VolumeId}, ec2.NewFilter())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfor _, volume := range volumes.Volumes {\n\t\t\t\tvolumeStorage, err := strconv.Atoi(volume.Size)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tcurrentStorage += volumeStorage\n\t\t\t}\n\t\t}\n\t}\n\n\tp.log.Info("[%s] Checking storage. Current: %dGB. Want: %dGB (plan limit: %dGB, plan: %s)",\n\t\tp.machine.MachineId, currentStorage, wantStorage, totalStorage, plan)\n\n\tif currentStorage+wantStorage > totalStorage {\n\t\treturn fmt.Errorf("total storage limit has been reached. Can use %dGB of %dGB (plan: %s)",\n\t\t\ttotalStorage-currentStorage, totalStorage, plan)\n\t}\n\n\tp.log.Info("[%s] Allowing user '%s'. Current: %dGB. Want: %dGB (plan limit: %dGB, plan: %s)",\n\t\tp.machine.MachineId, p.username, currentStorage, wantStorage, totalStorage, plan)\n\n\t\/\/ allow creating storage\n\treturn nil\n}\n\nfunc (p *PlanChecker) userInstances() ([]ec2.Instance, error) {\n\tfilter := ec2.NewFilter()\n\t\/\/ instances in Amazon have a `koding-user` tag with the username as the\n\t\/\/ value. We can easily find them according to this tag\n\tfilter.Add("tag:koding-user", p.username)\n\tfilter.Add("tag:koding-env", p.kite.Config.Environment)\n\n\t\/\/ Anything except "terminated" and "shutting-down"\n\tfilter.Add("instance-state-name", "pending", "running", "stopping", "stopped")\n\n\treturn p.api.InstancesByFilter(filter)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The sync package provides basic synchronization primitives\n\/\/ such as mutual exclusion locks. These are intended for use\n\/\/ by low-level library routines. Higher-level synchronization\n\/\/ is better done via channels and communication.\npackage sync\n\nfunc cas(val *int32, old, new int32) bool\nfunc semacquire(*int32)\nfunc semrelease(*int32)\n\n\/\/ A Mutex is a mutual exclusion lock.\n\/\/ Mutexes can be created as part of other structures;\n\/\/ the zero value for a Mutex is an unlocked mutex.\ntype Mutex struct {\n\tkey int32;\n\tsema int32;\n}\n\nfunc xadd(val *int32, delta int32) (new int32) {\n\tfor {\n\t\tv := *val;\n\t\tif cas(val, v, v+delta) {\n\t\t\treturn v+delta;\n\t\t}\n\t}\n\tpanic(\"unreached\")\n}\n\n\/\/ Lock locks m.\n\/\/ If the lock is already in use, the calling goroutine\n\/\/ blocks until the mutex is available.\nfunc (m *Mutex) Lock() {\n\tif xadd(&m.key, 1) == 1 {\n\t\t\/\/ changed from 0 to 1; we hold lock\n\t\treturn;\n\t}\n\tsemacquire(&m.sema);\n}\n\n\/\/ Unlock unlocks m.\n\/\/ It is a run-time error if m is not locked on entry to Unlock.\n\/\/\n\/\/ A locked Mutex is not associated with a particular goroutine.\n\/\/ It is allowed for one goroutine to lock a Mutex and then\n\/\/ arrange for another goroutine to unlock it.\nfunc (m *Mutex) Unlock() {\n\tif xadd(&m.key, -1) == 0 {\n\t\t\/\/ changed from 1 to 0; no contention\n\t\treturn;\n\t}\n\tsemrelease(&m.sema);\n}\n\n\/\/ Stub implementation of r\/w locks.\n\/\/ This satisfies the semantics but\n\/\/ is not terribly efficient.\n\n\/\/ The next comment goes in the BUGS section of the document,\n\/\/ in its own paragraph, without the (rsc) tag.\n\n\/\/ BUG(rsc): RWMutex does not (yet) allow multiple readers;\n\/\/ instead it behaves as if RLock and RUnlock were Lock and Unlock.\n\n\/\/ An RWMutex is a reader\/writer mutual exclusion lock.\n\/\/ The lock can be held by an arbitrary number of readers\n\/\/ or a single writer.\n\/\/ RWMutexes can be created as part of other\n\/\/ structures; the zero value for a RWMutex is\n\/\/ an unlocked mutex.\ntype RWMutex struct {\n\tm Mutex;\n}\n\n\/\/ RLock locks rw for reading.\n\/\/ If the lock is already locked for writing or there is a writer already waiting\n\/\/ to acquire the lock, RLock blocks until the writer has released the lock.\nfunc (rw *RWMutex) RLock() {\n\trw.m.Lock();\n}\n\n\/\/ RUnlock undoes a single RLock call;\n\/\/ it does not affect other simultaneous readers.\n\/\/ It is a run-time error if rw is not locked for reading\n\/\/ on entry to RUnlock.\nfunc (rw *RWMutex) RUnlock() {\n\trw.m.Unlock();\n}\n\n\/\/ Lock locks rw for writing.\n\/\/ If the lock is already locked for reading or writing,\n\/\/ Lock blocks until the lock is available.\n\/\/ To ensure that the lock eventually becomes available,\n\/\/ a blocked Lock call excludes new readers from acquiring\n\/\/ the lock.\nfunc (rw *RWMutex) Lock() {\n\trw.m.Lock();\n}\n\n\/\/ Unlock unlocks rw for writing.\n\/\/ It is a run-time error if rw is not locked for writing\n\/\/ on entry to Unlock.\n\/\/\n\/\/ Like for Mutexes,\n\/\/ a locked RWMutex is not associated with a particular goroutine.\n\/\/ It is allowed for one goroutine to RLock (Lock) an RWMutex and then\n\/\/ arrange for another goroutine to RUnlock (Unlock) it.\nfunc (rw *RWMutex) Unlock() {\n\trw.m.Unlock();\n}\n\n<|endoftext|>"} {"text":"<commit_before>package alert\n\nimport 
(\n\t"context"\n\t"fmt"\n\t"time"\n\n\tg "github.com\/onsi\/ginkgo"\n\n\texutil "github.com\/openshift\/origin\/test\/extended\/util"\n\thelper "github.com\/openshift\/origin\/test\/extended\/util\/prometheus"\n\n\tmetav1 "k8s.io\/apimachinery\/pkg\/apis\/meta\/v1"\n\t"k8s.io\/kubernetes\/test\/e2e\/framework"\n\te2eskipper "k8s.io\/kubernetes\/test\/e2e\/framework\/skipper"\n\t"k8s.io\/kubernetes\/test\/e2e\/upgrades"\n)\n\nconst (\n\t\/\/ Delay after upgrade is complete before checking for critical alerts\n\talertCheckSleepMinutes = 5\n\talertCheckSleep = alertCheckSleepMinutes * time.Minute\n\n\t\/\/ Previous period in which to check for critical alerts\n\talertPeriodCheckMinutes = 1\n)\n\n\/\/ UpgradeTest runs post-upgrade after alertCheckSleep delay and tests if any critical alerts are firing.\ntype UpgradeTest struct {\n\turl string\n\tbearerToken string\n\toc *exutil.CLI\n}\n\nfunc (UpgradeTest) Name() string { return "check-for-critical-alerts" }\nfunc (UpgradeTest) DisplayName() string {\n\treturn "[sig-arch] Check if critical alerts are firing after upgrade success"\n}\n\n\/\/ Setup creates parameters to query Prometheus\nfunc (t *UpgradeTest) Setup(f *framework.Framework) {\n\tg.By("Setting up post-upgrade alert test")\n\n\turl, bearerToken, oc, ok := helper.ExpectPrometheus(f)\n\tif !ok {\n\t\tframework.Failf("Prometheus could not be located on this cluster, failing test %s", t.Name())\n\t}\n\tt.url = url\n\tt.bearerToken = bearerToken\n\tt.oc = oc\n\tframework.Logf("Post-upgrade alert test setup complete")\n}\n\n\/\/ Test checks if any critical alerts are firing.\nfunc (t *UpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {\n\tg.By("Checking for critical alerts")\n\n\t\/\/ Recover current test if it fails so the test suite can complete\n\tdefer g.GinkgoRecover()\n\n\t\/\/ Block until upgrade is done\n\tg.By("Waiting for upgrade to finish before checking for critical alerts")\n\t<-done\n\n\tctx, cancel := context.WithCancel(context.Background())\n\n\t\/\/ Additional delay after upgrade completion\n\tg.By("Waiting before checking for critical alerts")\n\ttime.Sleep(alertCheckSleep)\n\tcancel()\n\n\tif helper.TestUnsupportedAllowVersionSkew() {\n\t\te2eskipper.Skipf("Test is disabled to allow cluster components to have different versions, and skewed versions trigger multiple other alerts")\n\t}\n\tt.oc.SetupProject()\n\tns := t.oc.Namespace()\n\texecPod := exutil.CreateCentosExecPodOrFail(t.oc.AdminKubeClient(), ns, "execpod", nil)\n\tdefer func() {\n\t\tt.oc.AdminKubeClient().CoreV1().Pods(ns).Delete(ctx, execPod.Name, *metav1.NewDeleteOptions(1))\n\t}()\n\n\t\/\/ Query to check if Prometheus has been up and running for the entire post-upgrade\n\t\/\/ period by verifying the Watchdog alert has been in the firing state\n\twatchdogQuery := fmt.Sprintf(`count_over_time(ALERTS{alertstate="firing",alertname="Watchdog", severity="none"}[%dm])`, alertCheckSleepMinutes)\n\n\t\/\/ Query to check for any critical severity alerts that have occurred within the last alertPeriodCheckMinutes.\n\t\/\/ TODO Remove KubeAPIErrorBudgetBurn from ignore list once Bug 1821661 is fixed.\n\tcriticalAlertQuery := fmt.Sprintf(`count_over_time(ALERTS{alertname!~"Watchdog|AlertmanagerReceiversNotConfigured|KubeAPILatencyHigh|KubeAPIErrorBudgetBurn",alertstate="firing",severity="critical"}[%dm]) >= 1`, alertPeriodCheckMinutes)\n\n\ttests := map[string]bool{\n\t\twatchdogQuery: true,\n\t\tcriticalAlertQuery: 
false,\n\t}\n\n\thelper.RunQueries(tests, t.oc, ns, execPod.Name, t.url, t.bearerToken)\n\n\tframework.Logf("No critical alerts firing post-upgrade")\n}\n\n\/\/ Teardown cleans up any remaining resources.\nfunc (t *UpgradeTest) Teardown(f *framework.Framework) {\n\t\/\/ rely on the namespace deletion to clean up everything\n}\n<commit_msg>Revert "Ignore KubeAPIErrorBudgetBurn alert"<commit_after>package alert\n\nimport (\n\t"context"\n\t"fmt"\n\t"time"\n\n\tg "github.com\/onsi\/ginkgo"\n\n\texutil "github.com\/openshift\/origin\/test\/extended\/util"\n\thelper "github.com\/openshift\/origin\/test\/extended\/util\/prometheus"\n\n\tmetav1 "k8s.io\/apimachinery\/pkg\/apis\/meta\/v1"\n\t"k8s.io\/kubernetes\/test\/e2e\/framework"\n\te2eskipper "k8s.io\/kubernetes\/test\/e2e\/framework\/skipper"\n\t"k8s.io\/kubernetes\/test\/e2e\/upgrades"\n)\n\nconst (\n\t\/\/ Delay after upgrade is complete before checking for critical alerts\n\talertCheckSleepMinutes = 5\n\talertCheckSleep = alertCheckSleepMinutes * time.Minute\n\n\t\/\/ Previous period in which to check for critical alerts\n\talertPeriodCheckMinutes = 1\n)\n\n\/\/ UpgradeTest runs post-upgrade after alertCheckSleep delay and tests if any critical alerts are firing.\ntype UpgradeTest struct {\n\turl string\n\tbearerToken string\n\toc *exutil.CLI\n}\n\nfunc (UpgradeTest) Name() string { return "check-for-critical-alerts" }\nfunc (UpgradeTest) DisplayName() string {\n\treturn "[sig-arch] Check if critical alerts are firing after upgrade success"\n}\n\n\/\/ Setup creates parameters to query Prometheus\nfunc (t *UpgradeTest) Setup(f *framework.Framework) {\n\tg.By("Setting up post-upgrade alert test")\n\n\turl, bearerToken, oc, ok := helper.ExpectPrometheus(f)\n\tif !ok {\n\t\tframework.Failf("Prometheus could not be located on this cluster, failing test %s", t.Name())\n\t}\n\tt.url = url\n\tt.bearerToken = bearerToken\n\tt.oc = oc\n\tframework.Logf("Post-upgrade alert test setup complete")\n}\n\n\/\/ Test checks if any critical alerts are firing.\nfunc (t *UpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {\n\tg.By("Checking for critical alerts")\n\n\t\/\/ Recover current test if it fails so the test suite can complete\n\tdefer g.GinkgoRecover()\n\n\t\/\/ Block until upgrade is done\n\tg.By("Waiting for upgrade to finish before checking for critical alerts")\n\t<-done\n\n\tctx, cancel := context.WithCancel(context.Background())\n\n\t\/\/ Additional delay after upgrade completion\n\tg.By("Waiting before checking for critical alerts")\n\ttime.Sleep(alertCheckSleep)\n\tcancel()\n\n\tif helper.TestUnsupportedAllowVersionSkew() {\n\t\te2eskipper.Skipf("Test is disabled to allow cluster components to have different versions, and skewed versions trigger multiple other alerts")\n\t}\n\tt.oc.SetupProject()\n\tns := t.oc.Namespace()\n\texecPod := exutil.CreateCentosExecPodOrFail(t.oc.AdminKubeClient(), ns, "execpod", nil)\n\tdefer func() {\n\t\tt.oc.AdminKubeClient().CoreV1().Pods(ns).Delete(ctx, execPod.Name, *metav1.NewDeleteOptions(1))\n\t}()\n\n\t\/\/ Query to check if Prometheus has been up and running for the entire post-upgrade\n\t\/\/ period by verifying the Watchdog alert has been in the firing state\n\twatchdogQuery := fmt.Sprintf(`count_over_time(ALERTS{alertstate="firing",alertname="Watchdog", severity="none"}[%dm])`, alertCheckSleepMinutes)\n\n\t\/\/ Query to check for any critical severity alerts that have occurred within the last 
alertPeriodCheckMinutes.\n\tcriticalAlertQuery := fmt.Sprintf(`count_over_time(ALERTS{alertname!~\"Watchdog|AlertmanagerReceiversNotConfigured|KubeAPILatencyHigh\",alertstate=\"firing\",severity=\"critical\"}[%dm]) >= 1`, alertPeriodCheckMinutes)\n\n\ttests := map[string]bool{\n\t\twatchdogQuery: true,\n\t\tcriticalAlertQuery: false,\n\t}\n\n\thelper.RunQueries(tests, t.oc, ns, execPod.Name, t.url, t.bearerToken)\n\n\tframework.Logf(\"No critical alerts firing post-upgrade\")\n}\n\n\/\/ Teardown cleans up any remaining resources.\nfunc (t *UpgradeTest) Teardown(f *framework.Framework) {\n\t\/\/ rely on the namespace deletion to clean up everything\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"time\"\n\n\tc \"github.com\/flynn\/flynn\/Godeps\/_workspace\/src\/github.com\/flynn\/go-check\"\n\t\"github.com\/flynn\/flynn\/controller\/client\"\n\tct \"github.com\/flynn\/flynn\/controller\/types\"\n\t\"github.com\/flynn\/flynn\/pkg\/dialer\"\n)\n\n\/\/ Prefix the suite with \"Z\" so that it runs after all other tests because\n\/\/ if it fails, all other tests after it will be affected\ntype ZDomainMigrationSuite struct {\n\tHelper\n}\n\nvar _ = c.Suite(&ZDomainMigrationSuite{})\n\nfunc (s *ZDomainMigrationSuite) migrateDomain(t *c.C, dm *ct.DomainMigration) {\n\tdebugf(t, \"migrating domain from %s to %s\", dm.OldDomain, dm.Domain)\n\tclient := s.controllerClient(t)\n\n\tevents := make(chan *ct.Event)\n\tstream, err := client.StreamEvents(controller.StreamEventsOptions{\n\t\tObjectTypes: []ct.EventType{ct.EventTypeDomainMigration},\n\t}, events)\n\tt.Assert(err, c.IsNil)\n\tdefer stream.Close()\n\n\tprevRouterRelease, err := client.GetAppRelease(\"router\")\n\tt.Assert(err, c.IsNil)\n\n\terr = client.PutDomain(dm)\n\tt.Assert(err, c.IsNil)\n\n\twaitEvent := func(typ string, timeout time.Duration) (event ct.DomainMigrationEvent) {\n\t\tdebugf(t, \"waiting for %s domain migration event\", typ)\n\t\tvar e *ct.Event\n\t\tvar ok bool\n\t\tselect {\n\t\tcase e, ok = <-events:\n\t\t\tif !ok {\n\t\t\t\tt.Fatal(\"event stream closed unexpectedly\")\n\t\t\t}\n\t\t\tdebugf(t, \"got %s domain migration event\", typ)\n\t\tcase <-time.After(timeout):\n\t\t\tt.Fatalf(\"timed out waiting for %s domain migration event\", typ)\n\t\t}\n\t\tt.Assert(e.Data, c.NotNil)\n\t\tt.Assert(json.Unmarshal(e.Data, &event), c.IsNil)\n\t\treturn\n\t}\n\n\t\/\/ created\n\tevent := waitEvent(\"initial\", 2*time.Minute)\n\tt.Assert(event.Error, c.Equals, \"\")\n\tt.Assert(event.DomainMigration, c.NotNil)\n\tt.Assert(event.DomainMigration.ID, c.Equals, dm.ID)\n\tt.Assert(event.DomainMigration.OldDomain, c.Equals, dm.OldDomain)\n\tt.Assert(event.DomainMigration.Domain, c.Equals, dm.Domain)\n\tt.Assert(event.DomainMigration.TLSCert, c.IsNil)\n\tt.Assert(event.DomainMigration.OldTLSCert, c.NotNil)\n\tt.Assert(event.DomainMigration.CreatedAt, c.NotNil)\n\tt.Assert(event.DomainMigration.CreatedAt.Equal(*dm.CreatedAt), c.Equals, true)\n\tt.Assert(event.DomainMigration.FinishedAt, c.IsNil)\n\n\t\/\/ complete\n\tevent = waitEvent(\"final\", 3*time.Minute)\n\tt.Assert(event.Error, c.Equals, \"\")\n\tt.Assert(event.DomainMigration, c.NotNil)\n\tt.Assert(event.DomainMigration.ID, c.Equals, dm.ID)\n\tt.Assert(event.DomainMigration.OldDomain, c.Equals, dm.OldDomain)\n\tt.Assert(event.DomainMigration.Domain, c.Equals, dm.Domain)\n\tt.Assert(event.DomainMigration.TLSCert, c.NotNil)\n\tt.Assert(event.DomainMigration.OldTLSCert, 
c.NotNil)\n\tt.Assert(event.DomainMigration.CreatedAt, c.NotNil)\n\tt.Assert(event.DomainMigration.CreatedAt.Equal(*dm.CreatedAt), c.Equals, true)\n\tt.Assert(event.DomainMigration.FinishedAt, c.NotNil)\n\n\tcert := event.DomainMigration.TLSCert\n\n\tcontrollerRelease, err := client.GetAppRelease(\"controller\")\n\tt.Assert(err, c.IsNil)\n\tt.Assert(controllerRelease.Env[\"DEFAULT_ROUTE_DOMAIN\"], c.Equals, dm.Domain)\n\tt.Assert(controllerRelease.Env[\"CA_CERT\"], c.Equals, cert.CACert)\n\n\trouterRelease, err := client.GetAppRelease(\"router\")\n\tt.Assert(err, c.IsNil)\n\tt.Assert(routerRelease.Env[\"TLSCERT\"], c.Equals, cert.Cert)\n\tt.Assert(routerRelease.Env[\"TLSKEY\"], c.Not(c.Equals), \"\")\n\tt.Assert(routerRelease.Env[\"TLSKEY\"], c.Not(c.Equals), prevRouterRelease.Env[\"TLSKEY\"])\n\n\tdashboardRelease, err := client.GetAppRelease(\"dashboard\")\n\tt.Assert(err, c.IsNil)\n\tt.Assert(dashboardRelease.Env[\"DEFAULT_ROUTE_DOMAIN\"], c.Equals, dm.Domain)\n\tt.Assert(dashboardRelease.Env[\"CONTROLLER_DOMAIN\"], c.Equals, fmt.Sprintf(\"controller.%s\", dm.Domain))\n\tt.Assert(dashboardRelease.Env[\"URL\"], c.Equals, fmt.Sprintf(\"https:\/\/dashboard.%s\", dm.Domain))\n\tt.Assert(dashboardRelease.Env[\"CA_CERT\"], c.Equals, cert.CACert)\n\n\tvar doPing func(string, int)\n\tdoPing = func(component string, retriesRemaining int) {\n\t\turl := fmt.Sprintf(\"http:\/\/%s.%s\/ping\", component, dm.Domain)\n\t\thttpClient := &http.Client{Transport: &http.Transport{Dial: dialer.Retry.Dial}}\n\t\tres, err := httpClient.Get(url)\n\t\tif (err != nil || res.StatusCode != 200) && retriesRemaining > 0 {\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\tdoPing(component, retriesRemaining-1)\n\t\t\treturn\n\t\t}\n\t\tt.Assert(err, c.IsNil)\n\t\tt.Assert(res.StatusCode, c.Equals, 200, c.Commentf(\"failed to ping %s\", component))\n\t}\n\tdoPing(\"controller\", 3)\n\tdoPing(\"dashboard\", 3)\n}\n\nfunc (s *ZDomainMigrationSuite) TestDomainMigration(t *c.C) {\n\trelease, err := s.controllerClient(t).GetAppRelease(\"controller\")\n\tt.Assert(err, c.IsNil)\n\toldDomain := release.Env[\"DEFAULT_ROUTE_DOMAIN\"]\n\n\t\/\/ using xip.io to get around modifying \/etc\/hosts\n\tdm := &ct.DomainMigration{\n\t\tOldDomain: oldDomain,\n\t\tDomain: fmt.Sprintf(\"%s.xip.io\", routerIP),\n\t}\n\ts.migrateDomain(t, dm)\n\ts.migrateDomain(t, &ct.DomainMigration{\n\t\tOldDomain: dm.Domain,\n\t\tDomain: dm.OldDomain,\n\t})\n}\n<commit_msg>test: Log domain migration stream error<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"time\"\n\n\tc \"github.com\/flynn\/flynn\/Godeps\/_workspace\/src\/github.com\/flynn\/go-check\"\n\t\"github.com\/flynn\/flynn\/controller\/client\"\n\tct \"github.com\/flynn\/flynn\/controller\/types\"\n\t\"github.com\/flynn\/flynn\/pkg\/dialer\"\n)\n\n\/\/ Prefix the suite with \"Z\" so that it runs after all other tests because\n\/\/ if it fails, all other tests after it will be affected\ntype ZDomainMigrationSuite struct {\n\tHelper\n}\n\nvar _ = c.Suite(&ZDomainMigrationSuite{})\n\nfunc (s *ZDomainMigrationSuite) migrateDomain(t *c.C, dm *ct.DomainMigration) {\n\tdebugf(t, \"migrating domain from %s to %s\", dm.OldDomain, dm.Domain)\n\tclient := s.controllerClient(t)\n\n\tevents := make(chan *ct.Event)\n\tstream, err := client.StreamEvents(controller.StreamEventsOptions{\n\t\tObjectTypes: []ct.EventType{ct.EventTypeDomainMigration},\n\t}, events)\n\tt.Assert(err, c.IsNil)\n\tdefer stream.Close()\n\n\tprevRouterRelease, err := 
client.GetAppRelease(\"router\")\n\tt.Assert(err, c.IsNil)\n\n\terr = client.PutDomain(dm)\n\tt.Assert(err, c.IsNil)\n\n\twaitEvent := func(typ string, timeout time.Duration) (event ct.DomainMigrationEvent) {\n\t\tdebugf(t, \"waiting for %s domain migration event\", typ)\n\t\tvar e *ct.Event\n\t\tvar ok bool\n\t\tselect {\n\t\tcase e, ok = <-events:\n\t\t\tif !ok {\n\t\t\t\tt.Fatalf(\"event stream closed unexpectedly: %s\", stream.Err())\n\t\t\t}\n\t\t\tdebugf(t, \"got %s domain migration event\", typ)\n\t\tcase <-time.After(timeout):\n\t\t\tt.Fatalf(\"timed out waiting for %s domain migration event\", typ)\n\t\t}\n\t\tt.Assert(e.Data, c.NotNil)\n\t\tt.Assert(json.Unmarshal(e.Data, &event), c.IsNil)\n\t\treturn\n\t}\n\n\t\/\/ created\n\tevent := waitEvent(\"initial\", 2*time.Minute)\n\tt.Assert(event.Error, c.Equals, \"\")\n\tt.Assert(event.DomainMigration, c.NotNil)\n\tt.Assert(event.DomainMigration.ID, c.Equals, dm.ID)\n\tt.Assert(event.DomainMigration.OldDomain, c.Equals, dm.OldDomain)\n\tt.Assert(event.DomainMigration.Domain, c.Equals, dm.Domain)\n\tt.Assert(event.DomainMigration.TLSCert, c.IsNil)\n\tt.Assert(event.DomainMigration.OldTLSCert, c.NotNil)\n\tt.Assert(event.DomainMigration.CreatedAt, c.NotNil)\n\tt.Assert(event.DomainMigration.CreatedAt.Equal(*dm.CreatedAt), c.Equals, true)\n\tt.Assert(event.DomainMigration.FinishedAt, c.IsNil)\n\n\t\/\/ complete\n\tevent = waitEvent(\"final\", 3*time.Minute)\n\tt.Assert(event.Error, c.Equals, \"\")\n\tt.Assert(event.DomainMigration, c.NotNil)\n\tt.Assert(event.DomainMigration.ID, c.Equals, dm.ID)\n\tt.Assert(event.DomainMigration.OldDomain, c.Equals, dm.OldDomain)\n\tt.Assert(event.DomainMigration.Domain, c.Equals, dm.Domain)\n\tt.Assert(event.DomainMigration.TLSCert, c.NotNil)\n\tt.Assert(event.DomainMigration.OldTLSCert, c.NotNil)\n\tt.Assert(event.DomainMigration.CreatedAt, c.NotNil)\n\tt.Assert(event.DomainMigration.CreatedAt.Equal(*dm.CreatedAt), c.Equals, true)\n\tt.Assert(event.DomainMigration.FinishedAt, c.NotNil)\n\n\tcert := event.DomainMigration.TLSCert\n\n\tcontrollerRelease, err := client.GetAppRelease(\"controller\")\n\tt.Assert(err, c.IsNil)\n\tt.Assert(controllerRelease.Env[\"DEFAULT_ROUTE_DOMAIN\"], c.Equals, dm.Domain)\n\tt.Assert(controllerRelease.Env[\"CA_CERT\"], c.Equals, cert.CACert)\n\n\trouterRelease, err := client.GetAppRelease(\"router\")\n\tt.Assert(err, c.IsNil)\n\tt.Assert(routerRelease.Env[\"TLSCERT\"], c.Equals, cert.Cert)\n\tt.Assert(routerRelease.Env[\"TLSKEY\"], c.Not(c.Equals), \"\")\n\tt.Assert(routerRelease.Env[\"TLSKEY\"], c.Not(c.Equals), prevRouterRelease.Env[\"TLSKEY\"])\n\n\tdashboardRelease, err := client.GetAppRelease(\"dashboard\")\n\tt.Assert(err, c.IsNil)\n\tt.Assert(dashboardRelease.Env[\"DEFAULT_ROUTE_DOMAIN\"], c.Equals, dm.Domain)\n\tt.Assert(dashboardRelease.Env[\"CONTROLLER_DOMAIN\"], c.Equals, fmt.Sprintf(\"controller.%s\", dm.Domain))\n\tt.Assert(dashboardRelease.Env[\"URL\"], c.Equals, fmt.Sprintf(\"https:\/\/dashboard.%s\", dm.Domain))\n\tt.Assert(dashboardRelease.Env[\"CA_CERT\"], c.Equals, cert.CACert)\n\n\tvar doPing func(string, int)\n\tdoPing = func(component string, retriesRemaining int) {\n\t\turl := fmt.Sprintf(\"http:\/\/%s.%s\/ping\", component, dm.Domain)\n\t\thttpClient := &http.Client{Transport: &http.Transport{Dial: dialer.Retry.Dial}}\n\t\tres, err := httpClient.Get(url)\n\t\tif (err != nil || res.StatusCode != 200) && retriesRemaining > 0 {\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\tdoPing(component, retriesRemaining-1)\n\t\t\treturn\n\t\t}\n\t\tt.Assert(err, 
c.IsNil)\n\t\tt.Assert(res.StatusCode, c.Equals, 200, c.Commentf(\"failed to ping %s\", component))\n\t}\n\tdoPing(\"controller\", 3)\n\tdoPing(\"dashboard\", 3)\n}\n\nfunc (s *ZDomainMigrationSuite) TestDomainMigration(t *c.C) {\n\trelease, err := s.controllerClient(t).GetAppRelease(\"controller\")\n\tt.Assert(err, c.IsNil)\n\toldDomain := release.Env[\"DEFAULT_ROUTE_DOMAIN\"]\n\n\t\/\/ using xip.io to get around modifying \/etc\/hosts\n\tdm := &ct.DomainMigration{\n\t\tOldDomain: oldDomain,\n\t\tDomain: fmt.Sprintf(\"%s.xip.io\", routerIP),\n\t}\n\ts.migrateDomain(t, dm)\n\ts.migrateDomain(t, &ct.DomainMigration{\n\t\tOldDomain: dm.Domain,\n\t\tDomain: dm.OldDomain,\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Factom Foundation\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage wallet\n\nimport (\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"regexp\"\n\n\t\"github.com\/FactomProject\/btcutil\/base58\"\n\t\"github.com\/FactomProject\/factom\"\n\t\"github.com\/FactomProject\/factomd\/common\/factoid\"\n\t\"github.com\/FactomProject\/factomd\/common\/interfaces\"\n\t\"github.com\/FactomProject\/factomd\/common\/primitives\"\n)\n\nvar (\n\tErrTXExists = errors.New(\"wallet: Transaction name already exists\")\n\tErrTXNotExists = errors.New(\"wallet: Transaction name was not found\")\n\tErrTXInvalidName = errors.New(\"wallet: Transaction name is not valid\")\n)\n\nfunc (w *Wallet) NewTransaction(name string) error {\n\tif _, exist := w.transactions[name]; exist {\n\t\treturn ErrTXExists\n\t}\n\n\t\/\/ check that the transaction name is valid\n\tif name == \"\" {\n\t\treturn ErrTXInvalidName\n\t}\n\tif len(name) > 32 {\n\t\treturn ErrTXInvalidName\n\t}\n\tif match, err := regexp.MatchString(\"[^a-zA-Z0-9_-]\", name); err != nil {\n\t\treturn err\n\t} else if match {\n\t\treturn ErrTXInvalidName\n\t}\n\n\tt := new(factoid.Transaction)\n\tt.SetTimestamp(*interfaces.NewTimestampNow())\n\tw.transactions[name] = t\n\treturn nil\n}\n\nfunc (w *Wallet) DeleteTransaction(name string) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\tdelete(w.transactions, name)\n\treturn nil\n}\n\nfunc (w *Wallet) AddInput(name, address string, amount uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\ta, err := w.GetFCTAddress(address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tadr := factoid.NewAddress(a.RCDHash())\n\n\t\/\/ First look if this is really an update\n\tfor _, input := range trans.GetInputs() {\n\t\tif input.GetAddress().IsSameAs(adr) {\n\t\t\tinput.SetAmount(amount)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ Add our new input\n\ttrans.AddInput(adr, amount)\n\ttrans.AddRCD(factoid.NewRCD_1(a.PubBytes()))\n\n\treturn nil\n}\n\nfunc (w *Wallet) AddOutput(name, address string, amount uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif !factom.IsValidAddress(address) {\n\t\treturn errors.New(\"Invalid Address\")\n\t}\n\n\tadr := factoid.NewAddress(base58.Decode(address)[2:34])\n\n\ttrans.AddOutput(adr, amount)\n\n\treturn nil\n}\n\nfunc (w *Wallet) AddECOutput(name, address string, amount uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif !factom.IsValidAddress(address) {\n\t\treturn errors.New(\"Invalid 
Address\")\n\t}\n\n\tadr := factoid.NewAddress(base58.Decode(address)[2:34])\n\n\ttrans.AddECOutput(adr, amount)\n\n\treturn nil\n}\n\nfunc (w *Wallet) AddFee(name, address string, rate uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\t{\n\t\tins, err := trans.TotalInputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\touts, err := trans.TotalOutputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tecs, err := trans.TotalECs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ins != outs+ecs {\n\t\t\treturn fmt.Errorf(\"Inputs and outputs don't add up\")\n\t\t}\n\t}\n\n\ttransfee, err := trans.CalculateFee(rate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ta, err := w.GetFCTAddress(address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tadr := factoid.NewAddress(a.RCDHash())\n\n\tfor _, input := range trans.GetInputs() {\n\t\tif input.GetAddress().IsSameAs(adr) {\n\t\t\tamt, err := factoid.ValidateAmounts(input.GetAmount(), transfee)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tinput.SetAmount(amt)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"%s is not an input to the transaction.\", address)\n}\n\nfunc (w *Wallet) SubFee(name, address string, rate uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif !factom.IsValidAddress(address) {\n\t\treturn errors.New(\"Invalid Address\")\n\t}\n\n\t{\n\t\tins, err := trans.TotalInputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\touts, err := trans.TotalOutputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tecs, err := trans.TotalECs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ins != outs+ecs {\n\t\t\treturn fmt.Errorf(\"Inputs and outputs don't add up\")\n\t\t}\n\t}\n\n\ttransfee, err := trans.CalculateFee(rate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tadr := factoid.NewAddress(base58.Decode(address)[2:34])\n\n\tfor _, output := range trans.GetOutputs() {\n\t\tif output.GetAddress().IsSameAs(adr) {\n\t\t\toutput.SetAmount(output.GetAmount() - transfee)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"%s is not an output to the transaction.\", address)\n}\n\nfunc (w *Wallet) SignTransaction(name string) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tdata, err := trans.MarshalBinarySig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i, rcd := range trans.GetRCDs() {\n\t\ta, err := rcd.GetAddress()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tf, err := w.GetFCTAddress(primitives.ConvertFctAddressToUserStr(a))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsig := factoid.NewSingleSignatureBlock(f.SecBytes(), data)\n\t\ttrans.SetSignatureBlock(i, sig)\n\t}\n\n\treturn nil\n}\n\nfunc (w *Wallet) GetTransactions() map[string]*factoid.Transaction {\n\treturn w.transactions\n}\n\nfunc (w *Wallet) ComposeTransaction(name string) (*factom.JSON2Request, error) {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn nil, ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\ttype txreq struct {\n\t\tTransaction string `json:\"transaction\"`\n\t}\n\n\tparam := new(txreq)\n\tif p, err := trans.MarshalBinary(); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tparam.Transaction = hex.EncodeToString(p)\n\t}\n\n\treq := factom.NewJSON2Request(\"factoid-submit\", apiCounter(), param)\n\n\treturn req, 
nil\n}\n<commit_msg>fixed error for address not found in wallet<commit_after>\/\/ Copyright 2016 Factom Foundation\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage wallet\n\nimport (\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"regexp\"\n\n\t\"github.com\/FactomProject\/btcutil\/base58\"\n\t\"github.com\/FactomProject\/factom\"\n\t\"github.com\/FactomProject\/factomd\/common\/factoid\"\n\t\"github.com\/FactomProject\/factomd\/common\/interfaces\"\n\t\"github.com\/FactomProject\/factomd\/common\/primitives\"\n\t\"github.com\/FactomProject\/goleveldb\/leveldb\"\n)\n\nvar (\n\tErrTXExists = errors.New(\"wallet: Transaction name already exists\")\n\tErrTXNotExists = errors.New(\"wallet: Transaction name was not found\")\n\tErrTXInvalidName = errors.New(\"wallet: Transaction name is not valid\")\n)\n\nfunc (w *Wallet) NewTransaction(name string) error {\n\tif _, exist := w.transactions[name]; exist {\n\t\treturn ErrTXExists\n\t}\n\n\t\/\/ check that the transaction name is valid\n\tif name == \"\" {\n\t\treturn ErrTXInvalidName\n\t}\n\tif len(name) > 32 {\n\t\treturn ErrTXInvalidName\n\t}\n\tif match, err := regexp.MatchString(\"[^a-zA-Z0-9_-]\", name); err != nil {\n\t\treturn err\n\t} else if match {\n\t\treturn ErrTXInvalidName\n\t}\n\n\tt := new(factoid.Transaction)\n\tt.SetTimestamp(*interfaces.NewTimestampNow())\n\tw.transactions[name] = t\n\treturn nil\n}\n\nfunc (w *Wallet) DeleteTransaction(name string) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\tdelete(w.transactions, name)\n\treturn nil\n}\n\nfunc (w *Wallet) AddInput(name, address string, amount uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\ta, err := w.GetFCTAddress(address)\n\tif err == leveldb.ErrNotFound {\n\t\treturn errors.New(\"No such address in the wallet\")\n\t} else if err != nil {\n\t\treturn err\n\t}\n\tadr := factoid.NewAddress(a.RCDHash())\n\n\t\/\/ First look if this is really an update\n\tfor _, input := range trans.GetInputs() {\n\t\tif input.GetAddress().IsSameAs(adr) {\n\t\t\tinput.SetAmount(amount)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ Add our new input\n\ttrans.AddInput(adr, amount)\n\ttrans.AddRCD(factoid.NewRCD_1(a.PubBytes()))\n\n\treturn nil\n}\n\nfunc (w *Wallet) AddOutput(name, address string, amount uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif !factom.IsValidAddress(address) {\n\t\treturn errors.New(\"Invalid Address\")\n\t}\n\n\tadr := factoid.NewAddress(base58.Decode(address)[2:34])\n\n\ttrans.AddOutput(adr, amount)\n\n\treturn nil\n}\n\nfunc (w *Wallet) AddECOutput(name, address string, amount uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif !factom.IsValidAddress(address) {\n\t\treturn errors.New(\"Invalid Address\")\n\t}\n\n\tadr := factoid.NewAddress(base58.Decode(address)[2:34])\n\n\ttrans.AddECOutput(adr, amount)\n\n\treturn nil\n}\n\nfunc (w *Wallet) AddFee(name, address string, rate uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\t{\n\t\tins, err := trans.TotalInputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\touts, err := trans.TotalOutputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tecs, err := 
trans.TotalECs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ins != outs+ecs {\n\t\t\treturn fmt.Errorf(\"Inputs and outputs don't add up\")\n\t\t}\n\t}\n\n\ttransfee, err := trans.CalculateFee(rate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ta, err := w.GetFCTAddress(address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tadr := factoid.NewAddress(a.RCDHash())\n\n\tfor _, input := range trans.GetInputs() {\n\t\tif input.GetAddress().IsSameAs(adr) {\n\t\t\tamt, err := factoid.ValidateAmounts(input.GetAmount(), transfee)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tinput.SetAmount(amt)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"%s is not an input to the transaction.\", address)\n}\n\nfunc (w *Wallet) SubFee(name, address string, rate uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif !factom.IsValidAddress(address) {\n\t\treturn errors.New(\"Invalid Address\")\n\t}\n\n\t{\n\t\tins, err := trans.TotalInputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\touts, err := trans.TotalOutputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tecs, err := trans.TotalECs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ins != outs+ecs {\n\t\t\treturn fmt.Errorf(\"Inputs and outputs don't add up\")\n\t\t}\n\t}\n\n\ttransfee, err := trans.CalculateFee(rate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tadr := factoid.NewAddress(base58.Decode(address)[2:34])\n\n\tfor _, output := range trans.GetOutputs() {\n\t\tif output.GetAddress().IsSameAs(adr) {\n\t\t\toutput.SetAmount(output.GetAmount() - transfee)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"%s is not an output to the transaction.\", address)\n}\n\nfunc (w *Wallet) SignTransaction(name string) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tdata, err := trans.MarshalBinarySig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i, rcd := range trans.GetRCDs() {\n\t\ta, err := rcd.GetAddress()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tf, err := w.GetFCTAddress(primitives.ConvertFctAddressToUserStr(a))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsig := factoid.NewSingleSignatureBlock(f.SecBytes(), data)\n\t\ttrans.SetSignatureBlock(i, sig)\n\t}\n\n\treturn nil\n}\n\nfunc (w *Wallet) GetTransactions() map[string]*factoid.Transaction {\n\treturn w.transactions\n}\n\nfunc (w *Wallet) ComposeTransaction(name string) (*factom.JSON2Request, error) {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn nil, ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\ttype txreq struct {\n\t\tTransaction string `json:\"transaction\"`\n\t}\n\n\tparam := new(txreq)\n\tif p, err := trans.MarshalBinary(); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tparam.Transaction = hex.EncodeToString(p)\n\t}\n\n\treq := factom.NewJSON2Request(\"factoid-submit\", apiCounter(), param)\n\n\treturn req, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Factom Foundation\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage wallet\n\nimport 
(\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"regexp\"\n\n\t\"github.com\/FactomProject\/btcutil\/base58\"\n\t\"github.com\/FactomProject\/factom\"\n\t\"github.com\/FactomProject\/factomd\/common\/factoid\"\n\t\"github.com\/FactomProject\/factomd\/common\/primitives\"\n\t\"github.com\/FactomProject\/goleveldb\/leveldb\"\n)\n\nvar (\n\tErrFeeTooHigh = errors.New(\"wallet: Overpaying Fee\")\n\tErrFeeTooLow = errors.New(\"wallet: Insufficient Fee\")\n\tErrNoSuchAddress = errors.New(\"wallet: No such address\")\n\tErrTXExists = errors.New(\"wallet: Transaction name already exists\")\n\tErrTXNotExists = errors.New(\"wallet: Transaction name was not found\")\n\tErrTXNoInputs = errors.New(\"wallet: Transaction has no inputs\")\n\tErrTXInvalidName = errors.New(\"wallet: Transaction name is not valid\")\n)\n\nfunc (w *Wallet) NewTransaction(name string) error {\n\tif _, exist := w.transactions[name]; exist {\n\t\treturn ErrTXExists\n\t}\n\n\t\/\/ check that the transaction name is valid\n\tif name == \"\" {\n\t\treturn ErrTXInvalidName\n\t}\n\tif len(name) > 32 {\n\t\treturn ErrTXInvalidName\n\t}\n\tif match, err := regexp.MatchString(\"[^a-zA-Z0-9_-]\", name); err != nil {\n\t\treturn err\n\t} else if match {\n\t\treturn ErrTXInvalidName\n\t}\n\n\tt := new(factoid.Transaction)\n\tt.SetTimestamp(primitives.NewTimestampNow())\n\tw.transactions[name] = t\n\treturn nil\n}\n\nfunc (w *Wallet) DeleteTransaction(name string) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\tdelete(w.transactions, name)\n\treturn nil\n}\n\nfunc (w *Wallet) AddInput(name, address string, amount uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\ta, err := w.GetFCTAddress(address)\n\tif err == leveldb.ErrNotFound {\n\t\treturn ErrNoSuchAddress\n\t} else if err != nil {\n\t\treturn err\n\t}\n\tadr := factoid.NewAddress(a.RCDHash())\n\n\t\/\/ First look if this is really an update\n\tfor _, input := range trans.GetInputs() {\n\t\tif input.GetAddress().IsSameAs(adr) {\n\t\t\tinput.SetAmount(amount)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ Add our new input\n\ttrans.AddInput(adr, amount)\n\ttrans.AddRCD(factoid.NewRCD_1(a.PubBytes()))\n\n\treturn nil\n}\n\nfunc (w *Wallet) AddOutput(name, address string, amount uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif !factom.IsValidAddress(address) {\n\t\treturn errors.New(\"Invalid Address\")\n\t}\n\n\tadr := factoid.NewAddress(base58.Decode(address)[2:34])\n\n\ttrans.AddOutput(adr, amount)\n\n\treturn nil\n}\n\nfunc (w *Wallet) AddECOutput(name, address string, amount uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif !factom.IsValidAddress(address) {\n\t\treturn errors.New(\"Invalid Address\")\n\t}\n\n\tadr := factoid.NewAddress(base58.Decode(address)[2:34])\n\n\ttrans.AddECOutput(adr, amount)\n\n\treturn nil\n}\n\nfunc (w *Wallet) AddFee(name, address string, rate uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\t{\n\t\tins, err := trans.TotalInputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\touts, err := trans.TotalOutputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tecs, err := trans.TotalECs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ins != outs+ecs 
{\n\t\t\treturn fmt.Errorf(\"Inputs and outputs don't add up\")\n\t\t}\n\t}\n\n\ttransfee, err := trans.CalculateFee(rate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ta, err := w.GetFCTAddress(address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tadr := factoid.NewAddress(a.RCDHash())\n\n\tfor _, input := range trans.GetInputs() {\n\t\tif input.GetAddress().IsSameAs(adr) {\n\t\t\tamt, err := factoid.ValidateAmounts(input.GetAmount(), transfee)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tinput.SetAmount(amt)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"%s is not an input to the transaction.\", address)\n}\n\nfunc (w *Wallet) SubFee(name, address string, rate uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif !factom.IsValidAddress(address) {\n\t\treturn errors.New(\"Invalid Address\")\n\t}\n\n\t{\n\t\tins, err := trans.TotalInputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\touts, err := trans.TotalOutputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tecs, err := trans.TotalECs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ins != outs+ecs {\n\t\t\treturn fmt.Errorf(\"Inputs and outputs don't add up\")\n\t\t}\n\t}\n\n\ttransfee, err := trans.CalculateFee(rate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tadr := factoid.NewAddress(base58.Decode(address)[2:34])\n\n\tfor _, output := range trans.GetOutputs() {\n\t\tif output.GetAddress().IsSameAs(adr) {\n\t\t\toutput.SetAmount(output.GetAmount() - transfee)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"%s is not an output to the transaction.\", address)\n}\n\nfunc (w *Wallet) SignTransaction(name string) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif err := checkFee(trans); err != nil {\n\t\treturn err\n\t}\n\n\tdata, err := trans.MarshalBinarySig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trcds := trans.GetRCDs()\n\tif len(rcds) == 0 {\n\t\treturn ErrTXNoInputs\n\t}\n\tfor i, rcd := range rcds {\n\t\ta, err := rcd.GetAddress()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tf, err := w.GetFCTAddress(primitives.ConvertFctAddressToUserStr(a))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsig := factoid.NewSingleSignatureBlock(f.SecBytes(), data)\n\t\ttrans.SetSignatureBlock(i, sig)\n\t}\n\n\treturn nil\n}\n\nfunc (w *Wallet) GetTransactions() map[string]*factoid.Transaction {\n\treturn w.transactions\n}\n\nfunc (w *Wallet) ComposeTransaction(name string) (*factom.JSON2Request, error) {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn nil, ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\ttype txreq struct {\n\t\tTransaction string `json:\"transaction\"`\n\t}\n\n\tparam := new(txreq)\n\tif p, err := trans.MarshalBinary(); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tparam.Transaction = hex.EncodeToString(p)\n\t}\n\n\treq := factom.NewJSON2Request(\"factoid-submit\", apiCounter(), param)\n\n\treturn req, nil\n}\n\nfunc checkFee(t *factoid.Transaction) error {\n\tins, err := t.TotalInputs()\n\tif err != nil {\n\t\treturn err\n\t}\n\touts, err := t.TotalOutputs()\n\tif err != nil {\n\t\treturn err\n\t}\n\tecs, err := t.TotalECs()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ fee is the fee that will be paid\n\tfee := int64(ins) - int64(outs) - int64(ecs)\n\t\n\tif fee <= 0 {\n\t\treturn ErrFeeTooLow\n\t}\n\n\trate, err := factom.GetRate()\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\t\/\/ cfee is the fee calculated for the transaction\n\tvar cfee int64\n\tif c, err := t.CalculateFee(rate); err != nil {\n\t\treturn err\n\t} else if c == 0 {\n\t\treturn errors.New(\"wallet: Could not calculate fee\")\n\t} else {\n\t\tcfee = int64(c)\n\t}\n\n\t\/\/ fee is too low\n\tif fee < cfee {\n\t\treturn ErrFeeTooLow\n\t}\n\n\t\/\/ fee is too high (over 10x cfee)\n\tif fee >= cfee*10 {\n\t\treturn ErrFeeTooHigh\n\t}\n\n\treturn nil\n}\n<commit_msg>replace instead of combine outputs to the same address<commit_after>\/\/ Copyright 2016 Factom Foundation\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage wallet\n\nimport (\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"regexp\"\n\n\t\"github.com\/FactomProject\/btcutil\/base58\"\n\t\"github.com\/FactomProject\/factom\"\n\t\"github.com\/FactomProject\/factomd\/common\/factoid\"\n\t\"github.com\/FactomProject\/factomd\/common\/primitives\"\n\t\"github.com\/FactomProject\/goleveldb\/leveldb\"\n)\n\nvar (\n\tErrFeeTooHigh = errors.New(\"wallet: Overpaying Fee\")\n\tErrFeeTooLow = errors.New(\"wallet: Insufficient Fee\")\n\tErrNoSuchAddress = errors.New(\"wallet: No such address\")\n\tErrTXExists = errors.New(\"wallet: Transaction name already exists\")\n\tErrTXNotExists = errors.New(\"wallet: Transaction name was not found\")\n\tErrTXNoInputs = errors.New(\"wallet: Transaction has no inputs\")\n\tErrTXInvalidName = errors.New(\"wallet: Transaction name is not valid\")\n)\n\nfunc (w *Wallet) NewTransaction(name string) error {\n\tif _, exist := w.transactions[name]; exist {\n\t\treturn ErrTXExists\n\t}\n\n\t\/\/ check that the transaction name is valid\n\tif name == \"\" {\n\t\treturn ErrTXInvalidName\n\t}\n\tif len(name) > 32 {\n\t\treturn ErrTXInvalidName\n\t}\n\tif match, err := regexp.MatchString(\"[^a-zA-Z0-9_-]\", name); err != nil {\n\t\treturn err\n\t} else if match {\n\t\treturn ErrTXInvalidName\n\t}\n\n\tt := new(factoid.Transaction)\n\tt.SetTimestamp(primitives.NewTimestampNow())\n\tw.transactions[name] = t\n\treturn nil\n}\n\nfunc (w *Wallet) DeleteTransaction(name string) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\tdelete(w.transactions, name)\n\treturn nil\n}\n\nfunc (w *Wallet) AddInput(name, address string, amount uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\ta, err := w.GetFCTAddress(address)\n\tif err == leveldb.ErrNotFound {\n\t\treturn ErrNoSuchAddress\n\t} else if err != nil {\n\t\treturn err\n\t}\n\tadr := factoid.NewAddress(a.RCDHash())\n\n\t\/\/ First look if this is really an update\n\tfor _, input := range trans.GetInputs() {\n\t\tif input.GetAddress().IsSameAs(adr) {\n\t\t\tinput.SetAmount(amount)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ Add our new input\n\ttrans.AddInput(adr, amount)\n\ttrans.AddRCD(factoid.NewRCD_1(a.PubBytes()))\n\n\treturn nil\n}\n\nfunc (w *Wallet) AddOutput(name, address string, amount uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif !factom.IsValidAddress(address) {\n\t\treturn errors.New(\"Invalid Address\")\n\t}\n\n\tadr := factoid.NewAddress(base58.Decode(address)[2:34])\n\n\t\/\/ First look if this is really an update\n\tfor _, output := range trans.GetOutputs() {\n\t\tif output.GetAddress().IsSameAs(adr) {\n\t\t\toutput.SetAmount(amount)\n\t\t\treturn 
nil\n\t\t}\n\t}\n\t\n\ttrans.AddOutput(adr, amount)\n\n\treturn nil\n}\n\nfunc (w *Wallet) AddECOutput(name, address string, amount uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif !factom.IsValidAddress(address) {\n\t\treturn errors.New(\"Invalid Address\")\n\t}\n\n\tadr := factoid.NewAddress(base58.Decode(address)[2:34])\n\n\t\/\/ First look if this is really an update\n\tfor _, output := range trans.GetECOutputs() {\n\t\tif output.GetAddress().IsSameAs(adr) {\n\t\t\toutput.SetAmount(amount)\n\t\t\treturn nil\n\t\t}\n\t}\n\t\n\ttrans.AddECOutput(adr, amount)\n\n\treturn nil\n}\n\nfunc (w *Wallet) AddFee(name, address string, rate uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\t{\n\t\tins, err := trans.TotalInputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\touts, err := trans.TotalOutputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tecs, err := trans.TotalECs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ins != outs+ecs {\n\t\t\treturn fmt.Errorf(\"Inputs and outputs don't add up\")\n\t\t}\n\t}\n\n\ttransfee, err := trans.CalculateFee(rate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ta, err := w.GetFCTAddress(address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tadr := factoid.NewAddress(a.RCDHash())\n\n\tfor _, input := range trans.GetInputs() {\n\t\tif input.GetAddress().IsSameAs(adr) {\n\t\t\tamt, err := factoid.ValidateAmounts(input.GetAmount(), transfee)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tinput.SetAmount(amt)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"%s is not an input to the transaction.\", address)\n}\n\nfunc (w *Wallet) SubFee(name, address string, rate uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif !factom.IsValidAddress(address) {\n\t\treturn errors.New(\"Invalid Address\")\n\t}\n\n\t{\n\t\tins, err := trans.TotalInputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\touts, err := trans.TotalOutputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tecs, err := trans.TotalECs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ins != outs+ecs {\n\t\t\treturn fmt.Errorf(\"Inputs and outputs don't add up\")\n\t\t}\n\t}\n\n\ttransfee, err := trans.CalculateFee(rate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tadr := factoid.NewAddress(base58.Decode(address)[2:34])\n\n\tfor _, output := range trans.GetOutputs() {\n\t\tif output.GetAddress().IsSameAs(adr) {\n\t\t\toutput.SetAmount(output.GetAmount() - transfee)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"%s is not an output to the transaction.\", address)\n}\n\nfunc (w *Wallet) SignTransaction(name string) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif err := checkFee(trans); err != nil {\n\t\treturn err\n\t}\n\n\tdata, err := trans.MarshalBinarySig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trcds := trans.GetRCDs()\n\tif len(rcds) == 0 {\n\t\treturn ErrTXNoInputs\n\t}\n\tfor i, rcd := range rcds {\n\t\ta, err := rcd.GetAddress()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tf, err := w.GetFCTAddress(primitives.ConvertFctAddressToUserStr(a))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsig := factoid.NewSingleSignatureBlock(f.SecBytes(), data)\n\t\ttrans.SetSignatureBlock(i, 
sig)\n\t}\n\n\treturn nil\n}\n\nfunc (w *Wallet) GetTransactions() map[string]*factoid.Transaction {\n\treturn w.transactions\n}\n\nfunc (w *Wallet) ComposeTransaction(name string) (*factom.JSON2Request, error) {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn nil, ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\ttype txreq struct {\n\t\tTransaction string `json:\"transaction\"`\n\t}\n\n\tparam := new(txreq)\n\tif p, err := trans.MarshalBinary(); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tparam.Transaction = hex.EncodeToString(p)\n\t}\n\n\treq := factom.NewJSON2Request(\"factoid-submit\", apiCounter(), param)\n\n\treturn req, nil\n}\n\nfunc checkFee(t *factoid.Transaction) error {\n\tins, err := t.TotalInputs()\n\tif err != nil {\n\t\treturn err\n\t}\n\touts, err := t.TotalOutputs()\n\tif err != nil {\n\t\treturn err\n\t}\n\tecs, err := t.TotalECs()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ fee is the fee that will be paid\n\tfee := int64(ins) - int64(outs) - int64(ecs)\n\t\n\tif fee <= 0 {\n\t\treturn ErrFeeTooLow\n\t}\n\n\trate, err := factom.GetRate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ cfee is the fee calculated for the transaction\n\tvar cfee int64\n\tif c, err := t.CalculateFee(rate); err != nil {\n\t\treturn err\n\t} else if c == 0 {\n\t\treturn errors.New(\"wallet: Could not calculate fee\")\n\t} else {\n\t\tcfee = int64(c)\n\t}\n\n\t\/\/ fee is too low\n\tif fee < cfee {\n\t\treturn ErrFeeTooLow\n\t}\n\n\t\/\/ fee is too high (over 10x cfee)\n\tif fee >= cfee*10 {\n\t\treturn ErrFeeTooHigh\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package cli\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/flynn\/flynn\/Godeps\/_workspace\/src\/github.com\/flynn\/go-docopt\"\n\t\"github.com\/flynn\/flynn\/bootstrap\"\n)\n\nfunc init() {\n\tRegister(\"bootstrap\", runBootstrap, `\nusage: flynn-host bootstrap [--min-hosts=<min>] [--json] [<manifest>]\n\nOptions:\n -n, --min-hosts=<min> minimum number of hosts required to be online [default: 1]\n --json format log output as json\n\nBootstrap layer 1 using the provided manifest`)\n}\n\nfunc readBootstrapManifest(name string) ([]byte, error) {\n\tif name == \"\" || name == \"-\" {\n\t\treturn ioutil.ReadAll(os.Stdin)\n\t}\n\treturn ioutil.ReadFile(name)\n}\n\nvar manifest []byte\n\nfunc runBootstrap(args *docopt.Args) {\n\tlog.SetFlags(log.Lmicroseconds)\n\tlogf := textLogger\n\tif args.Bool[\"--json\"] {\n\t\tlogf = jsonLogger\n\t}\n\n\tvar err error\n\tmanifest, err = readBootstrapManifest(args.String[\"<manifest>\"])\n\tif err != nil {\n\t\tlog.Fatalln(\"Error reading manifest:\", err)\n\t}\n\n\tch := make(chan *bootstrap.StepInfo)\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tfor si := range ch {\n\t\t\tlogf(si)\n\t\t}\n\t\tclose(done)\n\t}()\n\n\tminHosts, _ := strconv.Atoi(args.String[\"<min>\"])\n\terr = bootstrap.Run(manifest, ch, minHosts)\n\t<-done\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc highlightBytePosition(manifest []byte, pos int64) (line, col int, highlight string) {\n\t\/\/ This function is a modified version of a function in Camlistore written by Brad Fitzpatrick\n\t\/\/ https:\/\/github.com\/bradfitz\/camlistore\/blob\/830c6966a11ddb7834a05b6106b2530284a4d036\/pkg\/errorutil\/highlight.go\n\tline = 1\n\tvar lastLine string\n\tvar currLine bytes.Buffer\n\tfor i := int64(0); i < pos; i++ {\n\t\tb := manifest[i]\n\t\tif b == '\\n' {\n\t\t\tlastLine = 
currLine.String()\n\t\t\tcurrLine.Reset()\n\t\t\tline++\n\t\t\tcol = 1\n\t\t} else {\n\t\t\tcol++\n\t\t\tcurrLine.WriteByte(b)\n\t\t}\n\t}\n\tif line > 1 {\n\t\thighlight += fmt.Sprintf(\"%5d: %s\\n\", line-1, lastLine)\n\t}\n\thighlight += fmt.Sprintf(\"%5d: %s\\n\", line, currLine.String())\n\thighlight += fmt.Sprintf(\"%s^\\n\", strings.Repeat(\" \", col+5))\n\treturn\n}\n\nfunc textLogger(si *bootstrap.StepInfo) {\n\tswitch si.State {\n\tcase \"start\":\n\t\tlog.Printf(\"%s %s\", si.Action, si.ID)\n\tcase \"done\":\n\t\tif s, ok := si.StepData.(fmt.Stringer); ok {\n\t\t\tlog.Printf(\"%s %s %s\", si.Action, si.ID, s)\n\t\t}\n\tcase \"error\":\n\t\tif serr, ok := si.Err.(*json.SyntaxError); ok {\n\t\t\tline, col, highlight := highlightBytePosition(manifest, serr.Offset)\n\t\t\tfmt.Printf(\"Error parsing JSON: %s\\nAt line %d, column %d (offset %d):\\n%s\", si.Err, line, col, serr.Offset, highlight)\n\t\t\treturn\n\t\t}\n\t\tlog.Printf(\"%s %s error: %s\", si.Action, si.ID, si.Error)\n\t}\n}\n\nfunc jsonLogger(si *bootstrap.StepInfo) {\n\tjson.NewEncoder(os.Stdout).Encode(si)\n}\n<commit_msg>host\/cli: Fix bootstrap min-hosts argument<commit_after>package cli\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/flynn\/flynn\/Godeps\/_workspace\/src\/github.com\/flynn\/go-docopt\"\n\t\"github.com\/flynn\/flynn\/bootstrap\"\n)\n\nfunc init() {\n\tRegister(\"bootstrap\", runBootstrap, `\nusage: flynn-host bootstrap [--min-hosts=<min>] [--json] [<manifest>]\n\nOptions:\n -n, --min-hosts=<min> minimum number of hosts required to be online [default: 1]\n --json format log output as json\n\nBootstrap layer 1 using the provided manifest`)\n}\n\nfunc readBootstrapManifest(name string) ([]byte, error) {\n\tif name == \"\" || name == \"-\" {\n\t\treturn ioutil.ReadAll(os.Stdin)\n\t}\n\treturn ioutil.ReadFile(name)\n}\n\nvar manifest []byte\n\nfunc runBootstrap(args *docopt.Args) {\n\tlog.SetFlags(log.Lmicroseconds)\n\tlogf := textLogger\n\tif args.Bool[\"--json\"] {\n\t\tlogf = jsonLogger\n\t}\n\n\tvar err error\n\tmanifest, err = readBootstrapManifest(args.String[\"<manifest>\"])\n\tif err != nil {\n\t\tlog.Fatalln(\"Error reading manifest:\", err)\n\t}\n\n\tch := make(chan *bootstrap.StepInfo)\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tfor si := range ch {\n\t\t\tlogf(si)\n\t\t}\n\t\tclose(done)\n\t}()\n\n\tminHosts, _ := strconv.Atoi(args.String[\"--min-hosts\"])\n\terr = bootstrap.Run(manifest, ch, minHosts)\n\t<-done\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc highlightBytePosition(manifest []byte, pos int64) (line, col int, highlight string) {\n\t\/\/ This function is a modified version of a function in Camlistore written by Brad Fitzpatrick\n\t\/\/ https:\/\/github.com\/bradfitz\/camlistore\/blob\/830c6966a11ddb7834a05b6106b2530284a4d036\/pkg\/errorutil\/highlight.go\n\tline = 1\n\tvar lastLine string\n\tvar currLine bytes.Buffer\n\tfor i := int64(0); i < pos; i++ {\n\t\tb := manifest[i]\n\t\tif b == '\\n' {\n\t\t\tlastLine = currLine.String()\n\t\t\tcurrLine.Reset()\n\t\t\tline++\n\t\t\tcol = 1\n\t\t} else {\n\t\t\tcol++\n\t\t\tcurrLine.WriteByte(b)\n\t\t}\n\t}\n\tif line > 1 {\n\t\thighlight += fmt.Sprintf(\"%5d: %s\\n\", line-1, lastLine)\n\t}\n\thighlight += fmt.Sprintf(\"%5d: %s\\n\", line, currLine.String())\n\thighlight += fmt.Sprintf(\"%s^\\n\", strings.Repeat(\" \", col+5))\n\treturn\n}\n\nfunc textLogger(si *bootstrap.StepInfo) {\n\tswitch si.State {\n\tcase 
\"start\":\n\t\tlog.Printf(\"%s %s\", si.Action, si.ID)\n\tcase \"done\":\n\t\tif s, ok := si.StepData.(fmt.Stringer); ok {\n\t\t\tlog.Printf(\"%s %s %s\", si.Action, si.ID, s)\n\t\t}\n\tcase \"error\":\n\t\tif serr, ok := si.Err.(*json.SyntaxError); ok {\n\t\t\tline, col, highlight := highlightBytePosition(manifest, serr.Offset)\n\t\t\tfmt.Printf(\"Error parsing JSON: %s\\nAt line %d, column %d (offset %d):\\n%s\", si.Err, line, col, serr.Offset, highlight)\n\t\t\treturn\n\t\t}\n\t\tlog.Printf(\"%s %s error: %s\", si.Action, si.ID, si.Error)\n\t}\n}\n\nfunc jsonLogger(si *bootstrap.StepInfo) {\n\tjson.NewEncoder(os.Stdout).Encode(si)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The math package provides basic constants and mathematical functions.\npackage math\n\n\/\/ Mathematical constants.\n\/\/ Reference: http:\/\/www.research.att.com\/~njas\/sequences\/Axxxxxx\nconst (\n\tE\t= 2.71828182845904523536028747135266249775724709369995957496696763; \/\/ A001113\n\tPi\t= 3.14159265358979323846264338327950288419716939937510582097494459; \/\/ A000796\n\tPhi\t= 1.61803398874989484820458683436563811772030917980576286213544862; \/\/ A001622\n\n\tSqrt2\t= 1.41421356237309504880168872420969807856967187537694807317667974; \/\/ A002193\n\tSqrtE\t= 1.64872127070012814684865078781416357165377610071014801157507931; \/\/ A019774\n\tSqrtPi\t= 1.77245385090551602729816748334114518279754945612238712821380779; \/\/ A002161\n\tSqrtPhi\t= 1.27201964951406896425242246173749149171560804184009624861664038; \/\/ A139339\n\n\tLn2\t= 0.693147180559945309417232121458176568075500134360255254120680009; \/\/ A002162\n\tLog2E\t= 1\/Ln2;\n\tLn10\t= 2.30258509299404568401799145468436420760110148862877297603332790; \/\/ A002392\n\tLog10E\t= 1\/Ln10;\n\n\tMaxFloat32\t= 3.40282346638528860e+38;\n\tMinFloat32\t= 1.40129846432481707e-45;\n\tMaxFloat64\t= 1.7976931348623157e+308;\n\tMinFloat64\t= 5.0e-324;\n)\n\n\/\/ BUG(rsc): The manual should define the special cases for all of these functions.\n<commit_msg>constants for integer limits<commit_after>\/\/ Copyright 2009 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The math package provides basic constants and mathematical functions.\npackage math\n\n\/\/ Mathematical constants.\n\/\/ Reference: http:\/\/www.research.att.com\/~njas\/sequences\/Axxxxxx\nconst (\n\tE\t= 2.71828182845904523536028747135266249775724709369995957496696763; \/\/ A001113\n\tPi\t= 3.14159265358979323846264338327950288419716939937510582097494459; \/\/ A000796\n\tPhi\t= 1.61803398874989484820458683436563811772030917980576286213544862; \/\/ A001622\n\n\tSqrt2\t= 1.41421356237309504880168872420969807856967187537694807317667974; \/\/ A002193\n\tSqrtE\t= 1.64872127070012814684865078781416357165377610071014801157507931; \/\/ A019774\n\tSqrtPi\t= 1.77245385090551602729816748334114518279754945612238712821380779; \/\/ A002161\n\tSqrtPhi\t= 1.27201964951406896425242246173749149171560804184009624861664038; \/\/ A139339\n\n\tLn2\t= 0.693147180559945309417232121458176568075500134360255254120680009; \/\/ A002162\n\tLog2E\t= 1\/Ln2;\n\tLn10\t= 2.30258509299404568401799145468436420760110148862877297603332790; \/\/ A002392\n\tLog10E\t= 1\/Ln10;\n)\n\n\/\/ Limit values\nconst (\n\tMaxFloat32\t= 3.40282346638528860e+38;\n\tMinFloat32\t= 1.40129846432481707e-45;\n\tMaxFloat64\t= 1.7976931348623157e+308;\n\tMinFloat64\t= 5.0e-324;\n\n\tMaxInt8 = 1<<7 - 1;\n\tMinInt8 = -1<<7;\n\tMaxInt16 = 1<<15 - 1;\n\tMinInt16 = -1<<15;\n\tMaxInt32 = 1<<31 - 1;\n\tMinInt32 = -1<<31;\n\tMaxInt64 = 1<<63 - 1;\n\tMinInt64 = -1<<63;\n\tMaxUint8 = 1<<8 - 1;\n\tMaxUint16 = 1<<16 - 1;\n\tMaxUint32 = 1<<32 - 1;\n\tMaxUint64 = 1<<64 - 1;\n)\n\n\/\/ BUG(rsc): The manual should define the special cases for all of these functions.\n<|endoftext|>"} {"text":"<commit_before>package integration\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"encoding\/json\"\n\n\t\"github.com\/containers\/buildah\"\n\t\"github.com\/containers\/image\/copy\"\n\t\"github.com\/containers\/image\/signature\"\n\t\"github.com\/containers\/image\/storage\"\n\t\"github.com\/containers\/image\/transports\/alltransports\"\n\t\"github.com\/containers\/image\/types\"\n\tsstorage \"github.com\/containers\/storage\"\n\t\"github.com\/containers\/storage\/pkg\/reexec\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. 
\"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n\t\"github.com\/pkg\/errors\"\n)\n\nvar (\n\tINTEGRATION_ROOT string\n\tSTORAGE_OPTIONS = \"--storage-driver vfs\"\n\tARTIFACT_DIR = \"\/tmp\/.artifacts\"\n\tCACHE_IMAGES = []string{\"alpine\", \"busybox\", FEDORA_MINIMAL}\n\tRESTORE_IMAGES = []string{\"alpine\", \"busybox\"}\n\tALPINE = \"docker.io\/library\/alpine:latest\"\n\tBB_GLIBC = \"docker.io\/library\/busybox:glibc\"\n\tFEDORA_MINIMAL = \"registry.fedoraproject.org\/fedora-minimal:latest\"\n\tdefaultWaitTimeout = 90\n)\n\n\/\/ BuildAhSession wraps the gexec.session so we can extend it\ntype BuildAhSession struct {\n\t*gexec.Session\n}\n\n\/\/ BuildAhTest struct for command line options\ntype BuildAhTest struct {\n\tBuildAhBinary string\n\tRunRoot string\n\tStorageOptions string\n\tArtifactPath string\n\tTempDir string\n\tSignaturePath string\n\tRoot string\n\tRegistriesConf string\n}\n\n\/\/ TestBuildAh ginkgo master function\nfunc TestBuildAh(t *testing.T) {\n\tif reexec.Init() {\n\t\tos.Exit(1)\n\t}\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"Buildah Suite\")\n}\n\nvar _ = BeforeSuite(func() {\n\t\/\/Cache images\n\tcwd, _ := os.Getwd()\n\tINTEGRATION_ROOT = filepath.Join(cwd, \"..\/..\/\")\n\tbuildah := BuildahCreate(\"\/tmp\")\n\tbuildah.ArtifactPath = ARTIFACT_DIR\n\tif _, err := os.Stat(ARTIFACT_DIR); os.IsNotExist(err) {\n\t\tif err = os.Mkdir(ARTIFACT_DIR, 0777); err != nil {\n\t\t\tfmt.Printf(\"%q\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\tfor _, image := range CACHE_IMAGES {\n\t\tfmt.Printf(\"Caching %s...\\n\", image)\n\t\tif err := buildah.CreateArtifact(image); err != nil {\n\t\t\tfmt.Printf(\"%q\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n})\n\n\/\/ CreateTempDirin\nfunc CreateTempDirInTempDir() (string, error) {\n\treturn ioutil.TempDir(\"\", \"buildah_test\")\n}\n\n\/\/ BuildahCreate a BuildAhTest instance for the tests\nfunc BuildahCreate(tempDir string) BuildAhTest {\n\tcwd, _ := os.Getwd()\n\n\tbuildAhBinary := filepath.Join(cwd, \"..\/..\/buildah\")\n\tif os.Getenv(\"BUILDAH_BINARY\") != \"\" {\n\t\tbuildAhBinary = os.Getenv(\"BUILDAH_BINARY\")\n\t}\n\tstorageOptions := STORAGE_OPTIONS\n\tif os.Getenv(\"STORAGE_OPTIONS\") != \"\" {\n\t\tstorageOptions = os.Getenv(\"STORAGE_OPTIONS\")\n\t}\n\n\treturn BuildAhTest{\n\t\tBuildAhBinary: buildAhBinary,\n\t\tRunRoot: filepath.Join(tempDir, \"runroot\"),\n\t\tRoot: filepath.Join(tempDir, \"root\"),\n\t\tStorageOptions: storageOptions,\n\t\tArtifactPath: ARTIFACT_DIR,\n\t\tTempDir: tempDir,\n\t\tSignaturePath: \"..\/..\/tests\/policy.json\",\n\t\tRegistriesConf: \"..\/..\/registries.conf\",\n\t}\n}\n\n\/\/MakeOptions assembles all the buildah main options\nfunc (p *BuildAhTest) MakeOptions() []string {\n\treturn strings.Split(fmt.Sprintf(\"--root %s --runroot %s --registries-conf %s\",\n\t\tp.Root, p.RunRoot, p.RegistriesConf), \" \")\n}\n\n\/\/ BuildAh is the exec call to buildah on the filesystem\nfunc (p *BuildAhTest) BuildAh(args []string) *BuildAhSession {\n\tbuildAhOptions := p.MakeOptions()\n\tbuildAhOptions = append(buildAhOptions, strings.Split(p.StorageOptions, \" \")...)\n\tbuildAhOptions = append(buildAhOptions, args...)\n\tfmt.Printf(\"Running: %s %s\\n\", p.BuildAhBinary, strings.Join(buildAhOptions, \" \"))\n\tcommand := exec.Command(p.BuildAhBinary, buildAhOptions...)\n\tsession, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)\n\tif err != nil {\n\t\tFail(fmt.Sprintf(\"unable to run buildah command: %s\", strings.Join(buildAhOptions, \" \")))\n\t}\n\treturn 
&BuildAhSession{session}\n}\n\n\/\/ Cleanup cleans up the temporary store\nfunc (p *BuildAhTest) Cleanup() {\n\t\/\/ Nuke tempdir\n\tif err := os.RemoveAll(p.TempDir); err != nil {\n\t\tfmt.Printf(\"%q\\n\", err)\n\t}\n}\n\n\/\/ GrepString takes session output and behaves like grep. It returns a bool\n\/\/ if successful and an array of strings on positive matches\nfunc (s *BuildAhSession) GrepString(term string) (bool, []string) {\n\tvar (\n\t\tgreps []string\n\t\tmatches bool\n\t)\n\n\tfor _, line := range strings.Split(s.OutputToString(), \"\\n\") {\n\t\tif strings.Contains(line, term) {\n\t\t\tmatches = true\n\t\t\tgreps = append(greps, line)\n\t\t}\n\t}\n\treturn matches, greps\n}\n\n\/\/ OutputToString formats session output to string\nfunc (s *BuildAhSession) OutputToString() string {\n\tfields := strings.Fields(fmt.Sprintf(\"%s\", s.Out.Contents()))\n\treturn strings.Join(fields, \" \")\n}\n\n\/\/ OutputToStringArray returns the output as a []string\n\/\/ where each array item is a line split by newline\nfunc (s *BuildAhSession) OutputToStringArray() []string {\n\toutput := fmt.Sprintf(\"%s\", s.Out.Contents())\n\treturn strings.Split(output, \"\\n\")\n}\n\n\/\/ IsJSONOutputValid attempts to unmarshal the session buffer\n\/\/ and if successful, returns true, else false\nfunc (s *BuildAhSession) IsJSONOutputValid() bool {\n\tvar i interface{}\n\tif err := json.Unmarshal(s.Out.Contents(), &i); err != nil {\n\t\tfmt.Println(err)\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/ WaitWithDefaultTimeout waits for the session to exit, using the default timeout\nfunc (s *BuildAhSession) WaitWithDefaultTimeout() {\n\ts.Wait(defaultWaitTimeout)\n}\n\n\/\/ SystemExec is used to exec a system command to check its exit code or output\nfunc (p *BuildAhTest) SystemExec(command string, args []string) *BuildAhSession {\n\tc := exec.Command(command, args...)\n\tsession, err := gexec.Start(c, GinkgoWriter, GinkgoWriter)\n\tif err != nil {\n\t\tFail(fmt.Sprintf(\"unable to run command: %s %s\", command, strings.Join(args, \" \")))\n\t}\n\treturn &BuildAhSession{session}\n}\n\n\/\/ CreateArtifact creates a cached image in the artifact dir\nfunc (p *BuildAhTest) CreateArtifact(image string) error {\n\timageName := fmt.Sprintf(\"docker:\/\/%s\", image)\n\tsystemContext := types.SystemContext{\n\t\tSignaturePolicyPath: p.SignaturePath,\n\t}\n\tpolicy, err := signature.DefaultPolicy(&systemContext)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error loading signature policy: %v\", err)\n\t}\n\tpolicyContext, err := signature.NewPolicyContext(policy)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error loading signature policy: %v\", err)\n\t}\n\tdefer func() {\n\t\t_ = policyContext.Destroy()\n\t}()\n\toptions := &copy.Options{}\n\n\timportRef, err := alltransports.ParseImageName(imageName)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error parsing image name %v: %v\", image, err)\n\t}\n\n\timageDir := strings.Replace(image, \"\/\", \"_\", -1)\n\texportTo := filepath.Join(\"dir:\", p.ArtifactPath, imageDir)\n\texportRef, err := alltransports.ParseImageName(exportTo)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error parsing image name %v: %v\", exportTo, err)\n\t}\n\n\treturn copy.Image(context.Background(), policyContext, exportRef, importRef, options)\n}\n\n\/\/ RestoreArtifact puts the cached image into our test store\nfunc (p *BuildAhTest) RestoreArtifact(image string) error {\n\tstoreOptions := sstorage.DefaultStoreOptions\n\tstoreOptions.GraphDriverName = \"vfs\"\n\t\/\/storeOptions.GraphDriverOptions = storageOptions\n\tstoreOptions.GraphRoot = p.Root\n\tstoreOptions.RunRoot = 
p.RunRoot\n\tstore, err := sstorage.GetStore(storeOptions)\n\n\toptions := &copy.Options{}\n\tif err != nil {\n\t\treturn errors.Errorf(\"error opening storage: %v\", err)\n\t}\n\tdefer func() {\n\t\t_, _ = store.Shutdown(false)\n\t}()\n\n\tstorage.Transport.SetStore(store)\n\tref, err := storage.Transport.ParseStoreReference(store, image)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error parsing image name: %v\", err)\n\t}\n\n\timageDir := strings.Replace(image, \"\/\", \"_\", -1)\n\timportFrom := fmt.Sprintf(\"dir:%s\", filepath.Join(p.ArtifactPath, imageDir))\n\timportRef, err := alltransports.ParseImageName(importFrom)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error parsing image name %v: %v\", image, err)\n\t}\n\tsystemContext := types.SystemContext{\n\t\tSignaturePolicyPath: p.SignaturePath,\n\t}\n\tpolicy, err := signature.DefaultPolicy(&systemContext)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error loading signature policy: %v\", err)\n\t}\n\tpolicyContext, err := signature.NewPolicyContext(policy)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error loading signature policy: %v\", err)\n\t}\n\tdefer func() {\n\t\t_ = policyContext.Destroy()\n\t}()\n\terr = copy.Image(context.Background(), policyContext, ref, importRef, options)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error importing %s: %v\", importFrom, err)\n\t}\n\treturn nil\n}\n\n\/\/ RestoreAllArtifacts unpacks all cached images\nfunc (p *BuildAhTest) RestoreAllArtifacts() error {\n\tfor _, image := range RESTORE_IMAGES {\n\t\tif err := p.RestoreArtifact(image); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ StringInSlice determines if a string is in a string slice, returns bool\nfunc StringInSlice(s string, sl []string) bool {\n\tfor _, i := range sl {\n\t\tif i == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/LineInOuputStartsWith returns true if a line in a\n\/\/ session output starts with the supplied string\nfunc (s *BuildAhSession) LineInOuputStartsWith(term string) bool {\n\tfor _, i := range s.OutputToStringArray() {\n\t\tif strings.HasPrefix(i, term) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/LineInOuputContains returns true if a line in a\n\/\/ session output contains the supplied string\nfunc (s *BuildAhSession) LineInOuputContains(term string) bool {\n\tfor _, i := range s.OutputToStringArray() {\n\t\tif strings.Contains(i, term) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ InspectImageJSON takes the session output of an image inspect\n\/\/ and returns it as a buildah.BuilderInfo\nfunc (s *BuildAhSession) InspectImageJSON() buildah.BuilderInfo {\n\tvar i buildah.BuilderInfo\n\terr := json.Unmarshal(s.Out.Contents(), &i)\n\tExpect(err).To(BeNil())\n\treturn i\n}\n<commit_msg>Update calls in e2e to addres 1101<commit_after>package integration\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"encoding\/json\"\n\n\t\"github.com\/containers\/buildah\"\n\t\"github.com\/containers\/image\/copy\"\n\t\"github.com\/containers\/image\/signature\"\n\t\"github.com\/containers\/image\/storage\"\n\t\"github.com\/containers\/image\/transports\/alltransports\"\n\t\"github.com\/containers\/image\/types\"\n\tsstorage \"github.com\/containers\/storage\"\n\t\"github.com\/containers\/storage\/pkg\/reexec\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. 
\"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n\t\"github.com\/pkg\/errors\"\n)\n\nvar (\n\tINTEGRATION_ROOT string\n\tSTORAGE_OPTIONS = \"--storage-driver vfs\"\n\tARTIFACT_DIR = \"\/tmp\/.artifacts\"\n\tCACHE_IMAGES = []string{\"alpine\", \"busybox\", FEDORA_MINIMAL}\n\tRESTORE_IMAGES = []string{\"alpine\", \"busybox\"}\n\tALPINE = \"docker.io\/library\/alpine:latest\"\n\tBB_GLIBC = \"docker.io\/library\/busybox:glibc\"\n\tFEDORA_MINIMAL = \"registry.fedoraproject.org\/fedora-minimal:latest\"\n\tdefaultWaitTimeout = 90\n)\n\n\/\/ BuildAhSession wraps the gexec.session so we can extend it\ntype BuildAhSession struct {\n\t*gexec.Session\n}\n\n\/\/ BuildAhTest struct for command line options\ntype BuildAhTest struct {\n\tBuildAhBinary string\n\tRunRoot string\n\tStorageOptions string\n\tArtifactPath string\n\tTempDir string\n\tSignaturePath string\n\tRoot string\n\tRegistriesConf string\n}\n\n\/\/ TestBuildAh ginkgo master function\nfunc TestBuildAh(t *testing.T) {\n\tif reexec.Init() {\n\t\tos.Exit(1)\n\t}\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"Buildah Suite\")\n}\n\nvar _ = BeforeSuite(func() {\n\t\/\/Cache images\n\tcwd, _ := os.Getwd()\n\tINTEGRATION_ROOT = filepath.Join(cwd, \"..\/..\/\")\n\tbuildah := BuildahCreate(\"\/tmp\")\n\tbuildah.ArtifactPath = ARTIFACT_DIR\n\tif _, err := os.Stat(ARTIFACT_DIR); os.IsNotExist(err) {\n\t\tif err = os.Mkdir(ARTIFACT_DIR, 0777); err != nil {\n\t\t\tfmt.Printf(\"%q\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\tfor _, image := range CACHE_IMAGES {\n\t\tfmt.Printf(\"Caching %s...\\n\", image)\n\t\tif err := buildah.CreateArtifact(image); err != nil {\n\t\t\tfmt.Printf(\"%q\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n})\n\n\/\/ CreateTempDirin\nfunc CreateTempDirInTempDir() (string, error) {\n\treturn ioutil.TempDir(\"\", \"buildah_test\")\n}\n\n\/\/ BuildahCreate a BuildAhTest instance for the tests\nfunc BuildahCreate(tempDir string) BuildAhTest {\n\tcwd, _ := os.Getwd()\n\n\tbuildAhBinary := filepath.Join(cwd, \"..\/..\/buildah\")\n\tif os.Getenv(\"BUILDAH_BINARY\") != \"\" {\n\t\tbuildAhBinary = os.Getenv(\"BUILDAH_BINARY\")\n\t}\n\tstorageOptions := STORAGE_OPTIONS\n\tif os.Getenv(\"STORAGE_OPTIONS\") != \"\" {\n\t\tstorageOptions = os.Getenv(\"STORAGE_OPTIONS\")\n\t}\n\n\treturn BuildAhTest{\n\t\tBuildAhBinary: buildAhBinary,\n\t\tRunRoot: filepath.Join(tempDir, \"runroot\"),\n\t\tRoot: filepath.Join(tempDir, \"root\"),\n\t\tStorageOptions: storageOptions,\n\t\tArtifactPath: ARTIFACT_DIR,\n\t\tTempDir: tempDir,\n\t\tSignaturePath: \"..\/..\/tests\/policy.json\",\n\t\tRegistriesConf: \"..\/..\/registries.conf\",\n\t}\n}\n\n\/\/MakeOptions assembles all the buildah main options\nfunc (p *BuildAhTest) MakeOptions() []string {\n\treturn strings.Split(fmt.Sprintf(\"--root %s --runroot %s --registries-conf %s\",\n\t\tp.Root, p.RunRoot, p.RegistriesConf), \" \")\n}\n\n\/\/ BuildAh is the exec call to buildah on the filesystem\nfunc (p *BuildAhTest) BuildAh(args []string) *BuildAhSession {\n\tbuildAhOptions := p.MakeOptions()\n\tbuildAhOptions = append(buildAhOptions, strings.Split(p.StorageOptions, \" \")...)\n\tbuildAhOptions = append(buildAhOptions, args...)\n\tfmt.Printf(\"Running: %s %s\\n\", p.BuildAhBinary, strings.Join(buildAhOptions, \" \"))\n\tcommand := exec.Command(p.BuildAhBinary, buildAhOptions...)\n\tsession, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)\n\tif err != nil {\n\t\tFail(fmt.Sprintf(\"unable to run buildah command: %s\", strings.Join(buildAhOptions, \" \")))\n\t}\n\treturn 
&BuildAhSession{session}\n}\n\n\/\/ Cleanup cleans up the temporary store\nfunc (p *BuildAhTest) Cleanup() {\n\t\/\/ Nuke tempdir\n\tif err := os.RemoveAll(p.TempDir); err != nil {\n\t\tfmt.Printf(\"%q\\n\", err)\n\t}\n}\n\n\/\/ GrepString takes session output and behaves like grep. It returns a bool\n\/\/ if successful and an array of strings on positive matches\nfunc (s *BuildAhSession) GrepString(term string) (bool, []string) {\n\tvar (\n\t\tgreps []string\n\t\tmatches bool\n\t)\n\n\tfor _, line := range strings.Split(s.OutputToString(), \"\\n\") {\n\t\tif strings.Contains(line, term) {\n\t\t\tmatches = true\n\t\t\tgreps = append(greps, line)\n\t\t}\n\t}\n\treturn matches, greps\n}\n\n\/\/ OutputToString formats session output to string\nfunc (s *BuildAhSession) OutputToString() string {\n\tfields := strings.Fields(fmt.Sprintf(\"%s\", s.Out.Contents()))\n\treturn strings.Join(fields, \" \")\n}\n\n\/\/ OutputToStringArray returns the output as a []string\n\/\/ where each array item is a line split by newline\nfunc (s *BuildAhSession) OutputToStringArray() []string {\n\toutput := fmt.Sprintf(\"%s\", s.Out.Contents())\n\treturn strings.Split(output, \"\\n\")\n}\n\n\/\/ IsJSONOutputValid attempts to unmarshal the session buffer\n\/\/ and if successful, returns true, else false\nfunc (s *BuildAhSession) IsJSONOutputValid() bool {\n\tvar i interface{}\n\tif err := json.Unmarshal(s.Out.Contents(), &i); err != nil {\n\t\tfmt.Println(err)\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (s *BuildAhSession) WaitWithDefaultTimeout() {\n\ts.Wait(defaultWaitTimeout)\n}\n\n\/\/ SystemExec is used to exec a system command to check its exit code or output\nfunc (p *BuildAhTest) SystemExec(command string, args []string) *BuildAhSession {\n\tc := exec.Command(command, args...)\n\tsession, err := gexec.Start(c, GinkgoWriter, GinkgoWriter)\n\tif err != nil {\n\t\tFail(fmt.Sprintf(\"unable to run command: %s %s\", command, strings.Join(args, \" \")))\n\t}\n\treturn &BuildAhSession{session}\n}\n\n\/\/ CreateArtifact creates a cached image in the artifact dir\nfunc (p *BuildAhTest) CreateArtifact(image string) error {\n\timageName := fmt.Sprintf(\"docker:\/\/%s\", image)\n\tsystemContext := types.SystemContext{\n\t\tSignaturePolicyPath: p.SignaturePath,\n\t}\n\tpolicy, err := signature.DefaultPolicy(&systemContext)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error loading signature policy: %v\", err)\n\t}\n\tpolicyContext, err := signature.NewPolicyContext(policy)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error loading signature policy: %v\", err)\n\t}\n\tdefer func() {\n\t\t_ = policyContext.Destroy()\n\t}()\n\toptions := &copy.Options{}\n\n\timportRef, err := alltransports.ParseImageName(imageName)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error parsing image name %v: %v\", image, err)\n\t}\n\n\timageDir := strings.Replace(image, \"\/\", \"_\", -1)\n\texportTo := filepath.Join(\"dir:\", p.ArtifactPath, imageDir)\n\texportRef, err := alltransports.ParseImageName(exportTo)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error parsing image name %v: %v\", exportTo, err)\n\t}\n\n\t_, err = copy.Image(context.Background(), policyContext, exportRef, importRef, options)\n\treturn err\n}\n\n\/\/ RestoreArtifact puts the cached image into our test store\nfunc (p *BuildAhTest) RestoreArtifact(image string) error {\n\tstoreOptions := sstorage.DefaultStoreOptions\n\tstoreOptions.GraphDriverName = \"vfs\"\n\t\/\/storeOptions.GraphDriverOptions = storageOptions\n\tstoreOptions.GraphRoot = 
p.Root\n\tstoreOptions.RunRoot = p.RunRoot\n\tstore, err := sstorage.GetStore(storeOptions)\n\n\toptions := &copy.Options{}\n\tif err != nil {\n\t\treturn errors.Errorf(\"error opening storage: %v\", err)\n\t}\n\tdefer func() {\n\t\t_, _ = store.Shutdown(false)\n\t}()\n\n\tstorage.Transport.SetStore(store)\n\tref, err := storage.Transport.ParseStoreReference(store, image)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error parsing image name: %v\", err)\n\t}\n\n\timageDir := strings.Replace(image, \"\/\", \"_\", -1)\n\timportFrom := fmt.Sprintf(\"dir:%s\", filepath.Join(p.ArtifactPath, imageDir))\n\timportRef, err := alltransports.ParseImageName(importFrom)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error parsing image name %v: %v\", image, err)\n\t}\n\tsystemContext := types.SystemContext{\n\t\tSignaturePolicyPath: p.SignaturePath,\n\t}\n\tpolicy, err := signature.DefaultPolicy(&systemContext)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error loading signature policy: %v\", err)\n\t}\n\tpolicyContext, err := signature.NewPolicyContext(policy)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error loading signature policy: %v\", err)\n\t}\n\tdefer func() {\n\t\t_ = policyContext.Destroy()\n\t}()\n\t_, err = copy.Image(context.Background(), policyContext, ref, importRef, options)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error importing %s: %v\", importFrom, err)\n\t}\n\treturn nil\n}\n\n\/\/ RestoreAllArtifacts unpacks all cached images\nfunc (p *BuildAhTest) RestoreAllArtifacts() error {\n\tfor _, image := range RESTORE_IMAGES {\n\t\tif err := p.RestoreArtifact(image); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ StringInSlice determines if a string is in a string slice, returns bool\nfunc StringInSlice(s string, sl []string) bool {\n\tfor _, i := range sl {\n\t\tif i == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ LineInOutputStartsWith returns true if a line in a\n\/\/ session output starts with the supplied string\nfunc (s *BuildAhSession) LineInOutputStartsWith(term string) bool {\n\tfor _, i := range s.OutputToStringArray() {\n\t\tif strings.HasPrefix(i, term) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ LineInOutputContains returns true if a line in a\n\/\/ session output contains the supplied string\nfunc (s *BuildAhSession) LineInOutputContains(term string) bool {\n\tfor _, i := range s.OutputToStringArray() {\n\t\tif strings.Contains(i, term) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ InspectImageJSON takes the session output of an inspect\n\/\/ image and returns json\nfunc (s *BuildAhSession) InspectImageJSON() buildah.BuilderInfo {\n\tvar i buildah.BuilderInfo\n\terr := json.Unmarshal(s.Out.Contents(), &i)\n\tExpect(err).To(BeNil())\n\treturn i\n}\n<|endoftext|>"} {"text":"<commit_before>package smoke_tests\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/config\"\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/workflowhelpers\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. 
\"github.com\/onsi\/gomega\"\n\n\t\"testing\"\n)\n\nvar (\n\tconfigPath = os.Getenv(\"CONFIG_PATH\")\n\ttestConfig = loadTestConfig(configPath)\n\twfh *workflowhelpers.ReproducibleTestSuiteSetup\n)\n\nfunc TestLifecycle(t *testing.T) {\n\tSynchronizedBeforeSuite(func() []byte {\n\n\t\twfh = workflowhelpers.NewTestSuiteSetup(&testConfig.Config)\n\t\twfh.Setup()\n\n\t\treturn []byte{}\n\t}, func([]byte) {\n\t})\n\n\tSynchronizedAfterSuite(func() {\n\t}, func() {\n\t\twfh.Teardown()\n\t})\n\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"Lifecycle Suite\")\n}\n\nfunc loadTestConfig(configPath string) TestConfig {\n\tif configPath == \"\" {\n\t\tpanic(fmt.Errorf(\"Path to config file is empty -- Did you set CONFIG_PATH?\"))\n\t}\n\tconfigFile, err := os.Open(configPath)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"Could not open config file at %s -- ERROR %s\", configPath, err.Error()))\n\t}\n\n\tdefer configFile.Close()\n\tvar testConfig TestConfig\n\terr = json.NewDecoder(configFile).Decode(&testConfig)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"Could not decode config json -- ERROR: %s\", err.Error()))\n\t}\n\n\treturn testConfig\n}\n\ntype TestConfig struct {\n\tconfig.Config\n\n\tTestPlans []TestPlan `json:\"plans\"`\n\tServiceOffering string `json:\"service_offering\"`\n\tAppType string `json:\"app_type\"`\n}\n\ntype TestPlan struct {\n\tName string `json:\"name\"`\n}\n<commit_msg>Rename suite spec name<commit_after>package smoke_tests\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/config\"\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/workflowhelpers\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"testing\"\n)\n\nvar (\n\tconfigPath = os.Getenv(\"CONFIG_PATH\")\n\ttestConfig = loadTestConfig(configPath)\n\twfh *workflowhelpers.ReproducibleTestSuiteSetup\n)\n\nfunc TestLifecycle(t *testing.T) {\n\tSynchronizedBeforeSuite(func() []byte {\n\n\t\twfh = workflowhelpers.NewTestSuiteSetup(&testConfig.Config)\n\t\twfh.Setup()\n\n\t\treturn []byte{}\n\t}, func([]byte) {\n\t})\n\n\tSynchronizedAfterSuite(func() {\n\t}, func() {\n\t\twfh.Teardown()\n\t})\n\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"Smoke Tests Suite\")\n}\n\nfunc loadTestConfig(configPath string) TestConfig {\n\tif configPath == \"\" {\n\t\tpanic(fmt.Errorf(\"Path to config file is empty -- Did you set CONFIG_PATH?\"))\n\t}\n\tconfigFile, err := os.Open(configPath)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"Could not open config file at %s -- ERROR %s\", configPath, err.Error()))\n\t}\n\n\tdefer configFile.Close()\n\tvar testConfig TestConfig\n\terr = json.NewDecoder(configFile).Decode(&testConfig)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"Could not decode config json -- ERROR: %s\", err.Error()))\n\t}\n\n\treturn testConfig\n}\n\ntype TestConfig struct {\n\tconfig.Config\n\n\tTestPlans []TestPlan `json:\"plans\"`\n\tServiceOffering string `json:\"service_offering\"`\n\tAppType string `json:\"app_type\"`\n}\n\ntype TestPlan struct {\n\tName string `json:\"name\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package container \/\/ import \"github.com\/docker\/docker\/integration\/container\"\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/docker\/docker\/api\/types\"\n\tcontainertypes 
\"github.com\/docker\/docker\/api\/types\/container\"\n\t\"github.com\/docker\/docker\/api\/types\/versions\"\n\t\"github.com\/docker\/docker\/client\"\n\t\"github.com\/docker\/docker\/integration\/internal\/container\"\n\t\"github.com\/docker\/docker\/internal\/test\/daemon\"\n\t\"github.com\/docker\/docker\/internal\/test\/request\"\n\t\"gotest.tools\/assert\"\n\tis \"gotest.tools\/assert\/cmp\"\n\t\"gotest.tools\/fs\"\n\t\"gotest.tools\/skip\"\n)\n\n\/\/ testIpcCheckDevExists checks whether a given mount (identified by its\n\/\/ major:minor pair from \/proc\/self\/mountinfo) exists on the host system.\n\/\/\n\/\/ The format of \/proc\/self\/mountinfo is like:\n\/\/\n\/\/ 29 23 0:24 \/ \/dev\/shm rw,nosuid,nodev shared:4 - tmpfs tmpfs rw\n\/\/ ^^^^\\\n\/\/ - this is the minor:major we look for\nfunc testIpcCheckDevExists(mm string) (bool, error) {\n\tf, err := os.Open(\"\/proc\/self\/mountinfo\")\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer f.Close()\n\n\ts := bufio.NewScanner(f)\n\tfor s.Scan() {\n\t\tfields := strings.Fields(s.Text())\n\t\tif len(fields) < 7 {\n\t\t\tcontinue\n\t\t}\n\t\tif fields[2] == mm {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, s.Err()\n}\n\n\/\/ testIpcNonePrivateShareable is a helper function to test \"none\",\n\/\/ \"private\" and \"shareable\" modes.\nfunc testIpcNonePrivateShareable(t *testing.T, mode string, mustBeMounted bool, mustBeShared bool) {\n\tdefer setupTest(t)()\n\n\tcfg := containertypes.Config{\n\t\tImage: \"busybox\",\n\t\tCmd: []string{\"top\"},\n\t}\n\thostCfg := containertypes.HostConfig{\n\t\tIpcMode: containertypes.IpcMode(mode),\n\t}\n\tclient := testEnv.APIClient()\n\tctx := context.Background()\n\n\tresp, err := client.ContainerCreate(ctx, &cfg, &hostCfg, nil, \"\")\n\tassert.NilError(t, err)\n\tassert.Check(t, is.Equal(len(resp.Warnings), 0))\n\n\terr = client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})\n\tassert.NilError(t, err)\n\n\t\/\/ get major:minor pair for \/dev\/shm from container's \/proc\/self\/mountinfo\n\tcmd := \"awk '($5 == \\\"\/dev\/shm\\\") {printf $3}' \/proc\/self\/mountinfo\"\n\tresult, err := container.Exec(ctx, client, resp.ID, []string{\"sh\", \"-c\", cmd})\n\tassert.NilError(t, err)\n\tmm := result.Combined()\n\tif !mustBeMounted {\n\t\tassert.Check(t, is.Equal(mm, \"\"))\n\t\t\/\/ no more checks to perform\n\t\treturn\n\t}\n\tassert.Check(t, is.Equal(true, regexp.MustCompile(\"^[0-9]+:[0-9]+$\").MatchString(mm)))\n\n\tshared, err := testIpcCheckDevExists(mm)\n\tassert.NilError(t, err)\n\tt.Logf(\"[testIpcPrivateShareable] ipcmode: %v, ipcdev: %v, shared: %v, mustBeShared: %v\\n\", mode, mm, shared, mustBeShared)\n\tassert.Check(t, is.Equal(shared, mustBeShared))\n}\n\n\/\/ TestIpcModeNone checks the container \"none\" IPC mode\n\/\/ (--ipc none) works as expected. It makes sure there is no\n\/\/ \/dev\/shm mount inside the container.\nfunc TestIpcModeNone(t *testing.T) {\n\tskip.If(t, testEnv.IsRemoteDaemon)\n\n\ttestIpcNonePrivateShareable(t, \"none\", false, false)\n}\n\n\/\/ TestAPIIpcModePrivate checks the container private IPC mode\n\/\/ (--ipc private) works as expected. It gets the minor:major pair\n\/\/ of \/dev\/shm mount from the container, and makes sure there is no\n\/\/ such pair on the host.\nfunc TestIpcModePrivate(t *testing.T) {\n\tskip.If(t, testEnv.IsRemoteDaemon)\n\n\ttestIpcNonePrivateShareable(t, \"private\", true, false)\n}\n\n\/\/ TestAPIIpcModeShareable checks the container shareable IPC mode\n\/\/ (--ipc shareable) works as expected. 
It gets the major:minor pair\n\/\/ of \/dev\/shm mount from the container, and makes sure such pair\n\/\/ also exists on the host.\nfunc TestIpcModeShareable(t *testing.T) {\n\tskip.If(t, testEnv.IsRemoteDaemon)\n\n\ttestIpcNonePrivateShareable(t, \"shareable\", true, true)\n}\n\n\/\/ testIpcContainer is a helper function to test --ipc container:NNN mode in various scenarios\nfunc testIpcContainer(t *testing.T, donorMode string, mustWork bool) {\n\tt.Helper()\n\n\tdefer setupTest(t)()\n\n\tcfg := containertypes.Config{\n\t\tImage: \"busybox\",\n\t\tCmd: []string{\"top\"},\n\t}\n\thostCfg := containertypes.HostConfig{\n\t\tIpcMode: containertypes.IpcMode(donorMode),\n\t}\n\tctx := context.Background()\n\tclient := testEnv.APIClient()\n\n\t\/\/ create and start the \"donor\" container\n\tresp, err := client.ContainerCreate(ctx, &cfg, &hostCfg, nil, \"\")\n\tassert.NilError(t, err)\n\tassert.Check(t, is.Equal(len(resp.Warnings), 0))\n\tname1 := resp.ID\n\n\terr = client.ContainerStart(ctx, name1, types.ContainerStartOptions{})\n\tassert.NilError(t, err)\n\n\t\/\/ create and start the second container\n\thostCfg.IpcMode = containertypes.IpcMode(\"container:\" + name1)\n\tresp, err = client.ContainerCreate(ctx, &cfg, &hostCfg, nil, \"\")\n\tassert.NilError(t, err)\n\tassert.Check(t, is.Equal(len(resp.Warnings), 0))\n\tname2 := resp.ID\n\n\terr = client.ContainerStart(ctx, name2, types.ContainerStartOptions{})\n\tif !mustWork {\n\t\t\/\/ start should fail with a specific error\n\t\tassert.Check(t, is.ErrorContains(err, \"non-shareable IPC\"))\n\t\t\/\/ no more checks to perform here\n\t\treturn\n\t}\n\n\t\/\/ start should succeed\n\tassert.NilError(t, err)\n\n\t\/\/ check that IPC is shared\n\t\/\/ 1. create a file in the first container\n\t_, err = container.Exec(ctx, client, name1, []string{\"sh\", \"-c\", \"printf covfefe > \/dev\/shm\/bar\"})\n\tassert.NilError(t, err)\n\t\/\/ 2. check it's the same file in the second one\n\tresult, err := container.Exec(ctx, client, name2, []string{\"cat\", \"\/dev\/shm\/bar\"})\n\tassert.NilError(t, err)\n\tout := result.Combined()\n\tassert.Check(t, is.Equal(true, regexp.MustCompile(\"^covfefe$\").MatchString(out)))\n}\n\n\/\/ TestAPIIpcModeShareableAndContainer checks that\n\/\/ 1) a container created with --ipc container:ID can use IPC of another shareable container.\n\/\/ 2) a container created with --ipc container:ID can NOT use IPC of another private container.\nfunc TestAPIIpcModeShareableAndContainer(t *testing.T) {\n\tskip.If(t, testEnv.IsRemoteDaemon)\n\n\ttestIpcContainer(t, \"shareable\", true)\n\n\ttestIpcContainer(t, \"private\", false)\n}\n\n\/* TestAPIIpcModeHost checks that a container created with --ipc host\n * can use IPC of the host system.\n *\/\nfunc TestAPIIpcModeHost(t *testing.T) {\n\tskip.If(t, testEnv.IsRemoteDaemon)\n\tskip.If(t, testEnv.IsUserNamespace)\n\n\tcfg := containertypes.Config{\n\t\tImage: \"busybox\",\n\t\tCmd: []string{\"top\"},\n\t}\n\thostCfg := containertypes.HostConfig{\n\t\tIpcMode: containertypes.IpcMode(\"host\"),\n\t}\n\tctx := context.Background()\n\n\tclient := testEnv.APIClient()\n\tresp, err := client.ContainerCreate(ctx, &cfg, &hostCfg, nil, \"\")\n\tassert.NilError(t, err)\n\tassert.Check(t, is.Equal(len(resp.Warnings), 0))\n\tname := resp.ID\n\n\terr = client.ContainerStart(ctx, name, types.ContainerStartOptions{})\n\tassert.NilError(t, err)\n\n\t\/\/ check that IPC is shared\n\t\/\/ 1. 
create a file inside container\n\t_, err = container.Exec(ctx, client, name, []string{\"sh\", \"-c\", \"printf covfefe > \/dev\/shm\/.\" + name})\n\tassert.NilError(t, err)\n\t\/\/ 2. check it's the same on the host\n\tbytes, err := ioutil.ReadFile(\"\/dev\/shm\/.\" + name)\n\tassert.NilError(t, err)\n\tassert.Check(t, is.Equal(\"covfefe\", string(bytes)))\n\t\/\/ 3. clean up\n\t_, err = container.Exec(ctx, client, name, []string{\"rm\", \"-f\", \"\/dev\/shm\/.\" + name})\n\tassert.NilError(t, err)\n}\n\n\/\/ testDaemonIpcPrivateShareable is a helper function to test \"private\" and \"shareable\" daemon default ipc modes.\nfunc testDaemonIpcPrivateShareable(t *testing.T, mustBeShared bool, arg ...string) {\n\tdefer setupTest(t)()\n\n\td := daemon.New(t)\n\td.StartWithBusybox(t, arg...)\n\tdefer d.Stop(t)\n\n\tc := d.NewClientT(t)\n\n\tcfg := containertypes.Config{\n\t\tImage: \"busybox\",\n\t\tCmd: []string{\"top\"},\n\t}\n\tctx := context.Background()\n\n\tresp, err := c.ContainerCreate(ctx, &cfg, &containertypes.HostConfig{}, nil, \"\")\n\tassert.NilError(t, err)\n\tassert.Check(t, is.Equal(len(resp.Warnings), 0))\n\n\terr = c.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})\n\tassert.NilError(t, err)\n\n\t\/\/ get major:minor pair for \/dev\/shm from container's \/proc\/self\/mountinfo\n\tcmd := \"awk '($5 == \\\"\/dev\/shm\\\") {printf $3}' \/proc\/self\/mountinfo\"\n\tresult, err := container.Exec(ctx, c, resp.ID, []string{\"sh\", \"-c\", cmd})\n\tassert.NilError(t, err)\n\tmm := result.Combined()\n\tassert.Check(t, is.Equal(true, regexp.MustCompile(\"^[0-9]+:[0-9]+$\").MatchString(mm)))\n\n\tshared, err := testIpcCheckDevExists(mm)\n\tassert.NilError(t, err)\n\tt.Logf(\"[testDaemonIpcPrivateShareable] ipcdev: %v, shared: %v, mustBeShared: %v\\n\", mm, shared, mustBeShared)\n\tassert.Check(t, is.Equal(shared, mustBeShared))\n}\n\n\/\/ TestDaemonIpcModeShareable checks that --default-ipc-mode shareable works as intended.\nfunc TestDaemonIpcModeShareable(t *testing.T) {\n\tskip.If(t, testEnv.IsRemoteDaemon)\n\n\ttestDaemonIpcPrivateShareable(t, true, \"--default-ipc-mode\", \"shareable\")\n}\n\n\/\/ TestDaemonIpcModePrivate checks that --default-ipc-mode private works as intended.\nfunc TestDaemonIpcModePrivate(t *testing.T) {\n\tskip.If(t, testEnv.IsRemoteDaemon)\n\n\ttestDaemonIpcPrivateShareable(t, false, \"--default-ipc-mode\", \"private\")\n}\n\n\/\/ used to check if an IpcMode given in config works as intended\nfunc testDaemonIpcFromConfig(t *testing.T, mode string, mustExist bool) {\n\tconfig := `{\"default-ipc-mode\": \"` + mode + `\"}`\n\tfile := fs.NewFile(t, \"test-daemon-ipc-config\", fs.WithContent(config))\n\tdefer file.Remove()\n\n\ttestDaemonIpcPrivateShareable(t, mustExist, \"--config-file\", file.Path())\n}\n\n\/\/ TestDaemonIpcModePrivateFromConfig checks that \"default-ipc-mode: private\" config works as intended.\nfunc TestDaemonIpcModePrivateFromConfig(t *testing.T) {\n\tskip.If(t, testEnv.IsRemoteDaemon)\n\n\ttestDaemonIpcFromConfig(t, \"private\", false)\n}\n\n\/\/ TestDaemonIpcModeShareableFromConfig checks that \"default-ipc-mode: shareable\" config works as intended.\nfunc TestDaemonIpcModeShareableFromConfig(t *testing.T) {\n\tskip.If(t, testEnv.IsRemoteDaemon)\n\n\ttestDaemonIpcFromConfig(t, \"shareable\", true)\n}\n\n\/\/ TestIpcModeOlderClient checks that older client gets shareable IPC mode\n\/\/ by default, even when the daemon default is private.\nfunc TestIpcModeOlderClient(t *testing.T) {\n\tskip.If(t, 
versions.LessThan(testEnv.DaemonAPIVersion(), \"1.40\"), \"requires a daemon with DefaultIpcMode: private\")\n\tt.Parallel()\n\n\tctx := context.Background()\n\n\t\/\/ pre-check: default ipc mode in daemon is private\n\tc := testEnv.APIClient()\n\tcID := container.Create(t, ctx, c, container.WithAutoRemove)\n\n\tinspect, err := c.ContainerInspect(ctx, cID)\n\tassert.NilError(t, err)\n\tassert.Check(t, is.Equal(string(inspect.HostConfig.IpcMode), \"private\"))\n\n\t\/\/ main check: using older client creates \"shareable\" container\n\tc = request.NewAPIClient(t, client.WithVersion(\"1.39\"))\n\tcID = container.Create(t, ctx, c, container.WithAutoRemove)\n\n\tinspect, err = c.ContainerInspect(ctx, cID)\n\tassert.NilError(t, err)\n\tassert.Check(t, is.Equal(string(inspect.HostConfig.IpcMode), \"shareable\"))\n}\n<commit_msg>TestIpcModeOlderClient: skip if client < 1.40<commit_after>package container \/\/ import \"github.com\/docker\/docker\/integration\/container\"\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/docker\/docker\/api\/types\"\n\tcontainertypes \"github.com\/docker\/docker\/api\/types\/container\"\n\t\"github.com\/docker\/docker\/api\/types\/versions\"\n\t\"github.com\/docker\/docker\/client\"\n\t\"github.com\/docker\/docker\/integration\/internal\/container\"\n\t\"github.com\/docker\/docker\/internal\/test\/daemon\"\n\t\"github.com\/docker\/docker\/internal\/test\/request\"\n\t\"gotest.tools\/assert\"\n\tis \"gotest.tools\/assert\/cmp\"\n\t\"gotest.tools\/fs\"\n\t\"gotest.tools\/skip\"\n)\n\n\/\/ testIpcCheckDevExists checks whether a given mount (identified by its\n\/\/ major:minor pair from \/proc\/self\/mountinfo) exists on the host system.\n\/\/\n\/\/ The format of \/proc\/self\/mountinfo is like:\n\/\/\n\/\/ 29 23 0:24 \/ \/dev\/shm rw,nosuid,nodev shared:4 - tmpfs tmpfs rw\n\/\/ ^^^^\\\n\/\/ - this is the major:minor we look for\nfunc testIpcCheckDevExists(mm string) (bool, error) {\n\tf, err := os.Open(\"\/proc\/self\/mountinfo\")\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer f.Close()\n\n\ts := bufio.NewScanner(f)\n\tfor s.Scan() {\n\t\tfields := strings.Fields(s.Text())\n\t\tif len(fields) < 7 {\n\t\t\tcontinue\n\t\t}\n\t\tif fields[2] == mm {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, s.Err()\n}\n\n\/\/ testIpcNonePrivateShareable is a helper function to test \"none\",\n\/\/ \"private\" and \"shareable\" modes.\nfunc testIpcNonePrivateShareable(t *testing.T, mode string, mustBeMounted bool, mustBeShared bool) {\n\tdefer setupTest(t)()\n\n\tcfg := containertypes.Config{\n\t\tImage: \"busybox\",\n\t\tCmd: []string{\"top\"},\n\t}\n\thostCfg := containertypes.HostConfig{\n\t\tIpcMode: containertypes.IpcMode(mode),\n\t}\n\tclient := testEnv.APIClient()\n\tctx := context.Background()\n\n\tresp, err := client.ContainerCreate(ctx, &cfg, &hostCfg, nil, \"\")\n\tassert.NilError(t, err)\n\tassert.Check(t, is.Equal(len(resp.Warnings), 0))\n\n\terr = client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})\n\tassert.NilError(t, err)\n\n\t\/\/ get major:minor pair for \/dev\/shm from container's \/proc\/self\/mountinfo\n\tcmd := \"awk '($5 == \\\"\/dev\/shm\\\") {printf $3}' \/proc\/self\/mountinfo\"\n\tresult, err := container.Exec(ctx, client, resp.ID, []string{\"sh\", \"-c\", cmd})\n\tassert.NilError(t, err)\n\tmm := result.Combined()\n\tif !mustBeMounted {\n\t\tassert.Check(t, is.Equal(mm, \"\"))\n\t\t\/\/ no more checks to 
perform\n\t\treturn\n\t}\n\tassert.Check(t, is.Equal(true, regexp.MustCompile(\"^[0-9]+:[0-9]+$\").MatchString(mm)))\n\n\tshared, err := testIpcCheckDevExists(mm)\n\tassert.NilError(t, err)\n\tt.Logf(\"[testIpcPrivateShareable] ipcmode: %v, ipcdev: %v, shared: %v, mustBeShared: %v\\n\", mode, mm, shared, mustBeShared)\n\tassert.Check(t, is.Equal(shared, mustBeShared))\n}\n\n\/\/ TestIpcModeNone checks the container \"none\" IPC mode\n\/\/ (--ipc none) works as expected. It makes sure there is no\n\/\/ \/dev\/shm mount inside the container.\nfunc TestIpcModeNone(t *testing.T) {\n\tskip.If(t, testEnv.IsRemoteDaemon)\n\n\ttestIpcNonePrivateShareable(t, \"none\", false, false)\n}\n\n\/\/ TestIpcModePrivate checks the container private IPC mode\n\/\/ (--ipc private) works as expected. It gets the major:minor pair\n\/\/ of \/dev\/shm mount from the container, and makes sure there is no\n\/\/ such pair on the host.\nfunc TestIpcModePrivate(t *testing.T) {\n\tskip.If(t, testEnv.IsRemoteDaemon)\n\n\ttestIpcNonePrivateShareable(t, \"private\", true, false)\n}\n\n\/\/ TestIpcModeShareable checks the container shareable IPC mode\n\/\/ (--ipc shareable) works as expected. It gets the major:minor pair\n\/\/ of \/dev\/shm mount from the container, and makes sure such pair\n\/\/ also exists on the host.\nfunc TestIpcModeShareable(t *testing.T) {\n\tskip.If(t, testEnv.IsRemoteDaemon)\n\n\ttestIpcNonePrivateShareable(t, \"shareable\", true, true)\n}\n\n\/\/ testIpcContainer is a helper function to test --ipc container:NNN mode in various scenarios\nfunc testIpcContainer(t *testing.T, donorMode string, mustWork bool) {\n\tt.Helper()\n\n\tdefer setupTest(t)()\n\n\tcfg := containertypes.Config{\n\t\tImage: \"busybox\",\n\t\tCmd: []string{\"top\"},\n\t}\n\thostCfg := containertypes.HostConfig{\n\t\tIpcMode: containertypes.IpcMode(donorMode),\n\t}\n\tctx := context.Background()\n\tclient := testEnv.APIClient()\n\n\t\/\/ create and start the \"donor\" container\n\tresp, err := client.ContainerCreate(ctx, &cfg, &hostCfg, nil, \"\")\n\tassert.NilError(t, err)\n\tassert.Check(t, is.Equal(len(resp.Warnings), 0))\n\tname1 := resp.ID\n\n\terr = client.ContainerStart(ctx, name1, types.ContainerStartOptions{})\n\tassert.NilError(t, err)\n\n\t\/\/ create and start the second container\n\thostCfg.IpcMode = containertypes.IpcMode(\"container:\" + name1)\n\tresp, err = client.ContainerCreate(ctx, &cfg, &hostCfg, nil, \"\")\n\tassert.NilError(t, err)\n\tassert.Check(t, is.Equal(len(resp.Warnings), 0))\n\tname2 := resp.ID\n\n\terr = client.ContainerStart(ctx, name2, types.ContainerStartOptions{})\n\tif !mustWork {\n\t\t\/\/ start should fail with a specific error\n\t\tassert.Check(t, is.ErrorContains(err, \"non-shareable IPC\"))\n\t\t\/\/ no more checks to perform here\n\t\treturn\n\t}\n\n\t\/\/ start should succeed\n\tassert.NilError(t, err)\n\n\t\/\/ check that IPC is shared\n\t\/\/ 1. create a file in the first container\n\t_, err = container.Exec(ctx, client, name1, []string{\"sh\", \"-c\", \"printf covfefe > \/dev\/shm\/bar\"})\n\tassert.NilError(t, err)\n\t\/\/ 2. 
check it's the same file in the second one\n\tresult, err := container.Exec(ctx, client, name2, []string{\"cat\", \"\/dev\/shm\/bar\"})\n\tassert.NilError(t, err)\n\tout := result.Combined()\n\tassert.Check(t, is.Equal(true, regexp.MustCompile(\"^covfefe$\").MatchString(out)))\n}\n\n\/\/ TestAPIIpcModeShareableAndContainer checks that\n\/\/ 1) a container created with --ipc container:ID can use IPC of another shareable container.\n\/\/ 2) a container created with --ipc container:ID can NOT use IPC of another private container.\nfunc TestAPIIpcModeShareableAndContainer(t *testing.T) {\n\tskip.If(t, testEnv.IsRemoteDaemon)\n\n\ttestIpcContainer(t, \"shareable\", true)\n\n\ttestIpcContainer(t, \"private\", false)\n}\n\n\/* TestAPIIpcModeHost checks that a container created with --ipc host\n * can use IPC of the host system.\n *\/\nfunc TestAPIIpcModeHost(t *testing.T) {\n\tskip.If(t, testEnv.IsRemoteDaemon)\n\tskip.If(t, testEnv.IsUserNamespace)\n\n\tcfg := containertypes.Config{\n\t\tImage: \"busybox\",\n\t\tCmd: []string{\"top\"},\n\t}\n\thostCfg := containertypes.HostConfig{\n\t\tIpcMode: containertypes.IpcMode(\"host\"),\n\t}\n\tctx := context.Background()\n\n\tclient := testEnv.APIClient()\n\tresp, err := client.ContainerCreate(ctx, &cfg, &hostCfg, nil, \"\")\n\tassert.NilError(t, err)\n\tassert.Check(t, is.Equal(len(resp.Warnings), 0))\n\tname := resp.ID\n\n\terr = client.ContainerStart(ctx, name, types.ContainerStartOptions{})\n\tassert.NilError(t, err)\n\n\t\/\/ check that IPC is shared\n\t\/\/ 1. create a file inside container\n\t_, err = container.Exec(ctx, client, name, []string{\"sh\", \"-c\", \"printf covfefe > \/dev\/shm\/.\" + name})\n\tassert.NilError(t, err)\n\t\/\/ 2. check it's the same on the host\n\tbytes, err := ioutil.ReadFile(\"\/dev\/shm\/.\" + name)\n\tassert.NilError(t, err)\n\tassert.Check(t, is.Equal(\"covfefe\", string(bytes)))\n\t\/\/ 3. 
clean up\n\t_, err = container.Exec(ctx, client, name, []string{\"rm\", \"-f\", \"\/dev\/shm\/.\" + name})\n\tassert.NilError(t, err)\n}\n\n\/\/ testDaemonIpcPrivateShareable is a helper function to test \"private\" and \"shareable\" daemon default ipc modes.\nfunc testDaemonIpcPrivateShareable(t *testing.T, mustBeShared bool, arg ...string) {\n\tdefer setupTest(t)()\n\n\td := daemon.New(t)\n\td.StartWithBusybox(t, arg...)\n\tdefer d.Stop(t)\n\n\tc := d.NewClientT(t)\n\n\tcfg := containertypes.Config{\n\t\tImage: \"busybox\",\n\t\tCmd: []string{\"top\"},\n\t}\n\tctx := context.Background()\n\n\tresp, err := c.ContainerCreate(ctx, &cfg, &containertypes.HostConfig{}, nil, \"\")\n\tassert.NilError(t, err)\n\tassert.Check(t, is.Equal(len(resp.Warnings), 0))\n\n\terr = c.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})\n\tassert.NilError(t, err)\n\n\t\/\/ get major:minor pair for \/dev\/shm from container's \/proc\/self\/mountinfo\n\tcmd := \"awk '($5 == \\\"\/dev\/shm\\\") {printf $3}' \/proc\/self\/mountinfo\"\n\tresult, err := container.Exec(ctx, c, resp.ID, []string{\"sh\", \"-c\", cmd})\n\tassert.NilError(t, err)\n\tmm := result.Combined()\n\tassert.Check(t, is.Equal(true, regexp.MustCompile(\"^[0-9]+:[0-9]+$\").MatchString(mm)))\n\n\tshared, err := testIpcCheckDevExists(mm)\n\tassert.NilError(t, err)\n\tt.Logf(\"[testDaemonIpcPrivateShareable] ipcdev: %v, shared: %v, mustBeShared: %v\\n\", mm, shared, mustBeShared)\n\tassert.Check(t, is.Equal(shared, mustBeShared))\n}\n\n\/\/ TestDaemonIpcModeShareable checks that --default-ipc-mode shareable works as intended.\nfunc TestDaemonIpcModeShareable(t *testing.T) {\n\tskip.If(t, testEnv.IsRemoteDaemon)\n\n\ttestDaemonIpcPrivateShareable(t, true, \"--default-ipc-mode\", \"shareable\")\n}\n\n\/\/ TestDaemonIpcModePrivate checks that --default-ipc-mode private works as intended.\nfunc TestDaemonIpcModePrivate(t *testing.T) {\n\tskip.If(t, testEnv.IsRemoteDaemon)\n\n\ttestDaemonIpcPrivateShareable(t, false, \"--default-ipc-mode\", \"private\")\n}\n\n\/\/ used to check if an IpcMode given in config works as intended\nfunc testDaemonIpcFromConfig(t *testing.T, mode string, mustExist bool) {\n\tconfig := `{\"default-ipc-mode\": \"` + mode + `\"}`\n\tfile := fs.NewFile(t, \"test-daemon-ipc-config\", fs.WithContent(config))\n\tdefer file.Remove()\n\n\ttestDaemonIpcPrivateShareable(t, mustExist, \"--config-file\", file.Path())\n}\n\n\/\/ TestDaemonIpcModePrivateFromConfig checks that \"default-ipc-mode: private\" config works as intended.\nfunc TestDaemonIpcModePrivateFromConfig(t *testing.T) {\n\tskip.If(t, testEnv.IsRemoteDaemon)\n\n\ttestDaemonIpcFromConfig(t, \"private\", false)\n}\n\n\/\/ TestDaemonIpcModeShareableFromConfig checks that \"default-ipc-mode: shareable\" config works as intended.\nfunc TestDaemonIpcModeShareableFromConfig(t *testing.T) {\n\tskip.If(t, testEnv.IsRemoteDaemon)\n\n\ttestDaemonIpcFromConfig(t, \"shareable\", true)\n}\n\n\/\/ TestIpcModeOlderClient checks that older client gets shareable IPC mode\n\/\/ by default, even when the daemon default is private.\nfunc TestIpcModeOlderClient(t *testing.T) {\n\tskip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), \"1.40\"), \"requires a daemon with DefaultIpcMode: private\")\n\tc := testEnv.APIClient()\n\tskip.If(t, versions.LessThan(c.ClientVersion(), \"1.40\"), \"requires client API >= 1.40\")\n\n\tt.Parallel()\n\n\tctx := context.Background()\n\n\t\/\/ pre-check: default ipc mode in daemon is private\n\tcID := container.Create(t, ctx, c, 
container.WithAutoRemove)\n\n\tinspect, err := c.ContainerInspect(ctx, cID)\n\tassert.NilError(t, err)\n\tassert.Check(t, is.Equal(string(inspect.HostConfig.IpcMode), \"private\"))\n\n\t\/\/ main check: using older client creates \"shareable\" container\n\tc = request.NewAPIClient(t, client.WithVersion(\"1.39\"))\n\tcID = container.Create(t, ctx, c, container.WithAutoRemove)\n\n\tinspect, err = c.ContainerInspect(ctx, cID)\n\tassert.NilError(t, err)\n\tassert.Check(t, is.Equal(string(inspect.HostConfig.IpcMode), \"shareable\"))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 Google Inc. All Rights Reserved.\n\/\/ This file is available under the Apache license.\n\/\/ +build integration\n\npackage mtail_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/google\/mtail\/internal\/mtail\"\n\t\"github.com\/google\/mtail\/internal\/testutil\"\n)\n\nfunc TestLogGlobMatchesAfterStartupWithPollInterval(t *testing.T) {\n\tfor _, pollInterval := range []time.Duration{0, 250 * time.Millisecond} {\n\t\tt.Run(fmt.Sprintf(\"%s\", pollInterval), func(t *testing.T) {\n\t\t\ttmpDir, rmTmpDir := testutil.TestTempDir(t)\n\t\t\tdefer rmTmpDir()\n\n\t\t\tlogDir := path.Join(tmpDir, \"logs\")\n\t\t\tprogDir := path.Join(tmpDir, \"progs\")\n\t\t\terr := os.Mkdir(logDir, 0700)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\terr = os.Mkdir(progDir, 0700)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tdefer testutil.TestChdir(t, logDir)()\n\n\t\t\tm, stopM := mtail.TestStartServer(t, pollInterval, false, mtail.ProgramPath(progDir), mtail.LogPathPatterns(logDir+\"\/log*\"))\n\t\t\tdefer stopM()\n\n\t\t\tstartLogCount := mtail.TestGetMetric(t, m.Addr(), \"log_count\")\n\t\t\tstartLineCount := mtail.TestGetMetric(t, m.Addr(), \"line_count\")\n\n\t\t\t{\n\t\t\t\tlogFile := path.Join(logDir, \"log\")\n\t\t\t\tf := testutil.TestOpenFile(t, logFile)\n\t\t\t\tn, err := f.WriteString(\"line 1\\n\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t\tglog.Infof(\"Wrote %d bytes\", n)\n\t\t\t\ttime.Sleep(time.Second)\n\n\t\t\t\tlogCount := mtail.TestGetMetric(t, m.Addr(), \"log_count\")\n\t\t\t\tlineCount := mtail.TestGetMetric(t, m.Addr(), \"line_count\")\n\n\t\t\t\tif logCount.(float64)-startLogCount.(float64) != 1. {\n\t\t\t\t\tt.Errorf(\"Unexpected log count: got %g, want 1\", logCount.(float64)-startLogCount.(float64))\n\t\t\t\t}\n\t\t\t\tif lineCount.(float64)-startLineCount.(float64) != 1. {\n\t\t\t\t\tt.Errorf(\"Unexpected line count: got %g, want 1\", lineCount.(float64)-startLineCount.(float64))\n\t\t\t\t}\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t}\n\t\t\t{\n\n\t\t\t\tlogFile := path.Join(logDir, \"log1\")\n\t\t\t\tf := testutil.TestOpenFile(t, logFile)\n\t\t\t\tn, err := f.WriteString(\"line 1\\n\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t\tglog.Infof(\"Wrote %d bytes\", n)\n\t\t\t\ttime.Sleep(time.Second)\n\n\t\t\t\tlogCount := mtail.TestGetMetric(t, m.Addr(), \"log_count\")\n\t\t\t\tlineCount := mtail.TestGetMetric(t, m.Addr(), \"line_count\")\n\n\t\t\t\tif logCount.(float64)-startLogCount.(float64) != 2. {\n\t\t\t\t\tt.Errorf(\"Unexpected log count: got %g, want 2\", logCount.(float64)-startLogCount.(float64))\n\t\t\t\t}\n\t\t\t\tif lineCount.(float64)-startLineCount.(float64) != 2. 
{\n\t\t\t\t\tt.Errorf(\"Unexpected line count: got %g, want 2\", lineCount.(float64)-startLineCount.(float64))\n\t\t\t\t}\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t}\n\t\t})\n\t}\n}\n<commit_msg>Refactor test to use testutil.<commit_after>\/\/ Copyright 2019 Google Inc. All Rights Reserved.\n\/\/ This file is available under the Apache license.\n\/\/ +build integration\n\npackage mtail_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/google\/mtail\/internal\/mtail\"\n\t\"github.com\/google\/mtail\/internal\/testutil\"\n)\n\nfunc TestLogGlobMatchesAfterStartupWithPollInterval(t *testing.T) {\n\tfor _, pollInterval := range []time.Duration{0, 250 * time.Millisecond} {\n\t\tt.Run(fmt.Sprintf(\"%s\", pollInterval), func(t *testing.T) {\n\t\t\ttmpDir, rmTmpDir := testutil.TestTempDir(t)\n\t\t\tdefer rmTmpDir()\n\n\t\t\tlogDir := path.Join(tmpDir, \"logs\")\n\t\t\tprogDir := path.Join(tmpDir, \"progs\")\n\t\t\ttestutil.FatalIfErr(t, os.Mkdir(logDir, 0700))\n\t\t\ttestutil.FatalIfErr(t, os.Mkdir(progDir, 0700))\n\t\t\tdefer testutil.TestChdir(t, logDir)()\n\n\t\t\tm, stopM := mtail.TestStartServer(t, pollInterval, false, mtail.ProgramPath(progDir), mtail.LogPathPatterns(logDir+\"\/log*\"))\n\t\t\tdefer stopM()\n\n\t\t\tstartLogCount := mtail.TestGetMetric(t, m.Addr(), \"log_count\")\n\t\t\tstartLineCount := mtail.TestGetMetric(t, m.Addr(), \"line_count\")\n\n\t\t\t{\n\t\t\t\tlogFile := path.Join(logDir, \"log\")\n\t\t\t\tf := testutil.TestOpenFile(t, logFile)\n\t\t\t\tn, err := f.WriteString(\"line 1\\n\")\n\t\t\t\ttestutil.FatalIfErr(t, err)\n\t\t\t\tglog.Infof(\"Wrote %d bytes\", n)\n\t\t\t\ttime.Sleep(time.Second)\n\n\t\t\t\tlogCount := mtail.TestGetMetric(t, m.Addr(), \"log_count\")\n\t\t\t\tlineCount := mtail.TestGetMetric(t, m.Addr(), \"line_count\")\n\n\t\t\t\tif logCount.(float64)-startLogCount.(float64) != 1. {\n\t\t\t\t\tt.Errorf(\"Unexpected log count: got %g, want 1\", logCount.(float64)-startLogCount.(float64))\n\t\t\t\t}\n\t\t\t\tif lineCount.(float64)-startLineCount.(float64) != 1. {\n\t\t\t\t\tt.Errorf(\"Unexpected line count: got %g, want 1\", lineCount.(float64)-startLineCount.(float64))\n\t\t\t\t}\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t}\n\t\t\t{\n\n\t\t\t\tlogFile := path.Join(logDir, \"log1\")\n\t\t\t\tf := testutil.TestOpenFile(t, logFile)\n\t\t\t\tn, err := f.WriteString(\"line 1\\n\")\n\t\t\t\ttestutil.FatalIfErr(t, err)\n\t\t\t\tglog.Infof(\"Wrote %d bytes\", n)\n\t\t\t\ttime.Sleep(time.Second)\n\n\t\t\t\tlogCount := mtail.TestGetMetric(t, m.Addr(), \"log_count\")\n\t\t\t\tlineCount := mtail.TestGetMetric(t, m.Addr(), \"line_count\")\n\n\t\t\t\tif logCount.(float64)-startLogCount.(float64) != 2. {\n\t\t\t\t\tt.Errorf(\"Unexpected log count: got %g, want 2\", logCount.(float64)-startLogCount.(float64))\n\t\t\t\t}\n\t\t\t\tif lineCount.(float64)-startLineCount.(float64) != 2. 
{\n\t\t\t\t\tt.Errorf(\"Unexpected line count: got %g, want 2\", lineCount.(float64)-startLineCount.(float64))\n\t\t\t\t}\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"cred-alert\/inflator\"\n\t\"cred-alert\/kolsch\"\n\t\"cred-alert\/mimetype\"\n\t\"cred-alert\/scanners\"\n\t\"cred-alert\/scanners\/diffscanner\"\n\t\"cred-alert\/scanners\/dirscanner\"\n\t\"cred-alert\/scanners\/filescanner\"\n\t\"cred-alert\/sniff\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/lager\"\n\n\t\"github.com\/jessevdk\/go-flags\"\n\t\"github.com\/mgutz\/ansi\"\n)\n\ntype Opts struct {\n\tFile string `short:\"f\" long:\"file\" description:\"the file to scan\" value-name:\"FILE\"`\n\tDiff bool `long:\"diff\" description:\"content to be scanned is a git diff\"`\n}\n\nvar red = ansi.ColorFunc(\"red+b\")\nvar green = ansi.ColorFunc(\"green+b\")\n\nfunc main() {\n\tvar opts Opts\n\n\t_, err := flags.ParseArgs(&opts, os.Args)\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n\n\tlogger := kolsch.NewLogger()\n\tsniffer := sniff.NewDefaultSniffer()\n\tinflate := inflator.New()\n\tdefer inflate.Close()\n\n\tvar credsFound int\n\thandler := func(logger lager.Logger, line scanners.Line) error {\n\t\tcredsFound++\n\t\tfmt.Printf(\"%s %s:%d\\n\", red(\"[CRED]\"), line.Path, line.LineNumber)\n\n\t\treturn nil\n\t}\n\n\tif opts.File != \"\" {\n\t\tfh, err := os.Open(opts.File)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err.Error())\n\t\t}\n\n\t\tbr := bufio.NewReader(fh)\n\t\tmime, isArchive := mimetype.IsArchive(logger, br)\n\t\tif isArchive {\n\t\t\tinflateDir, err := ioutil.TempDir(\"\", \"cred-alert-cli\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalln(err.Error())\n\t\t\t}\n\t\t\tdefer os.RemoveAll(inflateDir)\n\n\t\t\tviolationsDir, err := ioutil.TempDir(\"\", \"cred-alert-cli-violations\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalln(err.Error())\n\t\t\t}\n\n\t\t\tarchiveViolationHandler := func(logger lager.Logger, line scanners.Line) error {\n\t\t\t\tcredsFound++\n\t\t\t\trelPath, err := filepath.Rel(inflateDir, line.Path)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tdestPath := filepath.Join(violationsDir, relPath)\n\t\t\t\terr = os.MkdirAll(filepath.Dir(destPath), os.ModePerm)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\terr = persistFile(line.Path, destPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tfmt.Printf(\"%s %s:%d\\n\", red(\"[CRED]\"), destPath, line.LineNumber)\n\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tinflateStart := time.Now()\n\t\t\tfmt.Printf(\"Inflating archive... 
\", inflateDir)\n\t\t\terr = inflate.Inflate(logger, opts.File, inflateDir)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"%s\\n\", red(\"FAILED\"))\n\t\t\t\tlog.Fatalln(err.Error())\n\t\t\t}\n\t\t\tfmt.Printf(\"%s (%s)\\n\", green(\"DONE\"), time.Since(inflateStart))\n\n\t\t\tscanStart := time.Now()\n\t\t\tdirScanner := dirscanner.New(archiveViolationHandler, sniffer)\n\t\t\terr = dirScanner.Scan(logger, inflateDir)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalln(err.Error())\n\t\t\t}\n\n\t\t\tfmt.Println()\n\t\t\tfmt.Println(\"Scan complete!\")\n\t\t\tfmt.Println()\n\t\t\tfmt.Println(\"Time taken:\", time.Since(scanStart))\n\t\t\tfmt.Println(\"Credentials found:\", credsFound)\n\t\t\tfmt.Println()\n\t\t\tfmt.Println(\"Any archive inflation errors can be found in: \", inflate.LogPath())\n\t\t} else {\n\t\t\tif strings.HasPrefix(mime, \"text\") {\n\t\t\t\tscanFile(logger, handler, sniffer, br, opts.File)\n\t\t\t}\n\t\t}\n\t} else if opts.Diff {\n\t\thandleDiff(logger, handler, opts)\n\n\t} else {\n\t\tscanFile(logger, handler, sniffer, os.Stdin, \"STDIN\")\n\t}\n\n\tif credsFound > 0 {\n\t\tos.Exit(3)\n\t}\n}\n\nfunc persistFile(srcPath, destPath string) error {\n\tdestFile, err := os.Create(destPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer destFile.Close()\n\n\tsrcFile, err := os.Open(srcPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer srcFile.Close()\n\n\t_, err = io.Copy(destFile, srcFile)\n\treturn err\n}\n\nfunc scanFile(\n\tlogger lager.Logger,\n\thandler sniff.ViolationHandlerFunc,\n\tsniffer sniff.Sniffer,\n\tf io.Reader,\n\tname string,\n) {\n\tscanner := filescanner.New(f, name)\n\tsniffer.Sniff(logger, scanner, handler)\n}\n\nfunc handleDiff(logger lager.Logger, handler sniff.ViolationHandlerFunc, opts Opts) {\n\tlogger.Session(\"handle-diff\")\n\tscanner := diffscanner.NewDiffScanner(os.Stdin)\n\tsniffer := sniff.NewDefaultSniffer()\n\n\tsniffer.Sniff(logger, scanner, handler)\n}\n<commit_msg>Fix inflating archive message<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"cred-alert\/inflator\"\n\t\"cred-alert\/kolsch\"\n\t\"cred-alert\/mimetype\"\n\t\"cred-alert\/scanners\"\n\t\"cred-alert\/scanners\/diffscanner\"\n\t\"cred-alert\/scanners\/dirscanner\"\n\t\"cred-alert\/scanners\/filescanner\"\n\t\"cred-alert\/sniff\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/lager\"\n\n\t\"github.com\/jessevdk\/go-flags\"\n\t\"github.com\/mgutz\/ansi\"\n)\n\ntype Opts struct {\n\tFile string `short:\"f\" long:\"file\" description:\"the file to scan\" value-name:\"FILE\"`\n\tDiff bool `long:\"diff\" description:\"content to be scanned is a git diff\"`\n}\n\nvar red = ansi.ColorFunc(\"red+b\")\nvar green = ansi.ColorFunc(\"green+b\")\n\nfunc main() {\n\tvar opts Opts\n\n\t_, err := flags.ParseArgs(&opts, os.Args)\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n\n\tlogger := kolsch.NewLogger()\n\tsniffer := sniff.NewDefaultSniffer()\n\tinflate := inflator.New()\n\tdefer inflate.Close()\n\n\tvar credsFound int\n\thandler := func(logger lager.Logger, line scanners.Line) error {\n\t\tcredsFound++\n\t\tfmt.Printf(\"%s %s:%d\\n\", red(\"[CRED]\"), line.Path, line.LineNumber)\n\n\t\treturn nil\n\t}\n\n\tif opts.File != \"\" {\n\t\tfh, err := os.Open(opts.File)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err.Error())\n\t\t}\n\n\t\tbr := bufio.NewReader(fh)\n\t\tmime, isArchive := mimetype.IsArchive(logger, br)\n\t\tif isArchive {\n\t\t\tinflateDir, err := ioutil.TempDir(\"\", 
\"cred-alert-cli\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalln(err.Error())\n\t\t\t}\n\t\t\tdefer os.RemoveAll(inflateDir)\n\n\t\t\tviolationsDir, err := ioutil.TempDir(\"\", \"cred-alert-cli-violations\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalln(err.Error())\n\t\t\t}\n\n\t\t\tarchiveViolationHandler := func(logger lager.Logger, line scanners.Line) error {\n\t\t\t\tcredsFound++\n\t\t\t\trelPath, err := filepath.Rel(inflateDir, line.Path)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tdestPath := filepath.Join(violationsDir, relPath)\n\t\t\t\terr = os.MkdirAll(filepath.Dir(destPath), os.ModePerm)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\terr = persistFile(line.Path, destPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tfmt.Printf(\"%s %s:%d\\n\", red(\"[CRED]\"), destPath, line.LineNumber)\n\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tinflateStart := time.Now()\n\t\t\tfmt.Printf(\"Inflating archive into %s\\n\", inflateDir)\n\t\t\terr = inflate.Inflate(logger, opts.File, inflateDir)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"%s\\n\", red(\"FAILED\"))\n\t\t\t\tlog.Fatalln(err.Error())\n\t\t\t}\n\t\t\tfmt.Printf(\"%s (%s)\\n\", green(\"DONE\"), time.Since(inflateStart))\n\n\t\t\tscanStart := time.Now()\n\t\t\tdirScanner := dirscanner.New(archiveViolationHandler, sniffer)\n\t\t\terr = dirScanner.Scan(logger, inflateDir)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalln(err.Error())\n\t\t\t}\n\n\t\t\tfmt.Println()\n\t\t\tfmt.Println(\"Scan complete!\")\n\t\t\tfmt.Println()\n\t\t\tfmt.Println(\"Time taken:\", time.Since(scanStart))\n\t\t\tfmt.Println(\"Credentials found:\", credsFound)\n\t\t\tfmt.Println()\n\t\t\tfmt.Println(\"Any archive inflation errors can be found in: \", inflate.LogPath())\n\t\t} else {\n\t\t\tif strings.HasPrefix(mime, \"text\") {\n\t\t\t\tscanFile(logger, handler, sniffer, br, opts.File)\n\t\t\t}\n\t\t}\n\t} else if opts.Diff {\n\t\thandleDiff(logger, handler, opts)\n\n\t} else {\n\t\tscanFile(logger, handler, sniffer, os.Stdin, \"STDIN\")\n\t}\n\n\tif credsFound > 0 {\n\t\tos.Exit(3)\n\t}\n}\n\nfunc persistFile(srcPath, destPath string) error {\n\tdestFile, err := os.Create(destPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer destFile.Close()\n\n\tsrcFile, err := os.Open(srcPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer srcFile.Close()\n\n\t_, err = io.Copy(destFile, srcFile)\n\treturn err\n}\n\nfunc scanFile(\n\tlogger lager.Logger,\n\thandler sniff.ViolationHandlerFunc,\n\tsniffer sniff.Sniffer,\n\tf io.Reader,\n\tname string,\n) {\n\tscanner := filescanner.New(f, name)\n\tsniffer.Sniff(logger, scanner, handler)\n}\n\nfunc handleDiff(logger lager.Logger, handler sniff.ViolationHandlerFunc, opts Opts) {\n\tlogger.Session(\"handle-diff\")\n\tscanner := diffscanner.NewDiffScanner(os.Stdin)\n\tsniffer := sniff.NewDefaultSniffer()\n\n\tsniffer.Sniff(logger, scanner, handler)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nPackage runtime contains operations that interact with Go's runtime system,\nsuch as functions to control goroutines. 
It also includes the low-level type information\nused by the reflect package; see reflect's documentation for the programmable\ninterface to the run-time type system.\n\nEnvironment Variables\n\nThe following environment variables ($name or %name%, depending on the host\noperating system) control the run-time behavior of Go programs. The meanings\nand use may change from release to release.\n\nThe GOGC variable sets the initial garbage collection target percentage.\nA collection is triggered when the ratio of freshly allocated data to live data\nremaining after the previous collection reaches this percentage. The default\nis GOGC=100. Setting GOGC=off disables the garbage collector entirely.\nThe runtime\/debug package's SetGCPercent function allows changing this\npercentage at run time. See http:\/\/golang.org\/pkg\/runtime\/debug\/#SetGCPercent.\n\nThe GODEBUG variable controls debug output from the runtime. GODEBUG value is\na comma-separated list of name=val pairs. Supported names are:\n\n\tallocfreetrace: setting allocfreetrace=1 causes every allocation to be\n\tprofiled and a stack trace printed on each object's allocation and free.\n\n\tefence: setting efence=1 causes the allocator to run in a mode\n\twhere each object is allocated on a unique page and addresses are\n\tnever recycled.\n\n\tgctrace: setting gctrace=1 causes the garbage collector to emit a single line to standard\n\terror at each collection, summarizing the amount of memory collected and the\n\tlength of the pause. Setting gctrace=2 emits the same summary but also\n\trepeats each collection.\n\n\tgcdead: setting gcdead=1 causes the garbage collector to clobber all stack slots\n\tthat it thinks are dead.\n\n\tinvalidptr: defaults to invalidptr=1, causing the garbage collector and stack\n\tcopier to crash the program if an invalid pointer value (for example, 1)\n\tis found in a pointer-typed location. Setting invalidptr=0 disables this check.\n\tThis should only be used as a temporary workaround to diagnose buggy code.\n\tThe real fix is to not store integers in pointer-typed locations.\n\n\tscheddetail: setting schedtrace=X and scheddetail=1 causes the scheduler to emit\n\tdetailed multiline info every X milliseconds, describing state of the scheduler,\n\tprocessors, threads and goroutines.\n\n\tschedtrace: setting schedtrace=X causes the scheduler to emit a single line to standard\n\terror every X milliseconds, summarizing the scheduler state.\n\n\tscavenge: scavenge=1 enables debugging mode of heap scavenger.\n
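\nAs an illustrative aside (a sketch, not part of the original documentation):\nthe SetGCPercent function mentioned above changes the GOGC setting from within\na running program and returns the previous setting, so it can be restored:\n\n\t\/\/ import \"runtime\/debug\"\n\told := debug.SetGCPercent(200) \/\/ raise the target so collections run less often\n\tdefer debug.SetGCPercent(old) \/\/ restore the previous target percentage\n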
\nThe GOMAXPROCS variable limits the number of operating system threads that\ncan execute user-level Go code simultaneously. There is no limit to the number of threads\nthat can be blocked in system calls on behalf of Go code; those do not count against\nthe GOMAXPROCS limit. This package's GOMAXPROCS function queries and changes\nthe limit.\n\nThe GOTRACEBACK variable controls the amount of output generated when a Go\nprogram fails due to an unrecovered panic or an unexpected runtime condition.\nBy default, a failure prints a stack trace for every extant goroutine, eliding functions\ninternal to the run-time system, and then exits with exit code 2.\nIf GOTRACEBACK=0, the per-goroutine stack traces are omitted entirely.\nIf GOTRACEBACK=1, the default behavior is used.\nIf GOTRACEBACK=2, the per-goroutine stack traces include run-time functions.\nIf GOTRACEBACK=crash, the per-goroutine stack traces include run-time functions,\nand if possible the program crashes in an operating system-specific manner instead of\nexiting. For example, on Unix systems, the program raises SIGABRT to trigger a\ncore dump.\n\nThe GOARCH, GOOS, GOPATH, and GOROOT environment variables complete\nthe set of Go environment variables. They influence the building of Go programs\n(see http:\/\/golang.org\/cmd\/go and http:\/\/golang.org\/pkg\/go\/build).\nGOARCH, GOOS, and GOROOT are recorded at compile time and made available by\nconstants or functions in this package, but they do not influence the execution\nof the run-time system.\n*\/\npackage runtime\n\n\/\/ Caller reports file and line number information about function invocations on\n\/\/ the calling goroutine's stack. The argument skip is the number of stack frames\n\/\/ to ascend, with 0 identifying the caller of Caller. (For historical reasons the\n\/\/ meaning of skip differs between Caller and Callers.) The return values report the\n\/\/ program counter, file name, and line number within the file of the corresponding\n\/\/ call. The boolean ok is false if it was not possible to recover the information.\nfunc Caller(skip int) (pc uintptr, file string, line int, ok bool) {\n\t\/\/ Ask for two PCs: the one we were asked for\n\t\/\/ and what it called, so that we can see if it\n\t\/\/ \"called\" sigpanic.\n\tvar rpc [2]uintptr\n\tif callers(1+skip-1, &rpc[0], 2) < 2 {\n\t\treturn\n\t}\n\tf := findfunc(rpc[1])\n\tif f == nil {\n\t\t\/\/ TODO(rsc): Probably a bug?\n\t\t\/\/ The C version said \"have retpc at least\"\n\t\t\/\/ but actually returned pc=0.\n\t\tok = true\n\t\treturn\n\t}\n\tpc = rpc[1]\n\txpc := pc\n\tg := findfunc(rpc[0])\n\t\/\/ All architectures turn faults into apparent calls to sigpanic.\n\t\/\/ If we see a call to sigpanic, we do not back up the PC to find\n\t\/\/ the line number of the call instruction, because there is no call.\n\tif xpc > f.entry && (g == nil || g.entry != funcPC(sigpanic)) {\n\t\txpc--\n\t}\n\tline = int(funcline(f, xpc, &file))\n\tok = true\n\treturn\n}\n
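\n\/\/ For example (an illustrative sketch, not part of the original source), a\n\/\/ caller of this package can report the file and line of its own call site:\n\/\/\n\/\/\t_, file, line, ok := runtime.Caller(0)\n\/\/\tif ok {\n\/\/\t\tprintln(file, line) \/\/ e.g. a hypothetical \"\/path\/to\/main.go 10\"\n\/\/\t}\n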
Pick off 0-length pc here\n\t\/\/ so that we don't let a nil pc slice get to it.\n\tif len(pc) == 0 {\n\t\treturn 0\n\t}\n\treturn callers(skip, &pc[0], len(pc))\n}\n\n\/\/ GOROOT returns the root of the Go tree.\n\/\/ It uses the GOROOT environment variable, if set,\n\/\/ or else the root used during the Go build.\nfunc GOROOT() string {\n\ts := gogetenv(\"GOROOT\")\n\tif s != \"\" {\n\t\treturn s\n\t}\n\treturn defaultGoroot\n}\n\n\/\/ Version returns the Go tree's version string.\n\/\/ It is either the commit hash and date at the time of the build or,\n\/\/ when possible, a release tag like \"go1.3\".\nfunc Version() string {\n\treturn theVersion\n}\n\n\/\/ GOOS is the running program's operating system target:\n\/\/ one of darwin, freebsd, linux, and so on.\nconst GOOS string = theGoos\n\n\/\/ GOARCH is the running program's architecture target:\n\/\/ 386, amd64, or arm.\nconst GOARCH string = theGoarch\n<commit_msg>runtime: update comment for Callers<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nPackage runtime contains operations that interact with Go's runtime system,\nsuch as functions to control goroutines. It also includes the low-level type information\nused by the reflect package; see reflect's documentation for the programmable\ninterface to the run-time type system.\n\nEnvironment Variables\n\nThe following environment variables ($name or %name%, depending on the host\noperating system) control the run-time behavior of Go programs. The meanings\nand use may change from release to release.\n\nThe GOGC variable sets the initial garbage collection target percentage.\nA collection is triggered when the ratio of freshly allocated data to live data\nremaining after the previous collection reaches this percentage. The default\nis GOGC=100. Setting GOGC=off disables the garbage collector entirely.\nThe runtime\/debug package's SetGCPercent function allows changing this\npercentage at run time. See http:\/\/golang.org\/pkg\/runtime\/debug\/#SetGCPercent.\n\nThe GODEBUG variable controls debug output from the runtime. GODEBUG value is\na comma-separated list of name=val pairs. Supported names are:\n\n\tallocfreetrace: setting allocfreetrace=1 causes every allocation to be\n\tprofiled and a stack trace printed on each object's allocation and free.\n\n\tefence: setting efence=1 causes the allocator to run in a mode\n\twhere each object is allocated on a unique page and addresses are\n\tnever recycled.\n\n\tgctrace: setting gctrace=1 causes the garbage collector to emit a single line to standard\n\terror at each collection, summarizing the amount of memory collected and the\n\tlength of the pause. Setting gctrace=2 emits the same summary but also\n\trepeats each collection.\n\n\tgcdead: setting gcdead=1 causes the garbage collector to clobber all stack slots\n\tthat it thinks are dead.\n\n\tinvalidptr: defaults to invalidptr=1, causing the garbage collector and stack\n\tcopier to crash the program if an invalid pointer value (for example, 1)\n\tis found in a pointer-typed location. 
Setting invalidptr=0 disables this check.\n\tThis should only be used as a temporary workaround to diagnose buggy code.\n\tThe real fix is to not store integers in pointer-typed locations.\n\n\tscheddetail: setting schedtrace=X and scheddetail=1 causes the scheduler to emit\n\tdetailed multiline info every X milliseconds, describing state of the scheduler,\n\tprocessors, threads and goroutines.\n\n\tschedtrace: setting schedtrace=X causes the scheduler to emit a single line to standard\n\terror every X milliseconds, summarizing the scheduler state.\n\n\tscavenge: scavenge=1 enables debugging mode of heap scavenger.\n\nThe GOMAXPROCS variable limits the number of operating system threads that\ncan execute user-level Go code simultaneously. There is no limit to the number of threads\nthat can be blocked in system calls on behalf of Go code; those do not count against\nthe GOMAXPROCS limit. This package's GOMAXPROCS function queries and changes\nthe limit.\n\nThe GOTRACEBACK variable controls the amount of output generated when a Go\nprogram fails due to an unrecovered panic or an unexpected runtime condition.\nBy default, a failure prints a stack trace for every extant goroutine, eliding functions\ninternal to the run-time system, and then exits with exit code 2.\nIf GOTRACEBACK=0, the per-goroutine stack traces are omitted entirely.\nIf GOTRACEBACK=1, the default behavior is used.\nIf GOTRACEBACK=2, the per-goroutine stack traces include run-time functions.\nIf GOTRACEBACK=crash, the per-goroutine stack traces include run-time functions,\nand if possible the program crashes in an operating-specific manner instead of\nexiting. For example, on Unix systems, the program raises SIGABRT to trigger a\ncore dump.\n\nThe GOARCH, GOOS, GOPATH, and GOROOT environment variables complete\nthe set of Go environment variables. They influence the building of Go programs\n(see http:\/\/golang.org\/cmd\/go and http:\/\/golang.org\/pkg\/go\/build).\nGOARCH, GOOS, and GOROOT are recorded at compile time and made available by\nconstants or functions in this package, but they do not influence the execution\nof the run-time system.\n*\/\npackage runtime\n\n\/\/ Caller reports file and line number information about function invocations on\n\/\/ the calling goroutine's stack. The argument skip is the number of stack frames\n\/\/ to ascend, with 0 identifying the caller of Caller. (For historical reasons the\n\/\/ meaning of skip differs between Caller and Callers.) The return values report the\n\/\/ program counter, file name, and line number within the file of the corresponding\n\/\/ call. 
The boolean ok is false if it was not possible to recover the information.\nfunc Caller(skip int) (pc uintptr, file string, line int, ok bool) {\n\t\/\/ Ask for two PCs: the one we were asked for\n\t\/\/ and what it called, so that we can see if it\n\t\/\/ \"called\" sigpanic.\n\tvar rpc [2]uintptr\n\tif callers(1+skip-1, &rpc[0], 2) < 2 {\n\t\treturn\n\t}\n\tf := findfunc(rpc[1])\n\tif f == nil {\n\t\t\/\/ TODO(rsc): Probably a bug?\n\t\t\/\/ The C version said \"have retpc at least\"\n\t\t\/\/ but actually returned pc=0.\n\t\tok = true\n\t\treturn\n\t}\n\tpc = rpc[1]\n\txpc := pc\n\tg := findfunc(rpc[0])\n\t\/\/ All architectures turn faults into apparent calls to sigpanic.\n\t\/\/ If we see a call to sigpanic, we do not back up the PC to find\n\t\/\/ the line number of the call instruction, because there is no call.\n\tif xpc > f.entry && (g == nil || g.entry != funcPC(sigpanic)) {\n\t\txpc--\n\t}\n\tline = int(funcline(f, xpc, &file))\n\tok = true\n\treturn\n}\n\n\/\/ Callers fills the slice pc with the return program counters of function invocations\n\/\/ on the calling goroutine's stack. The argument skip is the number of stack frames\n\/\/ to skip before recording in pc, with 0 identifying the frame for Callers itself and\n\/\/ 1 identifying the caller of Callers.\n\/\/ It returns the number of entries written to pc.\n\/\/\n\/\/ Note that since each slice entry pc[i] is a return program counter,\n\/\/ looking up the file and line for pc[i] (for example, using (*Func).FileLine)\n\/\/ will return the file and line number of the instruction immediately\n\/\/ following the call.\n\/\/ To look up the file and line number of the call itself, use pc[i]-1.\n\/\/ As an exception to this rule, if pc[i-1] corresponds to the function\n\/\/ runtime.sigpanic, then pc[i] is the program counter of a faulting\n\/\/ instruction and should be used without any subtraction.\nfunc Callers(skip int, pc []uintptr) int {\n\t\/\/ runtime.callers uses pc.array==nil as a signal\n\t\/\/ to print a stack trace. 
Pick off 0-length pc here\n\t\/\/ so that we don't let a nil pc slice get to it.\n\tif len(pc) == 0 {\n\t\treturn 0\n\t}\n\treturn callers(skip, &pc[0], len(pc))\n}\n\n\/\/ GOROOT returns the root of the Go tree.\n\/\/ It uses the GOROOT environment variable, if set,\n\/\/ or else the root used during the Go build.\nfunc GOROOT() string {\n\ts := gogetenv(\"GOROOT\")\n\tif s != \"\" {\n\t\treturn s\n\t}\n\treturn defaultGoroot\n}\n\n\/\/ Version returns the Go tree's version string.\n\/\/ It is either the commit hash and date at the time of the build or,\n\/\/ when possible, a release tag like \"go1.3\".\nfunc Version() string {\n\treturn theVersion\n}\n\n\/\/ GOOS is the running program's operating system target:\n\/\/ one of darwin, freebsd, linux, and so on.\nconst GOOS string = theGoos\n\n\/\/ GOARCH is the running program's architecture target:\n\/\/ 386, amd64, or arm.\nconst GOARCH string = theGoarch\n<|endoftext|>"} {"text":"<commit_before>\/\/ pollEndpoint is a helper utility that waits for a http endpoint to be reachable and return with http.StatusOK\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"time\"\n\n\tlog \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/Sirupsen\/logrus\"\n\tma \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-multiaddr\"\n\tmanet \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-multiaddr-net\"\n)\n\nvar (\n\thost = flag.String(\"host\", \"\/ip4\/127.0.0.1\/tcp\/5001\", \"the multiaddr host to dial on\")\n\tendpoint = flag.String(\"ep\", \"\/version\", \"which http endpoint path to hit\")\n\ttries = flag.Int(\"tries\", 10, \"how many tries to make before failing\")\n\ttimeout = flag.Duration(\"tout\", time.Second, \"how long to wait between attempts\")\n\tverbose = flag.Bool(\"v\", false, \"verbose logging\")\n)\n\nfunc main() {\n\tflag.Parse()\n\n\t\/\/ extract address from host flag\n\taddr, err := ma.NewMultiaddr(*host)\n\tif err != nil {\n\t\tlog.WithField(\"err\", err).Fatal(\"NewMultiaddr() failed\")\n\t}\n\tp := addr.Protocols()\n\tif len(p) < 2 {\n\t\tlog.WithField(\"addr\", addr).Fatal(\"need two protocols in host flag (\/ip\/tcp)\")\n\t}\n\t_, host, err := manet.DialArgs(addr)\n\tif err != nil {\n\t\tlog.WithField(\"err\", err).Fatal(\"manet.DialArgs() failed\")\n\t}\n\n\tif *verbose { \/\/ lower log level\n\t\tlog.SetLevel(log.DebugLevel)\n\t}\n\n\t\/\/ construct url to dial\n\tvar u url.URL\n\tu.Scheme = \"http\"\n\tu.Host = host\n\tu.Path = *endpoint\n\n\t\/\/ show what we got\n\tstart := time.Now()\n\tlog.WithFields(log.Fields{\n\t\t\"when\": start,\n\t\t\"tries\": *tries,\n\t\t\"timeout\": *timeout,\n\t\t\"url\": u.String(),\n\t}).Debug(\"starting\")\n\n\tfor *tries > 0 {\n\t\tf := log.Fields{\"tries\": *tries}\n\n\t\terr := checkOK(http.Get(u.String()))\n\t\tif err == nil {\n\t\t\tf[\"took\"] = time.Since(start)\n\t\t\tlog.WithFields(f).Println(\"status ok - endpoint reachable\")\n\t\t\tos.Exit(0)\n\t\t}\n\t\tf[\"error\"] = err\n\t\tlog.WithFields(f).Debug(\"get failed\")\n\t\ttime.Sleep(*timeout)\n\t\t*tries--\n\t}\n\n\tlog.Println(\"failed.\")\n\tos.Exit(1)\n}\n\nfunc checkOK(resp *http.Response, err error) error {\n\tif err == nil { \/\/ request worked\n\t\tresp.Body.Close()\n\t\tif resp.StatusCode == http.StatusOK {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"Response not OK. 
%d %s\", resp.StatusCode, resp.Status)\n\t} else if urlErr, ok := err.(*url.Error); ok { \/\/ expected error from http.Get()\n\t\tif urlErr.Op != \"Get\" || urlErr.URL != *endpoint {\n\t\t\treturn fmt.Errorf(\"wrong url or endpoint error from http.Get() %#v\", urlErr)\n\t\t}\n\t}\n\treturn err\n}\n<commit_msg>pollEndpoint: improve error output<commit_after>\/\/ pollEndpoint is a helper utility that waits for a http endpoint to be reachable and return with http.StatusOK\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"time\"\n\n\tlog \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/Sirupsen\/logrus\"\n\tma \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-multiaddr\"\n\tmanet \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-multiaddr-net\"\n)\n\nvar (\n\thost = flag.String(\"host\", \"\/ip4\/127.0.0.1\/tcp\/5001\", \"the multiaddr host to dial on\")\n\tendpoint = flag.String(\"ep\", \"\/version\", \"which http endpoint path to hit\")\n\ttries = flag.Int(\"tries\", 10, \"how many tries to make before failing\")\n\ttimeout = flag.Duration(\"tout\", time.Second, \"how long to wait between attempts\")\n\tverbose = flag.Bool(\"v\", false, \"verbose logging\")\n)\n\nfunc main() {\n\tflag.Parse()\n\n\t\/\/ extract address from host flag\n\taddr, err := ma.NewMultiaddr(*host)\n\tif err != nil {\n\t\tlog.WithField(\"err\", err).Fatal(\"NewMultiaddr() failed\")\n\t}\n\tp := addr.Protocols()\n\tif len(p) < 2 {\n\t\tlog.WithField(\"addr\", addr).Fatal(\"need two protocols in host flag (\/ip\/tcp)\")\n\t}\n\t_, host, err := manet.DialArgs(addr)\n\tif err != nil {\n\t\tlog.WithField(\"err\", err).Fatal(\"manet.DialArgs() failed\")\n\t}\n\n\tif *verbose { \/\/ lower log level\n\t\tlog.SetLevel(log.DebugLevel)\n\t}\n\n\t\/\/ construct url to dial\n\tvar u url.URL\n\tu.Scheme = \"http\"\n\tu.Host = host\n\tu.Path = *endpoint\n\n\t\/\/ show what we got\n\tstart := time.Now()\n\tlog.WithFields(log.Fields{\n\t\t\"when\": start,\n\t\t\"tries\": *tries,\n\t\t\"timeout\": *timeout,\n\t\t\"url\": u.String(),\n\t}).Debug(\"starting\")\n\n\tfor *tries > 0 {\n\t\tf := log.Fields{\"tries\": *tries}\n\n\t\terr := checkOK(http.Get(u.String()))\n\t\tif err == nil {\n\t\t\tf[\"took\"] = time.Since(start)\n\t\t\tlog.WithFields(f).Println(\"status ok - endpoint reachable\")\n\t\t\tos.Exit(0)\n\t\t}\n\t\tf[\"error\"] = err\n\t\tlog.WithFields(f).Debug(\"get failed\")\n\t\ttime.Sleep(*timeout)\n\t\t*tries--\n\t}\n\n\tlog.Println(\"failed.\")\n\tos.Exit(1)\n}\n\nfunc checkOK(resp *http.Response, err error) error {\n\tif err == nil { \/\/ request worked\n\t\tdefer resp.Body.Close()\n\t\tif resp.StatusCode == http.StatusOK {\n\t\t\treturn nil\n\t\t}\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"pollEndpoint: ioutil.ReadAll() Error: %s\", err)\n\t\t}\n\t\treturn fmt.Errorf(\"Response not OK. 
%d %s %q\", resp.StatusCode, resp.Status, string(body))\n\t}\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright 2013 Juliano Martinez <juliano@martinez.io>\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n Based on http:\/\/github.com\/nf\/webfront\n\n @author: Juliano Martinez\n*\/\n\npackage http_server\n\nimport (\n\t\"fmt\"\n\t\"github.com\/fiorix\/go-redis\/redis\"\n\thpr_utils \"github.com\/ncode\/hot-potato-router\/utils\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\tcfg = hpr_utils.NewConfig()\n\trc = redis.New(cfg.Options[\"redis\"][\"server_list\"])\n)\n\nfunc xff(req *http.Request) string {\n\tremote_addr := strings.Split(req.RemoteAddr, \":\")\n\tif len(remote_addr) == 0 {\n\t\treturn \"\"\n\t}\n\treturn remote_addr[0]\n}\n\ntype Server struct {\n\tmu sync.RWMutex\n\tlast time.Time\n\tproxy map[string][]Proxy\n\tbackend map[string]int\n}\n\ntype Proxy struct {\n\tConnections int64\n\tBackend string\n\thandler http.Handler\n}\n\nfunc Listen(fd int, addr string) net.Listener {\n\tvar l net.Listener\n\tvar err error\n\tif fd >= 3 {\n\t\tl, err = net.FileListener(os.NewFile(uintptr(fd), \"http\"))\n\t} else {\n\t\tl, err = net.Listen(\"tcp\", addr)\n\t}\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn l\n}\n\nfunc NewServer(probe time.Duration) (*Server, error) {\n\ts := new(Server)\n\ts.proxy = make(map[string][]Proxy)\n\ts.backend = make(map[string]int)\n\tgo s.probe_backends(probe)\n\treturn s, nil\n}\n\nfunc (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif h := s.handler(r); h != nil {\n\t\tclient := xff(r)\n\t\thpr_utils.Log(fmt.Sprintf(\"Request from: %s Url: %s\", client, r.Host))\n\t\tr.Header.Add(\"X-Forwarded-For‎\", client)\n\t\tr.Header.Add(\"X-Real-IP\", client)\n\t\th.ServeHTTP(w, r)\n\t\treturn\n\t}\n\thttp.Error(w, \"Not found.\", http.StatusNotFound)\n}\n\nfunc (s *Server) handler(req *http.Request) http.Handler {\n\th := req.Host\n\tif i := strings.Index(h, \":\"); i >= 0 {\n\t\th = h[:i]\n\t}\n\n\t_, ok := s.proxy[h]\n\tif !ok {\n\t\tfmt.Println(fmt.Sprintf(\"hpr-backends::%s\", h))\n\t\tf, _ := rc.ZRange(fmt.Sprintf(\"hpr-backends::%s\", h), 0, -1, true)\n\t\tif len(f) == 0 {\n\t\t\treturn nil\n\t\t}\n\t\ts.mu.Lock()\n\t\tvar url string\n\t\tfor _, be := range f {\n\t\t\tcount, err := strconv.Atoi(be)\n\t\t\tlog.Println(count)\n\t\t\tif err != nil {\n\t\t\t\turl = be\n\t\t\t\tlog.Println(be)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor r := 0; r <= count; r++ {\n\t\t\t\tlog.Println(r)\n\t\t\t\tlog.Println(count)\n\t\t\t\tlog.Println(url)\n\t\t\t\ts.proxy[h] = append(s.proxy[h], Proxy{0, url, makeHandler(url)})\n\t\t\t}\n\t\t}\n\t\ts.mu.Unlock()\n\t}\n\treturn s.Next(h)\n}\n\n\/* TODO: Implement more balance algorithms *\/\nfunc (s *Server) Next(h string) http.Handler {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\ts.backend[h]++\n\ttotal := len(s.proxy[h])\n\tif s.backend[h] == total {\n\t\ts.backend[h] = 
0\n\t}\n\thpr_utils.Log(fmt.Sprintf(\"Using backend: %s Url: %s\", s.proxy[h][s.backend[h]].Backend, h))\n\treturn s.proxy[h][s.backend[h]].handler\n}\n\nfunc (s *Server) probe_backends(probe time.Duration) {\n\tfor {\n\t\ttime.Sleep(probe)\n\t\t\/\/ s.mu.Lock()\n\t\tfor key, value := range s.proxy {\n\t\t\thpr_utils.Log(fmt.Sprintf(\"Key: %s Value: %s\", key, value))\n\t\t}\n\t\t\/\/ s.mu.Unlock()\n\t}\n}\n\nfunc makeHandler(f string) http.Handler {\n\tif f != \"\" {\n\t\treturn &httputil.ReverseProxy{\n\t\t\tDirector: func(req *http.Request) {\n\t\t\t\treq.URL.Scheme = \"http\"\n\t\t\t\treq.URL.Host = f\n\t\t\t},\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>simple wrr working ;)<commit_after>\/*\n Copyright 2013 Juliano Martinez <juliano@martinez.io>\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n Based on http:\/\/github.com\/nf\/webfront\n\n @author: Juliano Martinez\n*\/\n\npackage http_server\n\nimport (\n\t\"fmt\"\n\t\"github.com\/fiorix\/go-redis\/redis\"\n\thpr_utils \"github.com\/ncode\/hot-potato-router\/utils\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\tcfg = hpr_utils.NewConfig()\n\trc = redis.New(cfg.Options[\"redis\"][\"server_list\"])\n)\n\nfunc xff(req *http.Request) string {\n\tremote_addr := strings.Split(req.RemoteAddr, \":\")\n\tif len(remote_addr) == 0 {\n\t\treturn \"\"\n\t}\n\treturn remote_addr[0]\n}\n\ntype Server struct {\n\tmu sync.RWMutex\n\tlast time.Time\n\tproxy map[string][]Proxy\n\tbackend map[string]int\n}\n\ntype Proxy struct {\n\tConnections int64\n\tBackend string\n\thandler http.Handler\n}\n\nfunc Listen(fd int, addr string) net.Listener {\n\tvar l net.Listener\n\tvar err error\n\tif fd >= 3 {\n\t\tl, err = net.FileListener(os.NewFile(uintptr(fd), \"http\"))\n\t} else {\n\t\tl, err = net.Listen(\"tcp\", addr)\n\t}\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn l\n}\n\nfunc NewServer(probe time.Duration) (*Server, error) {\n\ts := new(Server)\n\ts.proxy = make(map[string][]Proxy)\n\ts.backend = make(map[string]int)\n\tgo s.probe_backends(probe)\n\treturn s, nil\n}\n\nfunc (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif h := s.handler(r); h != nil {\n\t\tclient := xff(r)\n\t\thpr_utils.Log(fmt.Sprintf(\"Request from: %s Url: %s\", client, r.Host))\n\t\tr.Header.Add(\"X-Forwarded-For‎\", client)\n\t\tr.Header.Add(\"X-Real-IP\", client)\n\t\th.ServeHTTP(w, r)\n\t\treturn\n\t}\n\thttp.Error(w, \"Not found.\", http.StatusNotFound)\n}\n\nfunc (s *Server) handler(req *http.Request) http.Handler {\n\th := req.Host\n\tif i := strings.Index(h, \":\"); i >= 0 {\n\t\th = h[:i]\n\t}\n\n\t_, ok := s.proxy[h]\n\tif !ok {\n\t\tf, _ := rc.ZRange(fmt.Sprintf(\"hpr-backends::%s\", h), 0, -1, true)\n\t\tif len(f) == 0 {\n\t\t\treturn nil\n\t\t}\n\n\t\tvar url string\n\t\tfor _, be := range f {\n\t\t\tcount, err := strconv.Atoi(be)\n\t\t\tif err != nil {\n\t\t\t\turl = be\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor r := 0; r <= count; r++ 
{\n\t\t\t\ts.mu.Lock()\n\t\t\t\ts.proxy[h] = append(s.proxy[h], Proxy{0, url, makeHandler(url)})\n\t\t\t\ts.mu.Unlock()\n\t\t\t}\n\t\t}\n\n\t}\n\treturn s.Next(h)\n}\n\n\/* TODO: Implement more balance algorithms *\/\nfunc (s *Server) Next(h string) http.Handler {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\ts.backend[h]++\n\ttotal := len(s.proxy[h])\n\tif s.backend[h] == total {\n\t\ts.backend[h] = 0\n\t}\n\thpr_utils.Log(fmt.Sprintf(\"Using backend: %s Url: %s\", s.proxy[h][s.backend[h]].Backend, h))\n\treturn s.proxy[h][s.backend[h]].handler\n}\n\nfunc (s *Server) probe_backends(probe time.Duration) {\n\tfor {\n\t\ttime.Sleep(probe)\n\t\t\/\/ s.mu.Lock()\n\t\tfor key, value := range s.proxy {\n\t\t\thpr_utils.Log(fmt.Sprintf(\"Key: %s Value: %s\", key, value))\n\t\t}\n\t\t\/\/ s.mu.Unlock()\n\t}\n}\n\nfunc makeHandler(f string) http.Handler {\n\tif f != \"\" {\n\t\treturn &httputil.ReverseProxy{\n\t\t\tDirector: func(req *http.Request) {\n\t\t\t\treq.URL.Scheme = \"http\"\n\t\t\t\treq.URL.Host = f\n\t\t\t},\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright 2013 Juliano Martinez <juliano@martinez.io>\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n Based on http:\/\/github.com\/nf\/webfront\n\n @author: Juliano Martinez\n*\/\n\npackage http_server\n\nimport (\n\t\"fmt\"\n\t\"github.com\/fiorix\/go-redis\/redis\"\n\thpr_utils \"github.com\/ncode\/hot-potato-router\/utils\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\tcfg = hpr_utils.NewConfig()\n\trc = redis.New(cfg.Options[\"redis\"][\"server_list\"])\n)\n\nfunc xff(req *http.Request) string {\n\tremote_addr := strings.Split(req.RemoteAddr, \":\")\n\tif len(remote_addr) == 0 {\n\t\treturn \"\"\n\t}\n\treturn remote_addr[0]\n}\n\ntype Server struct {\n\tmu sync.RWMutex\n\tlast time.Time\n\tproxy map[string][]Proxy\n\tbackend map[string]int\n}\n\ntype Proxy struct {\n\tConnections int64\n\tBackend string\n\thandler http.Handler\n}\n\nfunc Listen(fd int, addr string) net.Listener {\n\tvar l net.Listener\n\tvar err error\n\tif fd >= 3 {\n\t\tl, err = net.FileListener(os.NewFile(uintptr(fd), \"http\"))\n\t} else {\n\t\tl, err = net.Listen(\"tcp\", addr)\n\t}\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn l\n}\n\nfunc NewServer(probe time.Duration) (*Server, error) {\n\ts := new(Server)\n\ts.proxy = make(map[string][]Proxy)\n\ts.backend = make(map[string]int)\n\tgo s.probe_backends(probe)\n\treturn s, nil\n}\n\nfunc (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif h := s.handler(r); h != nil {\n\t\tclient := xff(r)\n\t\thpr_utils.Log(fmt.Sprintf(\"Request from: %s Url: %s\", client, r.Host))\n\t\tr.Header.Add(\"X-Forwarded-For‎\", client)\n\t\tr.Header.Add(\"X-Real-IP\", client)\n\t\th.ServeHTTP(w, r)\n\t\treturn\n\t}\n\thttp.Error(w, \"Not found.\", http.StatusNotFound)\n}\n\nfunc (s *Server) handler(req *http.Request) http.Handler {\n\th := req.Host\n\tif i := strings.Index(h, \":\"); i >= 
0 {\n\t\th = h[:i]\n\t}\n\n\t_, ok := s.proxy[h]\n\tif !ok {\n\t\tfmt.Println(fmt.Sprintf(\"hpr-backends::%s\", h))\n\t\tf, _ := rc.ZRange(fmt.Sprintf(\"hpr-backends::%s\", h), 0, -1, true)\n\t\tif len(f) == 0 {\n\t\t\treturn nil\n\t\t}\n\t\ts.mu.Lock()\n\t\tvar url string\n\t\tfor _, be := range f {\n\t\t\tcount, err := strconv.Atoi(be)\n\t\t\tlog.Println(count)\n\t\t\tif err != nil {\n\t\t\t\turl = be\n\t\t\t\tlog.Println(be)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor r := 0; r >= count; r++ {\n\t\t\t\tlog.Println(r)\n\t\t\t\tlog.Println(count)\n\t\t\t\tlog.Println(url)\n\t\t\t\ts.proxy[h] = append(s.proxy[h], Proxy{0, url, makeHandler(url)})\n\t\t\t}\n\t\t}\n\t\ts.mu.Unlock()\n\t}\n\treturn s.Next(h)\n}\n\n\/* TODO: Implement more balance algorithms *\/\nfunc (s *Server) Next(h string) http.Handler {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\ts.backend[h]++\n\ttotal := len(s.proxy[h])\n\tif s.backend[h] == total {\n\t\ts.backend[h] = 0\n\t}\n\thpr_utils.Log(fmt.Sprintf(\"Using backend: %s Url: %s\", s.proxy[h][s.backend[h]].Backend, h))\n\treturn s.proxy[h][s.backend[h]].handler\n}\n\nfunc (s *Server) probe_backends(probe time.Duration) {\n\tfor {\n\t\ttime.Sleep(probe)\n\t\t\/\/ s.mu.Lock()\n\t\tfor key, value := range s.proxy {\n\t\t\thpr_utils.Log(fmt.Sprintf(\"Key: %s Value: %s\", key, value))\n\t\t}\n\t\t\/\/ s.mu.Unlock()\n\t}\n}\n\nfunc makeHandler(f string) http.Handler {\n\tif f != \"\" {\n\t\treturn &httputil.ReverseProxy{\n\t\t\tDirector: func(req *http.Request) {\n\t\t\t\treq.URL.Scheme = \"http\"\n\t\t\t\treq.URL.Host = f\n\t\t\t},\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>lalala<commit_after>\/*\n Copyright 2013 Juliano Martinez <juliano@martinez.io>\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n Based on http:\/\/github.com\/nf\/webfront\n\n @author: Juliano Martinez\n*\/\n\npackage http_server\n\nimport (\n\t\"fmt\"\n\t\"github.com\/fiorix\/go-redis\/redis\"\n\thpr_utils \"github.com\/ncode\/hot-potato-router\/utils\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\tcfg = hpr_utils.NewConfig()\n\trc = redis.New(cfg.Options[\"redis\"][\"server_list\"])\n)\n\nfunc xff(req *http.Request) string {\n\tremote_addr := strings.Split(req.RemoteAddr, \":\")\n\tif len(remote_addr) == 0 {\n\t\treturn \"\"\n\t}\n\treturn remote_addr[0]\n}\n\ntype Server struct {\n\tmu sync.RWMutex\n\tlast time.Time\n\tproxy map[string][]Proxy\n\tbackend map[string]int\n}\n\ntype Proxy struct {\n\tConnections int64\n\tBackend string\n\thandler http.Handler\n}\n\nfunc Listen(fd int, addr string) net.Listener {\n\tvar l net.Listener\n\tvar err error\n\tif fd >= 3 {\n\t\tl, err = net.FileListener(os.NewFile(uintptr(fd), \"http\"))\n\t} else {\n\t\tl, err = net.Listen(\"tcp\", addr)\n\t}\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn l\n}\n\nfunc NewServer(probe time.Duration) (*Server, error) {\n\ts := new(Server)\n\ts.proxy = make(map[string][]Proxy)\n\ts.backend = make(map[string]int)\n\tgo 
s.probe_backends(probe)\n\treturn s, nil\n}\n\nfunc (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif h := s.handler(r); h != nil {\n\t\tclient := xff(r)\n\t\thpr_utils.Log(fmt.Sprintf(\"Request from: %s Url: %s\", client, r.Host))\n\t\tr.Header.Add(\"X-Forwarded-For‎\", client)\n\t\tr.Header.Add(\"X-Real-IP\", client)\n\t\th.ServeHTTP(w, r)\n\t\treturn\n\t}\n\thttp.Error(w, \"Not found.\", http.StatusNotFound)\n}\n\nfunc (s *Server) handler(req *http.Request) http.Handler {\n\th := req.Host\n\tif i := strings.Index(h, \":\"); i >= 0 {\n\t\th = h[:i]\n\t}\n\n\t_, ok := s.proxy[h]\n\tif !ok {\n\t\tfmt.Println(fmt.Sprintf(\"hpr-backends::%s\", h))\n\t\tf, _ := rc.ZRange(fmt.Sprintf(\"hpr-backends::%s\", h), 0, -1, true)\n\t\tif len(f) == 0 {\n\t\t\treturn nil\n\t\t}\n\t\ts.mu.Lock()\n\t\tvar url string\n\t\tfor _, be := range f {\n\t\t\tcount, err := strconv.Atoi(be)\n\t\t\tlog.Println(count)\n\t\t\tif err != nil {\n\t\t\t\turl = be\n\t\t\t\tlog.Println(be)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor r := 0; r <= count; r++ {\n\t\t\t\tlog.Println(r)\n\t\t\t\tlog.Println(count)\n\t\t\t\tlog.Println(url)\n\t\t\t\ts.proxy[h] = append(s.proxy[h], Proxy{0, url, makeHandler(url)})\n\t\t\t}\n\t\t}\n\t\ts.mu.Unlock()\n\t}\n\treturn s.Next(h)\n}\n\n\/* TODO: Implement more balance algorithms *\/\nfunc (s *Server) Next(h string) http.Handler {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\ts.backend[h]++\n\ttotal := len(s.proxy[h])\n\tif s.backend[h] == total {\n\t\ts.backend[h] = 0\n\t}\n\thpr_utils.Log(fmt.Sprintf(\"Using backend: %s Url: %s\", s.proxy[h][s.backend[h]].Backend, h))\n\treturn s.proxy[h][s.backend[h]].handler\n}\n\nfunc (s *Server) probe_backends(probe time.Duration) {\n\tfor {\n\t\ttime.Sleep(probe)\n\t\t\/\/ s.mu.Lock()\n\t\tfor key, value := range s.proxy {\n\t\t\thpr_utils.Log(fmt.Sprintf(\"Key: %s Value: %s\", key, value))\n\t\t}\n\t\t\/\/ s.mu.Unlock()\n\t}\n}\n\nfunc makeHandler(f string) http.Handler {\n\tif f != \"\" {\n\t\treturn &httputil.ReverseProxy{\n\t\t\tDirector: func(req *http.Request) {\n\t\t\t\treq.URL.Scheme = \"http\"\n\t\t\t\treq.URL.Host = f\n\t\t\t},\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/matrix-org\/dugong\"\n\t\"github.com\/matrix-org\/go-neb\/api\"\n\t\"github.com\/matrix-org\/go-neb\/api\/handlers\"\n\t\"github.com\/matrix-org\/go-neb\/clients\"\n\t\"github.com\/matrix-org\/go-neb\/database\"\n\t_ \"github.com\/matrix-org\/go-neb\/metrics\"\n\t\"github.com\/matrix-org\/go-neb\/polling\"\n\t_ \"github.com\/matrix-org\/go-neb\/realms\/github\"\n\t_ \"github.com\/matrix-org\/go-neb\/realms\/jira\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/echo\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/giphy\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/github\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/google\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/guggy\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/imgur\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/jira\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/rssbot\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/slackapi\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/travisci\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/wikipedia\"\n\t\"github.com\/matrix-org\/go-neb\/types\"\n\t\"github.com\/matrix-org\/util\"\n\t_ 
\"github.com\/mattn\/go-sqlite3\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\tyaml \"gopkg.in\/yaml.v2\"\n)\n\n\/\/ loadFromConfig loads a config file and returns a ConfigFile\nfunc loadFromConfig(db *database.ServiceDB, configFilePath string) (*api.ConfigFile, error) {\n\t\/\/ ::Horrible hacks ahead::\n\t\/\/ The config is represented as YAML, and we want to convert that into NEB types.\n\t\/\/ However, NEB types make liberal use of json.RawMessage which the YAML parser\n\t\/\/ doesn't like. We can't implement MarshalYAML\/UnmarshalYAML as a custom type easily\n\t\/\/ because YAML is insane and supports numbers as keys. The YAML parser therefore has the\n\t\/\/ generic form of map[interface{}]interface{} - but the JSON parser doesn't know\n\t\/\/ how to parse that.\n\t\/\/\n\t\/\/ The hack that follows gets around this by type asserting all parsed YAML keys as\n\t\/\/ strings then re-encoding\/decoding as JSON. That is:\n\t\/\/ YAML bytes -> map[interface]interface -> map[string]interface -> JSON bytes -> NEB types\n\n\t\/\/ Convert to YAML bytes\n\tcontents, err := ioutil.ReadFile(configFilePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Convert to map[interface]interface\n\tvar cfg map[interface{}]interface{}\n\tif err = yaml.Unmarshal(contents, &cfg); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to unmarshal YAML: %s\", err)\n\t}\n\n\t\/\/ Convert to map[string]interface\n\tdict := convertKeysToStrings(cfg)\n\n\t\/\/ Convert to JSON bytes\n\tb, err := json.Marshal(dict)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to marshal config as JSON: %s\", err)\n\t}\n\n\t\/\/ Finally, Convert to NEB types\n\tvar c api.ConfigFile\n\tif err := json.Unmarshal(b, &c); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to convert to config file: %s\", err)\n\t}\n\n\t\/\/ sanity check (at least 1 client and 1 service)\n\tif len(c.Clients) == 0 || len(c.Services) == 0 {\n\t\treturn nil, fmt.Errorf(\"At least 1 client and 1 service must be specified\")\n\t}\n\n\treturn &c, nil\n}\n\nfunc convertKeysToStrings(iface interface{}) interface{} {\n\tobj, isObj := iface.(map[interface{}]interface{})\n\tif isObj {\n\t\tstrObj := make(map[string]interface{})\n\t\tfor k, v := range obj {\n\t\t\tstrObj[k.(string)] = convertKeysToStrings(v) \/\/ handle nested objects\n\t\t}\n\t\treturn strObj\n\t}\n\n\tarr, isArr := iface.([]interface{})\n\tif isArr {\n\t\tfor i := range arr {\n\t\t\tarr[i] = convertKeysToStrings(arr[i]) \/\/ handle nested objects\n\t\t}\n\t\treturn arr\n\t}\n\treturn iface \/\/ base type like string or number\n}\n\nfunc insertServicesFromConfig(clis *clients.Clients, serviceReqs []api.ConfigureServiceRequest) error {\n\tfor i, s := range serviceReqs {\n\t\tif err := s.Check(); err != nil {\n\t\t\treturn fmt.Errorf(\"config: Service[%d] : %s\", i, err)\n\t\t}\n\t\tservice, err := types.CreateService(s.ID, s.Type, s.UserID, s.Config)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"config: Service[%d] : %s\", i, err)\n\t\t}\n\n\t\t\/\/ Fetch the client for this service and register\/poll\n\t\tc, err := clis.Client(s.UserID)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"config: Service[%d] : %s\", i, err)\n\t\t}\n\n\t\tif err = service.Register(nil, c); err != nil {\n\t\t\treturn fmt.Errorf(\"config: Service[%d] : %s\", i, err)\n\t\t}\n\t\tif _, err := database.GetServiceDB().StoreService(service); err != nil {\n\t\t\treturn fmt.Errorf(\"config: Service[%d] : %s\", i, err)\n\t\t}\n\t\tservice.PostRegister(nil)\n\t}\n\treturn nil\n}\n\nfunc 
loadDatabase(databaseType, databaseURL, configYAML string) (*database.ServiceDB, error) {\n\tif configYAML != \"\" {\n\t\tdatabaseType = \"sqlite3\"\n\t\tdatabaseURL = \":memory:?_busy_timeout=5000\"\n\t}\n\n\tdb, err := database.Open(databaseType, databaseURL)\n\tif err == nil {\n\t\tdatabase.SetServiceDB(db) \/\/ set singleton\n\t}\n\treturn db, err\n}\n\nfunc setup(e envVars, mux *http.ServeMux, matrixClient *http.Client) {\n\terr := types.BaseURL(e.BaseURL)\n\tif err != nil {\n\t\tlog.WithError(err).Panic(\"Failed to get base url\")\n\t}\n\n\tdb, err := loadDatabase(e.DatabaseType, e.DatabaseURL, e.ConfigFile)\n\tif err != nil {\n\t\tlog.WithError(err).Panic(\"Failed to open database\")\n\t}\n\n\t\/\/ Populate the database from the config file if one was supplied.\n\tvar cfg *api.ConfigFile\n\tif e.ConfigFile != \"\" {\n\t\tif cfg, err = loadFromConfig(db, e.ConfigFile); err != nil {\n\t\t\tlog.WithError(err).WithField(\"config_file\", e.ConfigFile).Panic(\"Failed to load config file\")\n\t\t}\n\t\tif err := db.InsertFromConfig(cfg); err != nil {\n\t\t\tlog.WithError(err).Panic(\"Failed to persist config data into in-memory DB\")\n\t\t}\n\t\tlog.Info(\"Inserted \", len(cfg.Clients), \" clients\")\n\t\tlog.Info(\"Inserted \", len(cfg.Realms), \" realms\")\n\t\tlog.Info(\"Inserted \", len(cfg.Sessions), \" sessions\")\n\t}\n\n\tclients := clients.New(db, matrixClient)\n\tif err := clients.Start(); err != nil {\n\t\tlog.WithError(err).Panic(\"Failed to start up clients\")\n\t}\n\n\t\/\/ Handle non-admin paths for normal NEB functioning\n\tmux.Handle(\"\/metrics\", prometheus.Handler())\n\tmux.Handle(\"\/test\", prometheus.InstrumentHandler(\"test\", util.MakeJSONAPI(&handlers.Heartbeat{})))\n\twh := handlers.NewWebhook(db, clients)\n\tmux.HandleFunc(\"\/services\/hooks\/\", prometheus.InstrumentHandlerFunc(\"webhookHandler\", util.Protect(wh.Handle)))\n\trh := &handlers.RealmRedirect{db}\n\tmux.HandleFunc(\"\/realms\/redirects\/\", prometheus.InstrumentHandlerFunc(\"realmRedirectHandler\", util.Protect(rh.Handle)))\n\n\t\/\/ Read exclusively from the config file if one was supplied.\n\t\/\/ Otherwise, add HTTP listeners for new Services\/Sessions\/Clients\/etc.\n\tif e.ConfigFile != \"\" {\n\t\tif err := insertServicesFromConfig(clients, cfg.Services); err != nil {\n\t\t\tlog.WithError(err).Panic(\"Failed to insert services\")\n\t\t}\n\n\t\tlog.Info(\"Inserted \", len(cfg.Services), \" services\")\n\t} else {\n\t\tmux.Handle(\"\/admin\/getService\", prometheus.InstrumentHandler(\"getService\", util.MakeJSONAPI(&handlers.GetService{db})))\n\t\tmux.Handle(\"\/admin\/getSession\", prometheus.InstrumentHandler(\"getSession\", util.MakeJSONAPI(&handlers.GetSession{db})))\n\t\tmux.Handle(\"\/admin\/configureClient\", prometheus.InstrumentHandler(\"configureClient\", util.MakeJSONAPI(&handlers.ConfigureClient{clients})))\n\t\tmux.Handle(\"\/admin\/configureService\", prometheus.InstrumentHandler(\"configureService\", util.MakeJSONAPI(handlers.NewConfigureService(db, clients))))\n\t\tmux.Handle(\"\/admin\/configureAuthRealm\", prometheus.InstrumentHandler(\"configureAuthRealm\", util.MakeJSONAPI(&handlers.ConfigureAuthRealm{db})))\n\t\tmux.Handle(\"\/admin\/requestAuthSession\", prometheus.InstrumentHandler(\"requestAuthSession\", util.MakeJSONAPI(&handlers.RequestAuthSession{db})))\n\t\tmux.Handle(\"\/admin\/removeAuthSession\", prometheus.InstrumentHandler(\"removeAuthSession\", util.MakeJSONAPI(&handlers.RemoveAuthSession{db})))\n\t}\n\tpolling.SetClients(clients)\n\tif err := polling.Start(); 
err != nil {\n\t\tlog.WithError(err).Panic(\"Failed to start polling\")\n\t}\n}\n\ntype envVars struct {\n\tBindAddress string\n\tDatabaseType string\n\tDatabaseURL string\n\tBaseURL string\n\tLogDir string\n\tConfigFile string\n}\n\nfunc main() {\n\te := envVars{\n\t\tBindAddress: os.Getenv(\"BIND_ADDRESS\"),\n\t\tDatabaseType: os.Getenv(\"DATABASE_TYPE\"),\n\t\tDatabaseURL: os.Getenv(\"DATABASE_URL\"),\n\t\tBaseURL: os.Getenv(\"BASE_URL\"),\n\t\tLogDir: os.Getenv(\"LOG_DIR\"),\n\t\tConfigFile: os.Getenv(\"CONFIG_FILE\"),\n\t}\n\n\tif e.LogDir != \"\" {\n\t\tlog.AddHook(dugong.NewFSHook(\n\t\t\tfilepath.Join(e.LogDir, \"info.log\"),\n\t\t\tfilepath.Join(e.LogDir, \"warn.log\"),\n\t\t\tfilepath.Join(e.LogDir, \"error.log\"),\n\t\t\t&log.TextFormatter{\n\t\t\t\tTimestampFormat: \"2006-01-02 15:04:05.000000\",\n\t\t\t\tDisableColors: true,\n\t\t\t\tDisableTimestamp: false,\n\t\t\t\tDisableSorting: false,\n\t\t\t}, &dugong.DailyRotationSchedule{GZip: true},\n\t\t))\n\t}\n\n\tlog.Infof(\"Go-NEB (%+v)\", e)\n\n\tsetup(e, http.DefaultServeMux, http.DefaultClient)\n\tlog.Fatal(http.ListenAndServe(e.BindAddress, nil))\n}\n<commit_msg>Turn off gzipping; this should really be a config option<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/matrix-org\/dugong\"\n\t\"github.com\/matrix-org\/go-neb\/api\"\n\t\"github.com\/matrix-org\/go-neb\/api\/handlers\"\n\t\"github.com\/matrix-org\/go-neb\/clients\"\n\t\"github.com\/matrix-org\/go-neb\/database\"\n\t_ \"github.com\/matrix-org\/go-neb\/metrics\"\n\t\"github.com\/matrix-org\/go-neb\/polling\"\n\t_ \"github.com\/matrix-org\/go-neb\/realms\/github\"\n\t_ \"github.com\/matrix-org\/go-neb\/realms\/jira\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/echo\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/giphy\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/github\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/google\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/guggy\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/imgur\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/jira\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/rssbot\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/slackapi\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/travisci\"\n\t_ \"github.com\/matrix-org\/go-neb\/services\/wikipedia\"\n\t\"github.com\/matrix-org\/go-neb\/types\"\n\t\"github.com\/matrix-org\/util\"\n\t_ \"github.com\/mattn\/go-sqlite3\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\tyaml \"gopkg.in\/yaml.v2\"\n)\n\n\/\/ loadFromConfig loads a config file and returns a ConfigFile\nfunc loadFromConfig(db *database.ServiceDB, configFilePath string) (*api.ConfigFile, error) {\n\t\/\/ ::Horrible hacks ahead::\n\t\/\/ The config is represented as YAML, and we want to convert that into NEB types.\n\t\/\/ However, NEB types make liberal use of json.RawMessage which the YAML parser\n\t\/\/ doesn't like. We can't implement MarshalYAML\/UnmarshalYAML as a custom type easily\n\t\/\/ because YAML is insane and supports numbers as keys. The YAML parser therefore has the\n\t\/\/ generic form of map[interface{}]interface{} - but the JSON parser doesn't know\n\t\/\/ how to parse that.\n\t\/\/\n\t\/\/ The hack that follows gets around this by type asserting all parsed YAML keys as\n\t\/\/ strings then re-encoding\/decoding as JSON. 
That is:\n\t\/\/ YAML bytes -> map[interface]interface -> map[string]interface -> JSON bytes -> NEB types\n\n\t\/\/ Convert to YAML bytes\n\tcontents, err := ioutil.ReadFile(configFilePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Convert to map[interface]interface\n\tvar cfg map[interface{}]interface{}\n\tif err = yaml.Unmarshal(contents, &cfg); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to unmarshal YAML: %s\", err)\n\t}\n\n\t\/\/ Convert to map[string]interface\n\tdict := convertKeysToStrings(cfg)\n\n\t\/\/ Convert to JSON bytes\n\tb, err := json.Marshal(dict)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to marshal config as JSON: %s\", err)\n\t}\n\n\t\/\/ Finally, Convert to NEB types\n\tvar c api.ConfigFile\n\tif err := json.Unmarshal(b, &c); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to convert to config file: %s\", err)\n\t}\n\n\t\/\/ sanity check (at least 1 client and 1 service)\n\tif len(c.Clients) == 0 || len(c.Services) == 0 {\n\t\treturn nil, fmt.Errorf(\"At least 1 client and 1 service must be specified\")\n\t}\n\n\treturn &c, nil\n}\n\nfunc convertKeysToStrings(iface interface{}) interface{} {\n\tobj, isObj := iface.(map[interface{}]interface{})\n\tif isObj {\n\t\tstrObj := make(map[string]interface{})\n\t\tfor k, v := range obj {\n\t\t\tstrObj[k.(string)] = convertKeysToStrings(v) \/\/ handle nested objects\n\t\t}\n\t\treturn strObj\n\t}\n\n\tarr, isArr := iface.([]interface{})\n\tif isArr {\n\t\tfor i := range arr {\n\t\t\tarr[i] = convertKeysToStrings(arr[i]) \/\/ handle nested objects\n\t\t}\n\t\treturn arr\n\t}\n\treturn iface \/\/ base type like string or number\n}\n\nfunc insertServicesFromConfig(clis *clients.Clients, serviceReqs []api.ConfigureServiceRequest) error {\n\tfor i, s := range serviceReqs {\n\t\tif err := s.Check(); err != nil {\n\t\t\treturn fmt.Errorf(\"config: Service[%d] : %s\", i, err)\n\t\t}\n\t\tservice, err := types.CreateService(s.ID, s.Type, s.UserID, s.Config)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"config: Service[%d] : %s\", i, err)\n\t\t}\n\n\t\t\/\/ Fetch the client for this service and register\/poll\n\t\tc, err := clis.Client(s.UserID)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"config: Service[%d] : %s\", i, err)\n\t\t}\n\n\t\tif err = service.Register(nil, c); err != nil {\n\t\t\treturn fmt.Errorf(\"config: Service[%d] : %s\", i, err)\n\t\t}\n\t\tif _, err := database.GetServiceDB().StoreService(service); err != nil {\n\t\t\treturn fmt.Errorf(\"config: Service[%d] : %s\", i, err)\n\t\t}\n\t\tservice.PostRegister(nil)\n\t}\n\treturn nil\n}\n\nfunc loadDatabase(databaseType, databaseURL, configYAML string) (*database.ServiceDB, error) {\n\tif configYAML != \"\" {\n\t\tdatabaseType = \"sqlite3\"\n\t\tdatabaseURL = \":memory:?_busy_timeout=5000\"\n\t}\n\n\tdb, err := database.Open(databaseType, databaseURL)\n\tif err == nil {\n\t\tdatabase.SetServiceDB(db) \/\/ set singleton\n\t}\n\treturn db, err\n}\n\nfunc setup(e envVars, mux *http.ServeMux, matrixClient *http.Client) {\n\terr := types.BaseURL(e.BaseURL)\n\tif err != nil {\n\t\tlog.WithError(err).Panic(\"Failed to get base url\")\n\t}\n\n\tdb, err := loadDatabase(e.DatabaseType, e.DatabaseURL, e.ConfigFile)\n\tif err != nil {\n\t\tlog.WithError(err).Panic(\"Failed to open database\")\n\t}\n\n\t\/\/ Populate the database from the config file if one was supplied.\n\tvar cfg *api.ConfigFile\n\tif e.ConfigFile != \"\" {\n\t\tif cfg, err = loadFromConfig(db, e.ConfigFile); err != nil {\n\t\t\tlog.WithError(err).WithField(\"config_file\", 
e.ConfigFile).Panic(\"Failed to load config file\")\n\t\t}\n\t\tif err := db.InsertFromConfig(cfg); err != nil {\n\t\t\tlog.WithError(err).Panic(\"Failed to persist config data into in-memory DB\")\n\t\t}\n\t\tlog.Info(\"Inserted \", len(cfg.Clients), \" clients\")\n\t\tlog.Info(\"Inserted \", len(cfg.Realms), \" realms\")\n\t\tlog.Info(\"Inserted \", len(cfg.Sessions), \" sessions\")\n\t}\n\n\tclients := clients.New(db, matrixClient)\n\tif err := clients.Start(); err != nil {\n\t\tlog.WithError(err).Panic(\"Failed to start up clients\")\n\t}\n\n\t\/\/ Handle non-admin paths for normal NEB functioning\n\tmux.Handle(\"\/metrics\", prometheus.Handler())\n\tmux.Handle(\"\/test\", prometheus.InstrumentHandler(\"test\", util.MakeJSONAPI(&handlers.Heartbeat{})))\n\twh := handlers.NewWebhook(db, clients)\n\tmux.HandleFunc(\"\/services\/hooks\/\", prometheus.InstrumentHandlerFunc(\"webhookHandler\", util.Protect(wh.Handle)))\n\trh := &handlers.RealmRedirect{db}\n\tmux.HandleFunc(\"\/realms\/redirects\/\", prometheus.InstrumentHandlerFunc(\"realmRedirectHandler\", util.Protect(rh.Handle)))\n\n\t\/\/ Read exclusively from the config file if one was supplied.\n\t\/\/ Otherwise, add HTTP listeners for new Services\/Sessions\/Clients\/etc.\n\tif e.ConfigFile != \"\" {\n\t\tif err := insertServicesFromConfig(clients, cfg.Services); err != nil {\n\t\t\tlog.WithError(err).Panic(\"Failed to insert services\")\n\t\t}\n\n\t\tlog.Info(\"Inserted \", len(cfg.Services), \" services\")\n\t} else {\n\t\tmux.Handle(\"\/admin\/getService\", prometheus.InstrumentHandler(\"getService\", util.MakeJSONAPI(&handlers.GetService{db})))\n\t\tmux.Handle(\"\/admin\/getSession\", prometheus.InstrumentHandler(\"getSession\", util.MakeJSONAPI(&handlers.GetSession{db})))\n\t\tmux.Handle(\"\/admin\/configureClient\", prometheus.InstrumentHandler(\"configureClient\", util.MakeJSONAPI(&handlers.ConfigureClient{clients})))\n\t\tmux.Handle(\"\/admin\/configureService\", prometheus.InstrumentHandler(\"configureService\", util.MakeJSONAPI(handlers.NewConfigureService(db, clients))))\n\t\tmux.Handle(\"\/admin\/configureAuthRealm\", prometheus.InstrumentHandler(\"configureAuthRealm\", util.MakeJSONAPI(&handlers.ConfigureAuthRealm{db})))\n\t\tmux.Handle(\"\/admin\/requestAuthSession\", prometheus.InstrumentHandler(\"requestAuthSession\", util.MakeJSONAPI(&handlers.RequestAuthSession{db})))\n\t\tmux.Handle(\"\/admin\/removeAuthSession\", prometheus.InstrumentHandler(\"removeAuthSession\", util.MakeJSONAPI(&handlers.RemoveAuthSession{db})))\n\t}\n\tpolling.SetClients(clients)\n\tif err := polling.Start(); err != nil {\n\t\tlog.WithError(err).Panic(\"Failed to start polling\")\n\t}\n}\n\ntype envVars struct {\n\tBindAddress string\n\tDatabaseType string\n\tDatabaseURL string\n\tBaseURL string\n\tLogDir string\n\tConfigFile string\n}\n\nfunc main() {\n\te := envVars{\n\t\tBindAddress: os.Getenv(\"BIND_ADDRESS\"),\n\t\tDatabaseType: os.Getenv(\"DATABASE_TYPE\"),\n\t\tDatabaseURL: os.Getenv(\"DATABASE_URL\"),\n\t\tBaseURL: os.Getenv(\"BASE_URL\"),\n\t\tLogDir: os.Getenv(\"LOG_DIR\"),\n\t\tConfigFile: os.Getenv(\"CONFIG_FILE\"),\n\t}\n\n\tif e.LogDir != \"\" {\n\t\tlog.AddHook(dugong.NewFSHook(\n\t\t\tfilepath.Join(e.LogDir, \"info.log\"),\n\t\t\tfilepath.Join(e.LogDir, \"warn.log\"),\n\t\t\tfilepath.Join(e.LogDir, \"error.log\"),\n\t\t\t&log.TextFormatter{\n\t\t\t\tTimestampFormat: \"2006-01-02 15:04:05.000000\",\n\t\t\t\tDisableColors: true,\n\t\t\t\tDisableTimestamp: false,\n\t\t\t\tDisableSorting: false,\n\t\t\t}, 
&dugong.DailyRotationSchedule{GZip: false},\n\t\t))\n\t}\n\n\tlog.Infof(\"Go-NEB (%+v)\", e)\n\n\tsetup(e, http.DefaultServeMux, http.DefaultClient)\n\tlog.Fatal(http.ListenAndServe(e.BindAddress, nil))\n}\n<|endoftext|>"} {"text":"<commit_before>package workspace\n\nimport (\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n)\n\n\/\/ Exercise is an implementation of a problem in a track.\ntype Exercise struct {\n\tRoot string\n\tTrack string\n\tSlug string\n}\n\n\/\/ NewExerciseFromDir constructs an exercise given the exercise directory.\nfunc NewExerciseFromDir(dir string) Exercise {\n\tslug := filepath.Base(dir)\n\tdir = filepath.Dir(dir)\n\ttrack := filepath.Base(dir)\n\troot := filepath.Dir(dir)\n\treturn Exercise{Root: root, Track: track, Slug: slug}\n}\n\n\/\/ Path is the normalized relative path.\n\/\/ It always has forward slashes, regardless\n\/\/ of the operating system.\nfunc (e Exercise) Path() string {\n\treturn path.Join(e.Track, e.Slug)\n}\n\n\/\/ Filepath is the absolute path on the filesystem.\nfunc (e Exercise) Filepath() string {\n\treturn filepath.Join(e.Root, e.Track, e.Slug)\n}\n\n\/\/ MetadataFilepath is the absolute path to the exercise metadata.\nfunc (e Exercise) MetadataFilepath() string {\n\treturn filepath.Join(e.Filepath(), metadataFilepath)\n}\n\n\/\/ LegacyMetadataFilepath is the absolute path to the legacy exercise metadata.\nfunc (e Exercise) LegacyMetadataFilepath() string {\n\treturn filepath.Join(e.Filepath(), legacyMetadataFilename)\n}\n\n\/\/ MetadataDir returns the directory that the exercise metadata lives in.\n\/\/ For now this is the exercise directory.\nfunc (e Exercise) MetadataDir() string {\n\treturn e.Filepath()\n}\n\n\/\/ HasMetadata checks for the presence of an exercise metadata file.\n\/\/ If there is no such file, this may be a legacy exercise.\n\/\/ It could also be an unrelated directory.\nfunc (e Exercise) HasMetadata() (bool, error) {\n\t_, err := os.Lstat(e.MetadataFilepath())\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\treturn false, err\n}\n\n\/\/ HasLegacyMetadata checks for the presence of a legacy exercise metadata file.\n\/\/ If there is no such file, it could also be an unrelated directory.\nfunc (e Exercise) HasLegacyMetadata() (bool, error) {\n\t_, err := os.Lstat(e.LegacyMetadataFilepath())\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\treturn false, err\n}\n\n\/\/ MigrationStatus represents the result of migrating a legacy metadata file.\ntype MigrationStatus int\n\n\/\/ MigrationStatus\nconst (\n\tMigrationStatusNoop MigrationStatus = iota\n\tMigrationStatusMigrated\n\tMigrationStatusRemoved\n)\n\nfunc (m MigrationStatus) String() string {\n\tswitch m {\n\tcase MigrationStatusMigrated:\n\t\treturn \"\\nMigrated metadata\\n\"\n\tcase MigrationStatusRemoved:\n\t\treturn \"\\nRemoved legacy metadata\\n\"\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\n\/\/ MigrateLegacyMetadataFile migrates a legacy metadata file to the modern location.\n\/\/ This is a noop if the metadata file isn't legacy.\n\/\/ If both legacy and modern metadata files exist, the legacy file will be deleted.\nfunc (e Exercise) MigrateLegacyMetadataFile() (MigrationStatus, error) {\n\tif ok, _ := e.HasLegacyMetadata(); !ok {\n\t\treturn MigrationStatusNoop, nil\n\t}\n\tif err := os.MkdirAll(filepath.Dir(e.MetadataFilepath()), os.FileMode(0755)); err != nil {\n\t\treturn MigrationStatusNoop, err\n\t}\n\tif ok, _ := e.HasMetadata(); !ok {\n\t\tif err := 
os.Rename(e.LegacyMetadataFilepath(), e.MetadataFilepath()); err != nil {\n\t\t\treturn MigrationStatusNoop, err\n\t\t}\n\t\treturn MigrationStatusMigrated, nil\n\t}\n\tif err := os.Remove(e.LegacyMetadataFilepath()); err != nil {\n\t\treturn MigrationStatusNoop, err\n\t}\n\treturn MigrationStatusRemoved, nil\n}\n<commit_msg>gofmt<commit_after>package workspace\n\nimport (\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n)\n\n\/\/ Exercise is an implementation of a problem in a track.\ntype Exercise struct {\n\tRoot string\n\tTrack string\n\tSlug string\n}\n\n\/\/ NewExerciseFromDir constructs an exercise given the exercise directory.\nfunc NewExerciseFromDir(dir string) Exercise {\n\tslug := filepath.Base(dir)\n\tdir = filepath.Dir(dir)\n\ttrack := filepath.Base(dir)\n\troot := filepath.Dir(dir)\n\treturn Exercise{Root: root, Track: track, Slug: slug}\n}\n\n\/\/ Path is the normalized relative path.\n\/\/ It always has forward slashes, regardless\n\/\/ of the operating system.\nfunc (e Exercise) Path() string {\n\treturn path.Join(e.Track, e.Slug)\n}\n\n\/\/ Filepath is the absolute path on the filesystem.\nfunc (e Exercise) Filepath() string {\n\treturn filepath.Join(e.Root, e.Track, e.Slug)\n}\n\n\/\/ MetadataFilepath is the absolute path to the exercise metadata.\nfunc (e Exercise) MetadataFilepath() string {\n\treturn filepath.Join(e.Filepath(), metadataFilepath)\n}\n\n\/\/ LegacyMetadataFilepath is the absolute path to the legacy exercise metadata.\nfunc (e Exercise) LegacyMetadataFilepath() string {\n\treturn filepath.Join(e.Filepath(), legacyMetadataFilename)\n}\n\n\/\/ MetadataDir returns the directory that the exercise metadata lives in.\n\/\/ For now this is the exercise directory.\nfunc (e Exercise) MetadataDir() string {\n\treturn e.Filepath()\n}\n\n\/\/ HasMetadata checks for the presence of an exercise metadata file.\n\/\/ If there is no such file, this may be a legacy exercise.\n\/\/ It could also be an unrelated directory.\nfunc (e Exercise) HasMetadata() (bool, error) {\n\t_, err := os.Lstat(e.MetadataFilepath())\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\treturn false, err\n}\n\n\/\/ HasLegacyMetadata checks for the presence of a legacy exercise metadata file.\n\/\/ If there is no such file, it could also be an unrelated directory.\nfunc (e Exercise) HasLegacyMetadata() (bool, error) {\n\t_, err := os.Lstat(e.LegacyMetadataFilepath())\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\treturn false, err\n}\n\n\/\/ MigrationStatus represents the result of migrating a legacy metadata file.\ntype MigrationStatus int\n\n\/\/ MigrationStatus\nconst (\n\tMigrationStatusNoop MigrationStatus = iota\n\tMigrationStatusMigrated\n\tMigrationStatusRemoved\n)\n\nfunc (m MigrationStatus) String() string {\n\tswitch m {\n\tcase MigrationStatusMigrated:\n\t\treturn \"\\nMigrated metadata\\n\"\n\tcase MigrationStatusRemoved:\n\t\treturn \"\\nRemoved legacy metadata\\n\"\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\n\/\/ MigrateLegacyMetadataFile migrates a legacy metadata file to the modern location.\n\/\/ This is a noop if the metadata file isn't legacy.\n\/\/ If both legacy and modern metadata files exist, the legacy file will be deleted.\nfunc (e Exercise) MigrateLegacyMetadataFile() (MigrationStatus, error) {\n\tif ok, _ := e.HasLegacyMetadata(); !ok {\n\t\treturn MigrationStatusNoop, nil\n\t}\n\tif err := os.MkdirAll(filepath.Dir(e.MetadataFilepath()), os.FileMode(0755)); err != nil 
{\n\t\treturn MigrationStatusNoop, err\n\t}\n\tif ok, _ := e.HasMetadata(); !ok {\n\t\tif err := os.Rename(e.LegacyMetadataFilepath(), e.MetadataFilepath()); err != nil {\n\t\t\treturn MigrationStatusNoop, err\n\t\t}\n\t\treturn MigrationStatusMigrated, nil\n\t}\n\tif err := os.Remove(e.LegacyMetadataFilepath()); err != nil {\n\t\treturn MigrationStatusNoop, err\n\t}\n\treturn MigrationStatusRemoved, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage server\n\nimport (\n\t. \"github.com\/pingcap\/check\"\n\tmysql \"github.com\/pingcap\/tidb\/mysqldef\"\n)\n\nvar _ = Suite(&testUtilSuite{})\n\ntype testUtilSuite struct {\n}\n\nfunc (s *testUtilSuite) TestDumpBinaryTime(c *C) {\n\tt, err := mysql.ParseTimestamp(\"0000-00-00 00:00:00.0000000\")\n\tc.Assert(err, IsNil)\n\td := dumpBinaryDateTime(t, nil)\n\tc.Assert(string(d), Equals, string([]byte{11, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0}))\n\tt, err = mysql.ParseDatetime(\"0000-00-00 00:00:00.0000000\")\n\tc.Assert(err, IsNil)\n\td = dumpBinaryDateTime(t, nil)\n\tc.Assert(string(d), Equals, string([]byte{11, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0}))\n\n\tt, err = mysql.ParseDate(\"0000-00-00\")\n\tc.Assert(err, IsNil)\n\td = dumpBinaryDateTime(t, nil)\n\tc.Assert(string(d), Equals, string([]byte{4, 1, 0, 1, 1}))\n\n\tmyDuration, err := mysql.ParseDuration(\"0000-00-00 00:00:00.0000000\", 6)\n\tc.Assert(err, IsNil)\n\td = dumpBinaryTime(myDuration.Duration)\n\tc.Assert(string(d), Equals, string([]byte{0}))\n}\n<commit_msg>tidb_server: use DeepEquals instead of Equals<commit_after>\/\/ Copyright 2015 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage server\n\nimport (\n\t. 
\"github.com\/pingcap\/check\"\n\tmysql \"github.com\/pingcap\/tidb\/mysqldef\"\n)\n\nvar _ = Suite(&testUtilSuite{})\n\ntype testUtilSuite struct {\n}\n\nfunc (s *testUtilSuite) TestDumpBinaryTime(c *C) {\n\tt, err := mysql.ParseTimestamp(\"0000-00-00 00:00:00.0000000\")\n\tc.Assert(err, IsNil)\n\td := dumpBinaryDateTime(t, nil)\n\tc.Assert(d, DeepEquals, []byte{11, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0})\n\tt, err = mysql.ParseDatetime(\"0000-00-00 00:00:00.0000000\")\n\tc.Assert(err, IsNil)\n\td = dumpBinaryDateTime(t, nil)\n\tc.Assert(d, DeepEquals, []byte{11, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0})\n\n\tt, err = mysql.ParseDate(\"0000-00-00\")\n\tc.Assert(err, IsNil)\n\td = dumpBinaryDateTime(t, nil)\n\tc.Assert(d, DeepEquals, []byte{4, 1, 0, 1, 1})\n\n\tmyDuration, err := mysql.ParseDuration(\"0000-00-00 00:00:00.0000000\", 6)\n\tc.Assert(err, IsNil)\n\td = dumpBinaryTime(myDuration.Duration)\n\tc.Assert(d, DeepEquals, []byte{0})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Reads the Minecraft Alpha world format.\npackage store_alpha\n\nimport (\n \"fmt\"\n \"io\"\n \"os\"\n \"path\"\n\n \"chunkymonkey\/chunk\/store\"\n . \"chunkymonkey\/types\"\n \"nbt\"\n)\n\ntype ChunkStoreAlpha struct {\n worldPath string\n}\n\nfunc NewChunkStoreAlpha(worldPath string) store.ChunkStore {\n return &ChunkStoreAlpha{\n worldPath: worldPath,\n }\n}\n\nfunc (s *ChunkStoreAlpha) chunkPath(chunkLoc *ChunkXZ) string {\n return path.Join(\n s.worldPath,\n base36Encode(int32(chunkLoc.X&63)),\n base36Encode(int32(chunkLoc.Z&63)),\n \"c.\"+base36Encode(int32(chunkLoc.X))+\".\"+base36Encode(int32(chunkLoc.Z))+\".dat\")\n}\n\n\/\/ Load a chunk from its NBT representation\nfunc (s *ChunkStoreAlpha) LoadChunk(chunkLoc *ChunkXZ) (reader store.ChunkReader, err os.Error) {\n if err != nil {\n return\n }\n\n file, err := os.Open(s.chunkPath(chunkLoc), os.O_RDONLY, 0)\n if err != nil {\n return\n }\n defer file.Close()\n\n reader, err = newChunkReader(file)\n if err != nil {\n return\n }\n\n loadedLoc := reader.ChunkLoc()\n if loadedLoc.X != chunkLoc.X || loadedLoc.Z != chunkLoc.Z {\n err = os.NewError(fmt.Sprintf(\n \"Attempted to load chunk for %+v, but got chunk identified as %+v\",\n chunkLoc,\n loadedLoc,\n ))\n }\n\n return\n}\n\n\/\/ Returned to chunks to pull their data from.\ntype chunkReader struct {\n chunkTag *nbt.NamedTag\n}\n\nfunc newChunkReader(reader io.Reader) (r *chunkReader, err os.Error) {\n chunkTag, err := nbt.Read(reader)\n if err != nil {\n return\n }\n\n r = &chunkReader{\n chunkTag: chunkTag,\n }\n\n return\n}\n\n\/\/ Returns the chunk location.\nfunc (r *chunkReader) ChunkLoc() *ChunkXZ {\n return &ChunkXZ{\n X: ChunkCoord(r.chunkTag.Lookup(\"\/Level\/xPos\").(*nbt.Int).Value),\n Z: ChunkCoord(r.chunkTag.Lookup(\"\/Level\/zPos\").(*nbt.Int).Value),\n }\n}\n\n\/\/ Returns the block IDs in the chunk.\nfunc (r *chunkReader) Blocks() []byte {\n return r.chunkTag.Lookup(\"\/Level\/Blocks\").(*nbt.ByteArray).Value\n}\n\n\/\/ Returns the block data in the chunk.\nfunc (r *chunkReader) BlockData() []byte {\n return r.chunkTag.Lookup(\"\/Level\/Data\").(*nbt.ByteArray).Value\n}\n\n\/\/ Returns the block light data in the chunk.\nfunc (r *chunkReader) BlockLight() []byte {\n return r.chunkTag.Lookup(\"\/Level\/BlockLight\").(*nbt.ByteArray).Value\n}\n\n\/\/ Returns the sky light data in the chunk.\nfunc (r *chunkReader) SkyLight() []byte {\n return r.chunkTag.Lookup(\"\/Level\/SkyLight\").(*nbt.ByteArray).Value\n}\n\n\/\/ Returns the height map data in the chunk.\nfunc (r *chunkReader) HeightMap() []byte 
{\n return r.chunkTag.Lookup(\"\/Level\/HeightMap\").(*nbt.ByteArray).Value\n}\n\n\/\/ Utility functions:\n\nfunc base36Encode(n int32) (s string) {\n alphabet := \"0123456789abcdefghijklmnopqrstuvwxyz\"\n negative := false\n\n if n < 0 {\n n = -n\n negative = true\n }\n if n == 0 {\n return \"0\"\n }\n\n for n != 0 {\n i := n % int32(len(alphabet))\n n \/= int32(len(alphabet))\n s = string(alphabet[i:i+1]) + s\n }\n if negative {\n s = \"-\" + s\n }\n return\n}\n<commit_msg>Minor tidyups to store_alpha.<commit_after>\/\/ Reads the Minecraft Alpha world format.\npackage store_alpha\n\nimport (\n \"fmt\"\n \"io\"\n \"os\"\n \"path\"\n\n \"chunkymonkey\/chunk\/store\"\n . \"chunkymonkey\/types\"\n \"nbt\"\n)\n\ntype chunkStoreAlpha struct {\n worldPath string\n}\n\nfunc NewChunkStoreAlpha(worldPath string) store.ChunkStore {\n return &chunkStoreAlpha{\n worldPath: worldPath,\n }\n}\n\nfunc (s *chunkStoreAlpha) chunkPath(chunkLoc *ChunkXZ) string {\n return path.Join(\n s.worldPath,\n base36Encode(int32(chunkLoc.X&63)),\n base36Encode(int32(chunkLoc.Z&63)),\n \"c.\"+base36Encode(int32(chunkLoc.X))+\".\"+base36Encode(int32(chunkLoc.Z))+\".dat\")\n}\n\n\/\/ Load a chunk from its NBT representation\nfunc (s *chunkStoreAlpha) LoadChunk(chunkLoc *ChunkXZ) (reader store.ChunkReader, err os.Error) {\n if err != nil {\n return\n }\n\n file, err := os.Open(s.chunkPath(chunkLoc), os.O_RDONLY, 0)\n if err != nil {\n return\n }\n defer file.Close()\n\n reader, err = newChunkReader(file)\n if err != nil {\n return\n }\n\n loadedLoc := reader.ChunkLoc()\n if loadedLoc.X != chunkLoc.X || loadedLoc.Z != chunkLoc.Z {\n err = os.NewError(fmt.Sprintf(\n \"Attempted to load chunk for %+v, but got chunk identified as %+v\",\n chunkLoc,\n loadedLoc,\n ))\n }\n\n return\n}\n\n\/\/ Returned to chunks to pull their data from.\ntype chunkReader struct {\n chunkTag *nbt.NamedTag\n}\n\nfunc newChunkReader(reader io.Reader) (r *chunkReader, err os.Error) {\n chunkTag, err := nbt.Read(reader)\n if err != nil {\n return\n }\n\n r = &chunkReader{\n chunkTag: chunkTag,\n }\n\n return\n}\n\nfunc (r *chunkReader) ChunkLoc() *ChunkXZ {\n return &ChunkXZ{\n X: ChunkCoord(r.chunkTag.Lookup(\"\/Level\/xPos\").(*nbt.Int).Value),\n Z: ChunkCoord(r.chunkTag.Lookup(\"\/Level\/zPos\").(*nbt.Int).Value),\n }\n}\n\nfunc (r *chunkReader) Blocks() []byte {\n return r.chunkTag.Lookup(\"\/Level\/Blocks\").(*nbt.ByteArray).Value\n}\n\nfunc (r *chunkReader) BlockData() []byte {\n return r.chunkTag.Lookup(\"\/Level\/Data\").(*nbt.ByteArray).Value\n}\n\nfunc (r *chunkReader) BlockLight() []byte {\n return r.chunkTag.Lookup(\"\/Level\/BlockLight\").(*nbt.ByteArray).Value\n}\n\nfunc (r *chunkReader) SkyLight() []byte {\n return r.chunkTag.Lookup(\"\/Level\/SkyLight\").(*nbt.ByteArray).Value\n}\n\nfunc (r *chunkReader) HeightMap() []byte {\n return r.chunkTag.Lookup(\"\/Level\/HeightMap\").(*nbt.ByteArray).Value\n}\n\n\/\/ Utility functions:\n\nfunc base36Encode(n int32) (s string) {\n alphabet := \"0123456789abcdefghijklmnopqrstuvwxyz\"\n negative := false\n\n if n < 0 {\n n = -n\n negative = true\n }\n if n == 0 {\n return \"0\"\n }\n\n for n != 0 {\n i := n % int32(len(alphabet))\n n \/= int32(len(alphabet))\n s = string(alphabet[i:i+1]) + s\n }\n if negative {\n s = \"-\" + s\n }\n return\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage time_test\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"testing\"\n\t. \"time\"\n)\n\nfunc TestTicker(t *testing.T) {\n\t\/\/ We want to test that a ticker takes as much time as expected.\n\t\/\/ Since we don't want the test to run for too long, we don't\n\t\/\/ want to use lengthy times. This makes the test inherently flaky.\n\t\/\/ So only report an error if it fails five times in a row.\n\n\tcount := 10\n\tdelta := 20 * Millisecond\n\n\t\/\/ On Darwin ARM64 the tick frequency seems limited. Issue 35692.\n\tif (runtime.GOOS == \"darwin\" || runtime.GOOS == \"ios\") && runtime.GOARCH == \"arm64\" {\n\t\t\/\/ The following test will run ticker count\/2 times then reset\n\t\t\/\/ the ticker to double the duration for the rest of count\/2.\n\t\t\/\/ Since tick frequency is limited on Darwin ARM64, use even\n\t\t\/\/ number to give the ticks more time to let the test pass.\n\t\t\/\/ See CL 220638.\n\t\tcount = 6\n\t\tdelta = 100 * Millisecond\n\t}\n\n\tvar errs []string\n\tlogErrs := func() {\n\t\tfor _, e := range errs {\n\t\t\tt.Log(e)\n\t\t}\n\t}\n\n\tfor i := 0; i < 5; i++ {\n\t\tticker := NewTicker(delta)\n\t\tt0 := Now()\n\t\tfor i := 0; i < count\/2; i++ {\n\t\t\t<-ticker.C\n\t\t}\n\t\tticker.Reset(delta * 2)\n\t\tfor i := count \/ 2; i < count; i++ {\n\t\t\t<-ticker.C\n\t\t}\n\t\tticker.Stop()\n\t\tt1 := Now()\n\t\tdt := t1.Sub(t0)\n\t\ttarget := 3 * delta * Duration(count\/2)\n\t\tslop := target * 2 \/ 10\n\t\tif dt < target-slop || dt > target+slop {\n\t\t\terrs = append(errs, fmt.Sprintf(\"%d %s ticks took %s, expected [%s,%s]\", count, delta, dt, target-slop, target+slop))\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Now test that the ticker stopped.\n\t\tSleep(2 * delta)\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\terrs = append(errs, \"Ticker did not shut down\")\n\t\t\tcontinue\n\t\tdefault:\n\t\t\t\/\/ ok\n\t\t}\n\n\t\t\/\/ Test passed, so all done.\n\t\tif len(errs) > 0 {\n\t\t\tt.Logf(\"saw %d errors, ignoring to avoid flakiness\", len(errs))\n\t\t\tlogErrs()\n\t\t}\n\n\t\treturn\n\t}\n\n\tt.Errorf(\"saw %d errors\", len(errs))\n\tlogErrs()\n}\n\n\/\/ Issue 21874\nfunc TestTickerStopWithDirectInitialization(t *testing.T) {\n\tc := make(chan Time)\n\ttk := &Ticker{C: c}\n\ttk.Stop()\n}\n\n\/\/ Test that a bug tearing down a ticker has been fixed. 
This routine should not deadlock.\nfunc TestTeardown(t *testing.T) {\n\tDelta := 100 * Millisecond\n\tif testing.Short() {\n\t\tDelta = 20 * Millisecond\n\t}\n\tfor i := 0; i < 3; i++ {\n\t\tticker := NewTicker(Delta)\n\t\t<-ticker.C\n\t\tticker.Stop()\n\t}\n}\n\n\/\/ Test the Tick convenience wrapper.\nfunc TestTick(t *testing.T) {\n\t\/\/ Test that giving a negative duration returns nil.\n\tif got := Tick(-1); got != nil {\n\t\tt.Errorf(\"Tick(-1) = %v; want nil\", got)\n\t}\n}\n\n\/\/ Test that NewTicker panics when given a duration less than zero.\nfunc TestNewTickerLtZeroDuration(t *testing.T) {\n\tdefer func() {\n\t\tif err := recover(); err == nil {\n\t\t\tt.Errorf(\"NewTicker(-1) should have panicked\")\n\t\t}\n\t}()\n\tNewTicker(-1)\n}\n\nfunc BenchmarkTicker(b *testing.B) {\n\tbenchmark(b, func(n int) {\n\t\tticker := NewTicker(Nanosecond)\n\t\tfor i := 0; i < n; i++ {\n\t\t\t<-ticker.C\n\t\t}\n\t\tticker.Stop()\n\t})\n}\n\nfunc BenchmarkTickerReset(b *testing.B) {\n\tbenchmark(b, func(n int) {\n\t\tticker := NewTicker(Nanosecond)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tticker.Reset(Nanosecond * 2)\n\t\t}\n\t\tticker.Stop()\n\t})\n}\n\nfunc BenchmarkTickerResetNaive(b *testing.B) {\n\tbenchmark(b, func(n int) {\n\t\tticker := NewTicker(Nanosecond)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tticker.Stop()\n\t\t\tticker = NewTicker(Nanosecond * 2)\n\t\t}\n\t\tticker.Stop()\n\t})\n}\n<commit_msg>time: increase slop for TestTicker<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage time_test\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"testing\"\n\t. \"time\"\n)\n\nfunc TestTicker(t *testing.T) {\n\t\/\/ We want to test that a ticker takes as much time as expected.\n\t\/\/ Since we don't want the test to run for too long, we don't\n\t\/\/ want to use lengthy times. This makes the test inherently flaky.\n\t\/\/ So only report an error if it fails five times in a row.\n\n\tcount := 10\n\tdelta := 20 * Millisecond\n\n\t\/\/ On Darwin ARM64 the tick frequency seems limited. 
Issue 35692.\n\tif (runtime.GOOS == \"darwin\" || runtime.GOOS == \"ios\") && runtime.GOARCH == \"arm64\" {\n\t\t\/\/ The following test will run ticker count\/2 times then reset\n\t\t\/\/ the ticker to double the duration for the rest of count\/2.\n\t\t\/\/ Since tick frequency is limited on Darwin ARM64, use even\n\t\t\/\/ number to give the ticks more time to let the test pass.\n\t\t\/\/ See CL 220638.\n\t\tcount = 6\n\t\tdelta = 100 * Millisecond\n\t}\n\n\tvar errs []string\n\tlogErrs := func() {\n\t\tfor _, e := range errs {\n\t\t\tt.Log(e)\n\t\t}\n\t}\n\n\tfor i := 0; i < 5; i++ {\n\t\tticker := NewTicker(delta)\n\t\tt0 := Now()\n\t\tfor i := 0; i < count\/2; i++ {\n\t\t\t<-ticker.C\n\t\t}\n\t\tticker.Reset(delta * 2)\n\t\tfor i := count \/ 2; i < count; i++ {\n\t\t\t<-ticker.C\n\t\t}\n\t\tticker.Stop()\n\t\tt1 := Now()\n\t\tdt := t1.Sub(t0)\n\t\ttarget := 3 * delta * Duration(count\/2)\n\t\tslop := target * 3 \/ 10\n\t\tif dt < target-slop || dt > target+slop {\n\t\t\terrs = append(errs, fmt.Sprintf(\"%d %s ticks then %d %s ticks took %s, expected [%s,%s]\", count\/2, delta, count\/2, delta*2, dt, target-slop, target+slop))\n\t\t\tif dt > target+slop {\n\t\t\t\t\/\/ System may be overloaded; sleep a bit\n\t\t\t\t\/\/ in the hopes it will recover.\n\t\t\t\tSleep(Second \/ 2)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Now test that the ticker stopped.\n\t\tSleep(2 * delta)\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\terrs = append(errs, \"Ticker did not shut down\")\n\t\t\tcontinue\n\t\tdefault:\n\t\t\t\/\/ ok\n\t\t}\n\n\t\t\/\/ Test passed, so all done.\n\t\tif len(errs) > 0 {\n\t\t\tt.Logf(\"saw %d errors, ignoring to avoid flakiness\", len(errs))\n\t\t\tlogErrs()\n\t\t}\n\n\t\treturn\n\t}\n\n\tt.Errorf(\"saw %d errors\", len(errs))\n\tlogErrs()\n}\n\n\/\/ Issue 21874\nfunc TestTickerStopWithDirectInitialization(t *testing.T) {\n\tc := make(chan Time)\n\ttk := &Ticker{C: c}\n\ttk.Stop()\n}\n\n\/\/ Test that a bug tearing down a ticker has been fixed. This routine should not deadlock.\nfunc TestTeardown(t *testing.T) {\n\tDelta := 100 * Millisecond\n\tif testing.Short() {\n\t\tDelta = 20 * Millisecond\n\t}\n\tfor i := 0; i < 3; i++ {\n\t\tticker := NewTicker(Delta)\n\t\t<-ticker.C\n\t\tticker.Stop()\n\t}\n}\n\n\/\/ Test the Tick convenience wrapper.\nfunc TestTick(t *testing.T) {\n\t\/\/ Test that giving a negative duration returns nil.\n\tif got := Tick(-1); got != nil {\n\t\tt.Errorf(\"Tick(-1) = %v; want nil\", got)\n\t}\n}\n\n\/\/ Test that NewTicker panics when given a duration less than zero.\nfunc TestNewTickerLtZeroDuration(t *testing.T) {\n\tdefer func() {\n\t\tif err := recover(); err == nil {\n\t\t\tt.Errorf(\"NewTicker(-1) should have panicked\")\n\t\t}\n\t}()\n\tNewTicker(-1)\n}\n\nfunc BenchmarkTicker(b *testing.B) {\n\tbenchmark(b, func(n int) {\n\t\tticker := NewTicker(Nanosecond)\n\t\tfor i := 0; i < n; i++ {\n\t\t\t<-ticker.C\n\t\t}\n\t\tticker.Stop()\n\t})\n}\n\nfunc BenchmarkTickerReset(b *testing.B) {\n\tbenchmark(b, func(n int) {\n\t\tticker := NewTicker(Nanosecond)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tticker.Reset(Nanosecond * 2)\n\t\t}\n\t\tticker.Stop()\n\t})\n}\n\nfunc BenchmarkTickerResetNaive(b *testing.B) {\n\tbenchmark(b, func(n int) {\n\t\tticker := NewTicker(Nanosecond)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tticker.Stop()\n\t\t\tticker = NewTicker(Nanosecond * 2)\n\t\t}\n\t\tticker.Stop()\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package in_test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t. 
\"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/ghttp\"\n\t\"github.com\/pivotal-cf-experimental\/pivnet-resource\/concourse\"\n\t\"github.com\/pivotal-cf-experimental\/pivnet-resource\/in\"\n\t\"github.com\/pivotal-cf-experimental\/pivnet-resource\/logger\"\n\t\"github.com\/pivotal-cf-experimental\/pivnet-resource\/pivnet\"\n\t\"github.com\/pivotal-cf-experimental\/pivnet-resource\/sanitizer\"\n\t\"github.com\/pivotal-cf-experimental\/pivnet-resource\/versions\"\n)\n\nvar _ = Describe(\"In\", func() {\n\tvar (\n\t\tserver *ghttp.Server\n\t\tfile1Contents string\n\n\t\tdownloadDir string\n\n\t\tginkgoLogger logger.Logger\n\n\t\tproductVersion string\n\t\tetag string\n\t\tversionWithETag string\n\n\t\tinRequest concourse.InRequest\n\t\tinCommand *in.InCommand\n\t\tpivnetReleasesResponse *pivnet.ReleasesResponse\n\t)\n\n\tBeforeEach(func() {\n\t\tserver = ghttp.NewServer()\n\n\t\tproductVersion = \"C\"\n\t\tetag = \"etag-0\"\n\n\t\tvar err error\n\t\tversionWithETag, err = versions.CombineVersionAndETag(productVersion, etag)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\treleaseID := 1234\n\t\tfile1URLPath := \"\/file1\"\n\t\tfile1URL := fmt.Sprintf(\"%s%s\", server.URL(), file1URLPath)\n\t\tfile1Contents = \"\"\n\t\tstatusCode := http.StatusOK\n\n\t\tpivnetReleasesResponse = &pivnet.ReleasesResponse{\n\t\t\tReleases: []pivnet.Release{\n\t\t\t\t{\n\t\t\t\t\tVersion: \"A\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tVersion: productVersion,\n\t\t\t\t\tID: releaseID,\n\t\t\t\t\tLinks: &pivnet.Links{\n\t\t\t\t\t\tProductFiles: map[string]string{\n\t\t\t\t\t\t\t\"href\": file1URL,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tVersion: \"B\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tserver.AppendHandlers(\n\t\t\tghttp.CombineHandlers(\n\t\t\t\tghttp.VerifyRequest(\n\t\t\t\t\t\"GET\",\n\t\t\t\t\tfmt.Sprintf(\"%s\/products\/%s\/releases\", apiPrefix, productSlug),\n\t\t\t\t),\n\t\t\t\tghttp.RespondWithJSONEncodedPtr(&statusCode, pivnetReleasesResponse),\n\t\t\t),\n\t\t)\n\n\t\tserver.AppendHandlers(\n\t\t\tghttp.CombineHandlers(\n\t\t\t\tghttp.VerifyRequest(\n\t\t\t\t\t\"POST\",\n\t\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\t\"%s\/products\/%s\/releases\/%d\/eula_acceptance\",\n\t\t\t\t\t\tapiPrefix,\n\t\t\t\t\t\tproductSlug,\n\t\t\t\t\t\treleaseID,\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tghttp.RespondWith(http.StatusOK, \"\"),\n\t\t\t),\n\t\t)\n\n\t\tserver.AppendHandlers(\n\t\t\tghttp.CombineHandlers(\n\t\t\t\tghttp.VerifyRequest(\n\t\t\t\t\t\"GET\",\n\t\t\t\t\tfile1URLPath,\n\t\t\t\t),\n\t\t\t\tghttp.RespondWith(http.StatusOK, file1Contents),\n\t\t\t),\n\t\t)\n\n\t\tdownloadDir, err = ioutil.TempDir(\"\", \"\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tinRequest = concourse.InRequest{\n\t\t\tSource: concourse.Source{\n\t\t\t\tAPIToken: \"some-api-token\",\n\t\t\t\tProductSlug: productSlug,\n\t\t\t\tEndpoint: server.URL(),\n\t\t\t},\n\t\t\tVersion: concourse.Version{\n\t\t\t\tversionWithETag,\n\t\t\t},\n\t\t}\n\n\t\tsanitized := concourse.SanitizedSource(inRequest.Source)\n\t\tsanitizer := sanitizer.NewSanitizer(sanitized, GinkgoWriter)\n\n\t\tginkgoLogger = logger.NewLogger(sanitizer)\n\n\t\tbinaryVersion := \"v0.1.2\"\n\t\tinCommand = in.NewInCommand(binaryVersion, ginkgoLogger, downloadDir)\n\t})\n\n\tAfterEach(func() {\n\t\tserver.Close()\n\n\t\terr := os.RemoveAll(downloadDir)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t})\n\n\tContext(\"when the version comes from concourse\", func() {\n\t\tIt(\"creates a version file with the downloaded version and 
etag\", func() {\n\t\t\t_, err := inCommand.Run(inRequest)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tversionFilepath := filepath.Join(downloadDir, \"version\")\n\t\t\tversionContents, err := ioutil.ReadFile(versionFilepath)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tExpect(string(versionContents)).To(Equal(versionWithETag))\n\t\t})\n\n\t\tIt(\"does not download any of the files in the specified release\", func() {\n\t\t\t_, err := inCommand.Run(inRequest)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tfiles, err := ioutil.ReadDir(downloadDir)\n\t\t\tExpect(err).ShouldNot(HaveOccurred())\n\n\t\t\t\/\/ the version file will always exist\n\t\t\tExpect(len(files)).To(Equal(1))\n\t\t\tExpect(files[0].Name()).To(Equal(\"version\"))\n\t\t})\n\t})\n\n\tContext(\"when the version is specified by the user\", func() {\n\t\tBeforeEach(func() {\n\t\t\tinRequest.Source.ProductVersion = \"1.2.5\"\n\t\t\tpivnetReleasesResponse.Releases[1].Version = \"1.2.5\"\n\t\t})\n\n\t\tIt(\"requests the configured release\", func() {\n\t\t\t_, err := inCommand.Run(inRequest)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tversionFilepath := filepath.Join(downloadDir, \"version\")\n\t\t\tversionContents, err := ioutil.ReadFile(versionFilepath)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tExpect(string(versionContents)).To(Equal(\"1.2.5\"))\n\t\t})\n\t})\n\n\tContext(\"when no api token is provided\", func() {\n\t\tBeforeEach(func() {\n\t\t\tinRequest.Source.APIToken = \"\"\n\t\t})\n\n\t\tIt(\"returns an error\", func() {\n\t\t\t_, err := inCommand.Run(inRequest)\n\t\t\tExpect(err).To(HaveOccurred())\n\n\t\t\tExpect(err.Error()).To(MatchRegexp(\".*api_token.*provided\"))\n\t\t})\n\t})\n\n\tContext(\"when no product slug is provided\", func() {\n\t\tBeforeEach(func() {\n\t\t\tinRequest.Source.ProductSlug = \"\"\n\t\t})\n\n\t\tIt(\"returns an error\", func() {\n\t\t\t_, err := inCommand.Run(inRequest)\n\t\t\tExpect(err).To(HaveOccurred())\n\n\t\t\tExpect(err.Error()).To(MatchRegexp(\".*product_slug.*provided\"))\n\t\t})\n\t})\n\n\tContext(\"when no product version is provided\", func() {\n\t\tBeforeEach(func() {\n\t\t\tinRequest.Source.ProductVersion = \"\"\n\t\t\tinRequest.Version.ProductVersion = \"\"\n\t\t})\n\n\t\tIt(\"returns an error\", func() {\n\t\t\t_, err := inCommand.Run(inRequest)\n\t\t\tExpect(err).To(HaveOccurred())\n\n\t\t\tExpect(err.Error()).To(MatchRegexp(\".*product_version.*provided\"))\n\t\t})\n\t})\n\n\tContext(\"when version is provided without etag\", func() {\n\t\tBeforeEach(func() {\n\t\t\tinRequest.Version = concourse.Version{\n\t\t\t\tProductVersion: productVersion,\n\t\t\t}\n\t\t})\n\n\t\tIt(\"returns without error\", func() {\n\t\t\t_, err := inCommand.Run(inRequest)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t})\n\t})\n})\n<commit_msg>Refactor in tests to use JustBeforeEach<commit_after>package in_test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. 
\"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/ghttp\"\n\t\"github.com\/pivotal-cf-experimental\/pivnet-resource\/concourse\"\n\t\"github.com\/pivotal-cf-experimental\/pivnet-resource\/in\"\n\t\"github.com\/pivotal-cf-experimental\/pivnet-resource\/logger\"\n\t\"github.com\/pivotal-cf-experimental\/pivnet-resource\/pivnet\"\n\t\"github.com\/pivotal-cf-experimental\/pivnet-resource\/sanitizer\"\n\t\"github.com\/pivotal-cf-experimental\/pivnet-resource\/versions\"\n)\n\nvar _ = Describe(\"In\", func() {\n\tvar (\n\t\tserver *ghttp.Server\n\n\t\treleaseID int\n\n\t\tfile1URLPath string\n\t\tfile1Contents string\n\t\tlinks *pivnet.Links\n\n\t\tdownloadDir string\n\n\t\tginkgoLogger logger.Logger\n\n\t\tproductVersion string\n\t\tetag string\n\t\tversionWithETag string\n\n\t\tinRequest concourse.InRequest\n\t\tinCommand *in.InCommand\n\t\tpivnetReleasesResponse *pivnet.ReleasesResponse\n\t)\n\n\tBeforeEach(func() {\n\t\tserver = ghttp.NewServer()\n\n\t\tproductVersion = \"C\"\n\t\tetag = \"etag-0\"\n\n\t\tvar err error\n\t\tversionWithETag, err = versions.CombineVersionAndETag(productVersion, etag)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\treleaseID = 1234\n\t\tfile1URLPath = \"\/file1\"\n\t\tfile1URL := fmt.Sprintf(\"%s%s\", server.URL(), file1URLPath)\n\t\tlinks = &pivnet.Links{\n\t\t\tProductFiles: map[string]string{\n\t\t\t\t\"href\": file1URL,\n\t\t\t},\n\t\t}\n\t\tfile1Contents = \"\"\n\n\t\tpivnetReleasesResponse = &pivnet.ReleasesResponse{\n\t\t\tReleases: []pivnet.Release{\n\t\t\t\t{\n\t\t\t\t\tVersion: \"A\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tVersion: productVersion,\n\t\t\t\t\tID: releaseID,\n\t\t\t\t\tLinks: links,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tVersion: \"B\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tdownloadDir, err = ioutil.TempDir(\"\", \"\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tinRequest = concourse.InRequest{\n\t\t\tSource: concourse.Source{\n\t\t\t\tAPIToken: \"some-api-token\",\n\t\t\t\tProductSlug: productSlug,\n\t\t\t\tEndpoint: server.URL(),\n\t\t\t},\n\t\t\tVersion: concourse.Version{\n\t\t\t\tversionWithETag,\n\t\t\t},\n\t\t}\n\n\t})\n\n\tJustBeforeEach(func() {\n\t\tserver.AppendHandlers(\n\t\t\tghttp.CombineHandlers(\n\t\t\t\tghttp.VerifyRequest(\n\t\t\t\t\t\"GET\",\n\t\t\t\t\tfmt.Sprintf(\"%s\/products\/%s\/releases\", apiPrefix, productSlug),\n\t\t\t\t),\n\t\t\t\tghttp.RespondWithJSONEncoded(http.StatusOK, pivnetReleasesResponse),\n\t\t\t),\n\t\t)\n\n\t\tserver.AppendHandlers(\n\t\t\tghttp.CombineHandlers(\n\t\t\t\tghttp.VerifyRequest(\n\t\t\t\t\t\"POST\",\n\t\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\t\"%s\/products\/%s\/releases\/%d\/eula_acceptance\",\n\t\t\t\t\t\tapiPrefix,\n\t\t\t\t\t\tproductSlug,\n\t\t\t\t\t\treleaseID,\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tghttp.RespondWith(http.StatusOK, \"\"),\n\t\t\t),\n\t\t)\n\n\t\tserver.AppendHandlers(\n\t\t\tghttp.CombineHandlers(\n\t\t\t\tghttp.VerifyRequest(\n\t\t\t\t\t\"GET\",\n\t\t\t\t\tfile1URLPath,\n\t\t\t\t),\n\t\t\t\tghttp.RespondWith(http.StatusOK, file1Contents),\n\t\t\t),\n\t\t)\n\n\t\tsanitized := concourse.SanitizedSource(inRequest.Source)\n\t\tsanitizer := sanitizer.NewSanitizer(sanitized, GinkgoWriter)\n\n\t\tginkgoLogger = logger.NewLogger(sanitizer)\n\n\t\tbinaryVersion := \"v0.1.2-unit-tests\"\n\t\tinCommand = in.NewInCommand(binaryVersion, ginkgoLogger, downloadDir)\n\t})\n\n\tAfterEach(func() {\n\t\tserver.Close()\n\n\t\terr := os.RemoveAll(downloadDir)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t})\n\n\tContext(\"when the version comes from concourse\", func() {\n\t\tIt(\"creates a 
version file with the downloaded version and etag\", func() {\n\t\t\t_, err := inCommand.Run(inRequest)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tversionFilepath := filepath.Join(downloadDir, \"version\")\n\t\t\tversionContents, err := ioutil.ReadFile(versionFilepath)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tExpect(string(versionContents)).To(Equal(versionWithETag))\n\t\t})\n\n\t\tIt(\"does not download any of the files in the specified release\", func() {\n\t\t\t_, err := inCommand.Run(inRequest)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tfiles, err := ioutil.ReadDir(downloadDir)\n\t\t\tExpect(err).ShouldNot(HaveOccurred())\n\n\t\t\t\/\/ the version file will always exist\n\t\t\tExpect(len(files)).To(Equal(1))\n\t\t\tExpect(files[0].Name()).To(Equal(\"version\"))\n\t\t})\n\t})\n\n\tContext(\"when the version is specified by the user\", func() {\n\t\tBeforeEach(func() {\n\t\t\tinRequest.Source.ProductVersion = \"1.2.5\"\n\t\t\tpivnetReleasesResponse.Releases[1].Version = \"1.2.5\"\n\t\t})\n\n\t\tIt(\"requests the configured release\", func() {\n\t\t\t_, err := inCommand.Run(inRequest)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tversionFilepath := filepath.Join(downloadDir, \"version\")\n\t\t\tversionContents, err := ioutil.ReadFile(versionFilepath)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tExpect(string(versionContents)).To(Equal(\"1.2.5\"))\n\t\t})\n\t})\n\n\tContext(\"when no api token is provided\", func() {\n\t\tBeforeEach(func() {\n\t\t\tinRequest.Source.APIToken = \"\"\n\t\t})\n\n\t\tIt(\"returns an error\", func() {\n\t\t\t_, err := inCommand.Run(inRequest)\n\t\t\tExpect(err).To(HaveOccurred())\n\n\t\t\tExpect(err.Error()).To(MatchRegexp(\".*api_token.*provided\"))\n\t\t})\n\t})\n\n\tContext(\"when no product slug is provided\", func() {\n\t\tBeforeEach(func() {\n\t\t\tinRequest.Source.ProductSlug = \"\"\n\t\t})\n\n\t\tIt(\"returns an error\", func() {\n\t\t\t_, err := inCommand.Run(inRequest)\n\t\t\tExpect(err).To(HaveOccurred())\n\n\t\t\tExpect(err.Error()).To(MatchRegexp(\".*product_slug.*provided\"))\n\t\t})\n\t})\n\n\tContext(\"when no product version is provided\", func() {\n\t\tBeforeEach(func() {\n\t\t\tinRequest.Source.ProductVersion = \"\"\n\t\t\tinRequest.Version.ProductVersion = \"\"\n\t\t})\n\n\t\tIt(\"returns an error\", func() {\n\t\t\t_, err := inCommand.Run(inRequest)\n\t\t\tExpect(err).To(HaveOccurred())\n\n\t\t\tExpect(err.Error()).To(MatchRegexp(\".*product_version.*provided\"))\n\t\t})\n\t})\n\n\tContext(\"when version is provided without etag\", func() {\n\t\tBeforeEach(func() {\n\t\t\tinRequest.Version = concourse.Version{\n\t\t\t\tProductVersion: productVersion,\n\t\t\t}\n\t\t})\n\n\t\tIt(\"returns without error\", func() {\n\t\t\t_, err := inCommand.Run(inRequest)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package inbound\n\nimport (\n\t\"encoding\/binary\"\n\t\"io\"\n)\n\n\/\/ GetPacket returns an bancho packet.\nfunc GetPacket(i io.Reader) (b BasePacket, errF error) {\n\terr := binary.Read(i, binary.LittleEndian, &b.ID)\n\tif i := checkErr(err); i > 0 {\n\t\tif i == 2 {\n\t\t\terrF = err\n\t\t}\n\t\treturn\n\t}\n\n\t\/\/ Read a byte and give no fucks if it returns an error\n\ti.Read(make([]byte, 1))\n\n\tvar contentLength uint32\n\terr = binary.Read(i, binary.LittleEndian, &contentLength)\n\tif i := checkErr(err); i > 0 {\n\t\t\/\/ You might think I like copypasting code. I don't. 
I fucking hate boilerplate code.\n\t\t\/\/ However, this is life.\n\t\tif i == 2 {\n\t\t\terrF = err\n\t\t}\n\t\treturn\n\t}\n\n\tb.Content = make([]byte, contentLength)\n\t_, err = i.Read(b.Content)\n\tif i := checkErr(err); i == 2 {\n\t\terrF = err\n\t\treturn\n\t}\n\n\tb.Initialised = true\n\n\treturn\n}\n\nfunc checkErr(e error) byte {\n\tif e == nil {\n\t\treturn 0\n\t}\n\tif e == io.ErrUnexpectedEOF {\n\t\treturn 1\n\t}\n\treturn 2\n}\n<commit_msg>EOF error is io.EOF, not io.UnexpectedEOF<commit_after>package inbound\n\nimport (\n\t\"encoding\/binary\"\n\t\"io\"\n)\n\n\/\/ GetPacket returns an bancho packet.\nfunc GetPacket(i io.Reader) (b BasePacket, errF error) {\n\terr := binary.Read(i, binary.LittleEndian, &b.ID)\n\tif i := checkErr(err); i > 0 {\n\t\tif i == 2 {\n\t\t\terrF = err\n\t\t}\n\t\treturn\n\t}\n\n\t\/\/ Read a byte and give no fucks if it returns an error\n\ti.Read(make([]byte, 1))\n\n\tvar contentLength uint32\n\terr = binary.Read(i, binary.LittleEndian, &contentLength)\n\tif i := checkErr(err); i > 0 {\n\t\t\/\/ You might think I like copypasting code. I don't. I fucking hate boilerplate code.\n\t\t\/\/ However, this is life.\n\t\tif i == 2 {\n\t\t\terrF = err\n\t\t}\n\t\treturn\n\t}\n\n\tb.Content = make([]byte, contentLength)\n\t_, err = i.Read(b.Content)\n\tif i := checkErr(err); i == 2 {\n\t\terrF = err\n\t\treturn\n\t}\n\n\tb.Initialised = true\n\n\treturn\n}\n\nfunc checkErr(e error) byte {\n\tif e == nil {\n\t\treturn 0\n\t}\n\tif e == io.EOF {\n\t\treturn 1\n\t}\n\treturn 2\n}\n<|endoftext|>"} {"text":"<commit_before>package statements\n\nconst (\n\t\/\/ Databases list all data bases\n\tDatabases = `\nSELECT\n\tdatname\nFROM\n\tpg_database\nWHERE\n\tNOT datistemplate\nORDER BY\n\tdatname ASC`\n\n\t\/\/ Schemas list all schema on data base\n\tSchemas = `\nSELECT\n\tschema_name\nFROM\n\tinformation_schema.schemata\nORDER BY\n\tschema_name ASC`\n\n\t\/\/ Tables list all tables\n\tTables = `\nSELECT\n\tn.nspname as \"schema\",\n\tc.relname as \"name\",\n\tCASE c.relkind\n\t\tWHEN 'r' THEN 'table'\n\t\tWHEN 'v' THEN 'view'\n\t\tWHEN 'm' THEN 'materialized_view'\n\t\tWHEN 'i' THEN 'index'\n\t\tWHEN 'S' THEN 'sequence'\n\t\tWHEN 's' THEN 'special'\n\t\tWHEN 'f' THEN 'foreign_table'\n\tEND as \"type\",\n\tpg_catalog.pg_get_userbyid(c.relowner) as \"owner\"\nFROM\n\tpg_catalog.pg_class c\nLEFT JOIN\n\tpg_catalog.pg_namespace n ON n.oid = c.relnamespace\nWHERE\n\tc.relkind IN ('r','v','m','S','s','') AND\n\tn.nspname !~ '^pg_toast' AND\n\tn.nspname NOT IN ('information_schema', 'pg_catalog') AND\n\thas_schema_privilege(n.nspname, 'USAGE')\nORDER BY 1, 2`\n)\n<commit_msg>splits tables listing sql in conditions<commit_after>package statements\n\nconst (\n\t\/\/ Databases list all data bases\n\tDatabases = `\nSELECT\n\tdatname\nFROM\n\tpg_database\nWHERE\n\tNOT datistemplate\nORDER BY\n\tdatname ASC`\n\n\t\/\/ Schemas list all schema on data base\n\tSchemas = `\nSELECT\n\tschema_name\nFROM\n\tinformation_schema.schemata\nORDER BY\n\tschema_name ASC`\n\n\t\/\/ Tables list all tables\n\tTablesSelect = `\nSELECT\n\tn.nspname as \"schema\",\n\tc.relname as \"name\",\n\tCASE c.relkind\n\t\tWHEN 'r' THEN 'table'\n\t\tWHEN 'v' THEN 'view'\n\t\tWHEN 'm' THEN 'materialized_view'\n\t\tWHEN 'i' THEN 'index'\n\t\tWHEN 'S' THEN 'sequence'\n\t\tWHEN 's' THEN 'special'\n\t\tWHEN 'f' THEN 'foreign_table'\n\tEND as \"type\",\n\tpg_catalog.pg_get_userbyid(c.relowner) as \"owner\"\nFROM\n\tpg_catalog.pg_class c\nLEFT JOIN\n\tpg_catalog.pg_namespace n ON n.oid = c.relnamespace 
`\n\tTablesWhere = `\nWHERE\n\tc.relkind IN ('r','v','m','S','s','') AND\n\tn.nspname !~ '^pg_toast' AND\n\tn.nspname NOT IN ('information_schema', 'pg_catalog') AND\n\thas_schema_privilege(n.nspname, 'USAGE') `\n\tTablesOrderBy = `\nORDER BY 1, 2`\n\tTables = TablesSelect + TablesWhere + TablesOrderBy\n)\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nconst (\n\tCmdGCloud = \"gcloud\"\n\tCmdKubectl = \"kubectl\"\n\tCmdCFSSL = \"cfssl\"\n\tCmdCFSSLJson = \"cfssljson\"\n)\n\nfunc main() {\n\n\tvar cmdCheck = &cobra.Command{\n\t\tUse: \"check\",\n\t\tShort: \"performs a dependency check\",\n\t\tLong: `This utility requires cfssl, gcloud, kubectl binaries to be \npresent in PATH. This command performs the dependency check.`,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tif err := checkDependencies(); err != nil {\n\t\t\t\tfmt.Println(\"Dependency check failed\")\n\t\t\t\tfmt.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfmt.Println(\"Dependency check passed. 
You are good to go.\")\n\t\t},\n\t}\n\n\tvar cmdInstallServiceCatalog = &cobra.Command{\n\t\tUse: \"install-service-catalog\",\n\t\tShort: \"installs Service Catalog in Kubernetes cluster\",\n\t\tLong: `installs Service Catalog in Kubernetes cluster.\nassumes kubectl is configured to connect to the Kubernetes cluster.`,\n\t\t\/\/ Args: cobra.MinimumNArgs(1),\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tif err := installServiceCatalog(); err != nil {\n\t\t\t\tfmt.Println(\"Service Catalog could not be installed\")\n\t\t\t\tfmt.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t},\n\t}\n\n\tvar cmdUninstallServiceCatalog = &cobra.Command{\n\t\tUse: \"uninstall-service-catalog\",\n\t\tShort: \"uninstalls Service Catalog in Kubernetes cluster\",\n\t\tLong: `uninstalls Service Catalog in Kubernetes cluster.\nassumes kubectl is configured to connect to the Kubernetes cluster.`,\n\t\tArgs: cobra.MinimumNArgs(1),\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tif err := uninstallServiceCatalog(args[0]); err != nil {\n\t\t\t\tfmt.Println(\"Service Catalog could not be installed\")\n\t\t\t\tfmt.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t},\n\t}\n\n\tvar rootCmd = &cobra.Command{Use: \"installer\"}\n\trootCmd.AddCommand(\n\t\tcmdCheck,\n\t\tcmdInstallServiceCatalog,\n\t\tcmdUninstallServiceCatalog,\n\t)\n\trootCmd.Execute()\n}\n\n\/\/ checkDependencies performs a lookup for binary executables that are\n\/\/ required for installing service catalog and configuring GCP broker.\n\/\/ TODO(droot): enhance it to perform connectivity check with Kubernetes Cluster\n\/\/ and user permissions etc.\nfunc checkDependencies() error {\n\trequiredCmds := []string{CmdGCloud, CmdKubectl, CmdCFSSL, CmdCFSSLJson}\n\n\tvar missingCmds []string\n\tfor _, cmd := range requiredCmds {\n\t\t_, err := exec.LookPath(cmd)\n\t\tif err != nil {\n\t\t\tmissingCmds = append(missingCmds, cmd)\n\t\t}\n\t}\n\n\tif len(missingCmds) > 0 {\n\t\treturn fmt.Errorf(\"%s commands not found in the PATH\", strings.Join(missingCmds, \",\"))\n\t}\n\treturn nil\n}\n\nfunc uninstallServiceCatalog(dir string) error {\n\t\/\/ ns := \"service-catalog\"\n\n\tfiles := []string{\n\t\t\"apiserver-deployment.yaml\",\n\t\t\"controller-manager-deployment.yaml\",\n\t\t\"tls-cert-secret.yaml\",\n\t\t\"etcd-svc.yaml\",\n\t\t\"etcd.yaml\",\n\t\t\"api-registration.yaml\",\n\t\t\"service.yaml\",\n\t\t\"rbac.yaml\",\n\t\t\"service-accounts.yaml\",\n\t\t\"namespace.yaml\",\n\t}\n\n\tfor _, f := range files {\n\t\toutput, err := exec.Command(\"kubectl\", \"delete\", \"-f\", filepath.Join(dir, f)).CombinedOutput()\n\t\tif err != nil {\n\t\t\tfmt.Errorf(\"error deleting resources in file: %v :: %v\", f, string(output))\n\t\t\t\/\/ TODO(droot): ignore failures and continue for deleting\n\t\t\tcontinue\n\t\t\t\/\/ return fmt.Errorf(\"deploy failed with output: %s :%v\", err, output)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc installServiceCatalog() error {\n\tif err := checkDependencies(); err != nil {\n\t\treturn err\n\t}\n\n\tns := \"service-catalog\"\n\n\tdir, err := ioutil.TempDir(\"\/tmp\", \"service-catalog\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating temporary dir: %v\", err)\n\t}\n\n\t\/\/ defer os.RemoveAll(dir)\n\n\tcaFilePath, apiServerCertFilePath, apiServerPKFilePath, err := generateSSLArtificats(ns, dir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error generating SSL artifacts : %v\", err)\n\t}\n\n\tfmt.Printf(\"generated caFilePath: %s, apiServerCertFilePath: %s, apiServerPKFilePath: %v \\n\",\n\t\tcaFilePath, 
apiServerCertFilePath, apiServerPKFilePath)\n\terr = generateYAMLs(dir, caFilePath, apiServerCertFilePath, apiServerPKFilePath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error generating YAML files: %v\", err)\n\t}\n\n\terr = deployYAML(dir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error deploying YAML files: %v\", err)\n\t}\n\tfmt.Println(\"Service Catalog installed successfully\")\n\n\treturn nil\n}\n\nfunc generateYAMLs(dir, caFilePath, apiServerCertFilePath, apiServerPKFilePath string) error {\n\tca, err := base64FileContent(caFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tapiServerCert, err := base64FileContent(apiServerCertFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tapiServerPK, err := base64FileContent(apiServerPKFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdata := map[string]string{\n\t\t\"CA_PUBLIC_KEY\": ca,\n\t\t\"SVC_PUBLIC_KEY\": apiServerCert,\n\t\t\"SVC_PRIVATE_KEY\": apiServerPK,\n\t}\n\n\terr = generateYAML(filepath.Join(dir, \"api-registration.yaml\"), \"templates\/api-registration.yaml.tmpl\", data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = generateYAML(filepath.Join(dir, \"tls-cert-secret.yaml\"), \"templates\/tls-cert-secret.yaml.tmpl\", data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfiles := []string{\"namespace.yaml\", \"service-accounts.yaml\", \"rbac.yaml\", \"service.yaml\", \"etcd.yaml\", \"etcd-svc.yaml\", \"apiserver-deployment.yaml\", \"controller-manager-deployment.yaml\"}\n\n\tfor _, f := range files {\n\t\terr := dumpYAML(\"templates\/\"+f, filepath.Join(dir, f))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc deployYAML(dir string) error {\n\tfiles := []string{\n\t\t\"namespace.yaml\",\n\t\t\"service-accounts.yaml\",\n\t\t\"rbac.yaml\",\n\t\t\"service.yaml\",\n\t\t\"api-registration.yaml\",\n\t\t\"etcd.yaml\",\n\t\t\"etcd-svc.yaml\",\n\t\t\"tls-cert-secret.yaml\",\n\t\t\"apiserver-deployment.yaml\",\n\t\t\"controller-manager-deployment.yaml\"}\n\n\tfor _, f := range files {\n\t\toutput, err := exec.Command(\"kubectl\", \"create\", \"-f\", filepath.Join(dir, f)).CombinedOutput()\n\t\t\/\/ TODO(droot): cleanup\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"deploy failed with output: %s :%v\", err, string(output))\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc generateYAML(dst, src string, data map[string]string) error {\n\tb, err := Asset(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttp, err := template.New(\"\").Parse(string(b))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf, err := os.Create(dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\terr = tp.Execute(f, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc dumpYAML(src, dst string) error {\n\tb, err := Asset(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(dst, b, 0644)\n}\n\nfunc base64FileContent(filePath string) (encoded string, err error) {\n\tb, err := ioutil.ReadFile(filePath)\n\tif err != nil {\n\t\treturn\n\t}\n\tencoded = base64.StdEncoding.EncodeToString(b)\n\tfmt.Printf(\"\\n['%s']\\n\", encoded)\n\treturn\n}\n\nfunc generateSSLArtificats(ns, dir string) (caFilePath, apiServerCertFilePath, apiServerPKFilePath string, err error) {\n\tapiServerSvcName := \"service-catalog-api\"\n\n\thost1 := fmt.Sprintf(\"%s.%s\", apiServerSvcName, ns)\n\thost2 := host1 + \".svc\"\n\n\tcsrInputJSON := fmt.Sprintf(`{\n\"hosts\": [ \"%s\",\"%s\" ],\n\"key\": {\n\t\"algo\": \"rsa\",\n\t\"size\": 2048\n},\n\"names\": [\n\t{\n\t\t\"C\": \"US\",\n\t\t\"L\": \"san jose\",\n\t\t\"O\": 
\"kube\",\n\t\t\"OU\": \"WWW\",\n\t\t\"ST\": \"California\"\n\t}\n]\n}`, host1, host2)\n\n\t\/\/ fmt.Println(csrInputJSON)\n\tgenKeyCmd := exec.Command(\"cfssl\", \"genkey\", \"--initca\", \"-\")\n\tgenKeyCmd.Stdin = strings.NewReader(csrInputJSON)\n\n\t\/\/ getKeyOut, err := genKeyCmd.CombinedOutput()\n\t\/\/ if err != nil {\n\t\/\/ \treturn\n\t\/\/ }\n\n\tcaFilePath = filepath.Join(dir, \"ca\")\n\tcmd2 := exec.Command(\"cfssljson\", \"-bare\", caFilePath)\n\n\tout, outErr, err := Pipeline(genKeyCmd, cmd2)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfmt.Printf(\"%s: %s\\n\", out, outErr)\n\n\tcertGenConfigContent := `{\n\"signing\": { \n \"default\": {\n\t\"expiry\": \"43800h\",\n\t\"usages\": [ \"signing\", \"key encipherment\", \"server\" ]\n }\n}\n}\n`\n\tcertConfigFilePath := filepath.Join(dir, \"cert-config.json\")\n\terr = ioutil.WriteFile(certConfigFilePath, []byte(certGenConfigContent), 0666)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error generating cert config : %v\", err)\n\t\treturn\n\t}\n\n\tcertGenJSON := fmt.Sprintf(`\n{\n\"CN\": \"%s\",\n\"hosts\": [ \"%s\",\"%s\" ],\n\"key\": {\n \"algo\": \"rsa\",\n \"size\": 2048\n}\n}\n`, apiServerSvcName, host1, host2)\n\tcertGenCmd := exec.Command(\"cfssl\", \"gencert\",\n\t\t\"-ca=\"+caFilePath+\".pem\",\n\t\t\"-ca-key=\"+caFilePath+\"-key.pem\",\n\t\t\"-config=\"+certConfigFilePath, \"-\")\n\tcertGenCmd.Stdin = strings.NewReader(certGenJSON)\n\n\tapiServerCertFilePath = filepath.Join(dir, \"apiserver\")\n\tcertSignCmd := exec.Command(\"cfssljson\", \"-bare\", apiServerCertFilePath)\n\n\t_, _, err = Pipeline(certGenCmd, certSignCmd)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error signing api server cert: %v\", err)\n\t\treturn\n\t}\n\n\tcaFilePath = caFilePath + \".pem\"\n\tapiServerPKFilePath = apiServerCertFilePath + \"-key.pem\"\n\tapiServerCertFilePath = apiServerCertFilePath + \".pem\"\n\treturn\n}\n\n\/\/\n\/\/ Note: This code is copied from https:\/\/gist.github.com\/kylelemons\/1525278\n\/\/\n\n\/\/ Pipeline strings together the given exec.Cmd commands in a similar fashion\n\/\/ to the Unix pipeline. 
Each command's standard output is connected to the\n\/\/ standard input of the next command, and the output of the final command in\n\/\/ the pipeline is returned, along with the collected standard error of all\n\/\/ commands and the first error found (if any).\n\/\/\n\/\/ To provide input to the pipeline, assign an io.Reader to the first's Stdin.\nfunc Pipeline(cmds ...*exec.Cmd) (pipeLineOutput, collectedStandardError []byte, pipeLineError error) {\n\t\/\/ Require at least one command\n\tif len(cmds) < 1 {\n\t\treturn nil, nil, nil\n\t}\n\n\t\/\/ Collect the output from the command(s)\n\tvar output bytes.Buffer\n\tvar stderr bytes.Buffer\n\n\tlast := len(cmds) - 1\n\tfor i, cmd := range cmds[:last] {\n\t\tvar err error\n\t\t\/\/ Connect each command's stdin to the previous command's stdout\n\t\tif cmds[i+1].Stdin, err = cmd.StdoutPipe(); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\t\/\/ Connect each command's stderr to a buffer\n\t\tcmd.Stderr = &stderr\n\t}\n\n\t\/\/ Connect the output and error for the last command\n\tcmds[last].Stdout, cmds[last].Stderr = &output, &stderr\n\n\t\/\/ Start each command\n\tfor _, cmd := range cmds {\n\t\tif err := cmd.Start(); err != nil {\n\t\t\treturn output.Bytes(), stderr.Bytes(), err\n\t\t}\n\t}\n\n\t\/\/ Wait for each command to complete\n\tfor _, cmd := range cmds {\n\t\tif err := cmd.Wait(); err != nil {\n\t\t\treturn output.Bytes(), stderr.Bytes(), err\n\t\t}\n\t}\n\n\t\/\/ Return the pipeline output and the collected standard error\n\treturn output.Bytes(), stderr.Bytes(), nil\n}\n<commit_msg>refinement and bug fix for api-registration.<commit_after>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nconst (\n\tCmdGCloud = \"gcloud\"\n\tCmdKubectl = \"kubectl\"\n\tCmdCFSSL = \"cfssl\"\n\tCmdCFSSLJson = \"cfssljson\"\n)\n\nfunc main() {\n\n\tvar cmdCheck = &cobra.Command{\n\t\tUse: \"check\",\n\t\tShort: \"performs a dependency check\",\n\t\tLong: `This utility requires cfssl, gcloud, kubectl binaries to be \npresent in PATH. This command performs the dependency check.`,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tif err := checkDependencies(); err != nil {\n\t\t\t\tfmt.Println(\"Dependency check failed\")\n\t\t\t\tfmt.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfmt.Println(\"Dependency check passed. 
You are good to go.\")\n\t\t},\n\t}\n\n\tvar cmdInstallServiceCatalog = &cobra.Command{\n\t\tUse: \"install-service-catalog\",\n\t\tShort: \"installs Service Catalog in Kubernetes cluster\",\n\t\tLong: `installs Service Catalog in Kubernetes cluster.\nassumes kubectl is configured to connect to the Kubernetes cluster.`,\n\t\t\/\/ Args: cobra.MinimumNArgs(1),\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tif err := installServiceCatalog(); err != nil {\n\t\t\t\tfmt.Println(\"Service Catalog could not be installed\")\n\t\t\t\tfmt.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t},\n\t}\n\n\tvar cmdUninstallServiceCatalog = &cobra.Command{\n\t\tUse: \"uninstall-service-catalog\",\n\t\tShort: \"uninstalls Service Catalog in Kubernetes cluster\",\n\t\tLong: `uninstalls Service Catalog in Kubernetes cluster.\nassumes kubectl is configured to connect to the Kubernetes cluster.`,\n\t\tArgs: cobra.MinimumNArgs(1),\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tif err := uninstallServiceCatalog(args[0]); err != nil {\n\t\t\t\tfmt.Println(\"Service Catalog could not be installed\")\n\t\t\t\tfmt.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t},\n\t}\n\n\tvar rootCmd = &cobra.Command{Use: \"installer\"}\n\trootCmd.AddCommand(\n\t\tcmdCheck,\n\t\tcmdInstallServiceCatalog,\n\t\tcmdUninstallServiceCatalog,\n\t)\n\trootCmd.Execute()\n}\n\n\/\/ checkDependencies performs a lookup for binary executables that are\n\/\/ required for installing service catalog and configuring GCP broker.\n\/\/ TODO(droot): enhance it to perform connectivity check with Kubernetes Cluster\n\/\/ and user permissions etc.\nfunc checkDependencies() error {\n\trequiredCmds := []string{CmdGCloud, CmdKubectl, CmdCFSSL, CmdCFSSLJson}\n\n\tvar missingCmds []string\n\tfor _, cmd := range requiredCmds {\n\t\t_, err := exec.LookPath(cmd)\n\t\tif err != nil {\n\t\t\tmissingCmds = append(missingCmds, cmd)\n\t\t}\n\t}\n\n\tif len(missingCmds) > 0 {\n\t\treturn fmt.Errorf(\"%s commands not found in the PATH\", strings.Join(missingCmds, \",\"))\n\t}\n\treturn nil\n}\n\nfunc uninstallServiceCatalog(dir string) error {\n\t\/\/ ns := \"service-catalog\"\n\n\tfiles := []string{\n\t\t\"apiserver-deployment.yaml\",\n\t\t\"controller-manager-deployment.yaml\",\n\t\t\"tls-cert-secret.yaml\",\n\t\t\"etcd-svc.yaml\",\n\t\t\"etcd.yaml\",\n\t\t\"api-registration.yaml\",\n\t\t\"service.yaml\",\n\t\t\"rbac.yaml\",\n\t\t\"service-accounts.yaml\",\n\t\t\"namespace.yaml\",\n\t}\n\n\tfor _, f := range files {\n\t\toutput, err := exec.Command(\"kubectl\", \"delete\", \"-f\", filepath.Join(dir, f)).CombinedOutput()\n\t\tif err != nil {\n\t\t\tfmt.Errorf(\"error deleting resources in file: %v :: %v\", f, string(output))\n\t\t\t\/\/ TODO(droot): ignore failures and continue for deleting\n\t\t\tcontinue\n\t\t\t\/\/ return fmt.Errorf(\"deploy failed with output: %s :%v\", err, output)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc installServiceCatalog() error {\n\tif err := checkDependencies(); err != nil {\n\t\treturn err\n\t}\n\n\tns := \"service-catalog\"\n\n\tdir, err := ioutil.TempDir(\"\/tmp\", \"service-catalog\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating temporary dir: %v\", err)\n\t}\n\n\terr = generateCSRs(dir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error generating ca csr :%v\", err)\n\t}\n\t\/\/ defer os.RemoveAll(dir)\n\terr = dumpYAML(\"templates\/ca_config.json\", filepath.Join(dir, \"ca_config.json\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcaFilePath, apiServerCertFilePath, apiServerPKFilePath, err := 
generateSSLArtificats(ns, dir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error generating SSL artifacts : %v\", err)\n\t}\n\n\tfmt.Printf(\"generated caFilePath: %s, apiServerCertFilePath: %s, apiServerPKFilePath: %v \\n\",\n\t\tcaFilePath, apiServerCertFilePath, apiServerPKFilePath)\n\terr = generateYAMLs(dir, caFilePath, apiServerCertFilePath, apiServerPKFilePath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error generating YAML files: %v\", err)\n\t}\n\n\terr = deployYAML(dir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error deploying YAML files: %v\", err)\n\t}\n\tfmt.Println(\"Service Catalog installed successfully\")\n\n\treturn nil\n}\n\nfunc generateCSRs(dir string) error {\n\tapiServerSvcName := \"service-catalog-api\"\n\tns := \"service-catalog\"\n\n\thost1 := fmt.Sprintf(\"%s.%s\", apiServerSvcName, ns)\n\thost2 := host1 + \".svc\"\n\n\tdata := map[string]string{\n\t\t\"Host1\": host1,\n\t\t\"Host2\": host2,\n\t\t\"APIServiceName\": \"service-catalog-api\",\n\t}\n\n\terr := generateYAML(filepath.Join(dir, \"ca_csr.json\"), \"templates\/ca_csr.json.tmpl\", data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = generateYAML(filepath.Join(dir, \"gencert_config.json\"), \"templates\/gencert_config.json.tmpl\", data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc generateYAMLs(dir, caFilePath, apiServerCertFilePath, apiServerPKFilePath string) error {\n\tca, err := base64FileContent(caFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tapiServerCert, err := base64FileContent(apiServerCertFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tapiServerPK, err := base64FileContent(apiServerPKFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdata := map[string]string{\n\t\t\"CA_PUBLIC_KEY\": ca,\n\t\t\"SVC_PUBLIC_KEY\": apiServerCert,\n\t\t\"SVC_PRIVATE_KEY\": apiServerPK,\n\t}\n\n\terr = generateYAML(filepath.Join(dir, \"api-registration.yaml\"), \"templates\/api-registration.yaml.tmpl\", data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = generateYAML(filepath.Join(dir, \"tls-cert-secret.yaml\"), \"templates\/tls-cert-secret.yaml.tmpl\", data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfiles := []string{\"namespace.yaml\", \"service-accounts.yaml\", \"rbac.yaml\", \"service.yaml\", \"etcd.yaml\", \"etcd-svc.yaml\", \"apiserver-deployment.yaml\", \"controller-manager-deployment.yaml\"}\n\tfor _, f := range files {\n\t\terr := dumpYAML(\"templates\/\"+f, filepath.Join(dir, f))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc deployYAML(dir string) error {\n\tfiles := []string{\n\t\t\"namespace.yaml\",\n\t\t\"service-accounts.yaml\",\n\t\t\"rbac.yaml\",\n\t\t\"service.yaml\",\n\t\t\"api-registration.yaml\",\n\t\t\"etcd.yaml\",\n\t\t\"etcd-svc.yaml\",\n\t\t\"tls-cert-secret.yaml\",\n\t\t\"apiserver-deployment.yaml\",\n\t\t\"controller-manager-deployment.yaml\"}\n\n\tfor _, f := range files {\n\t\toutput, err := exec.Command(\"kubectl\", \"create\", \"-f\", filepath.Join(dir, f)).CombinedOutput()\n\t\t\/\/ TODO(droot): cleanup\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"deploy failed with output: %s :%v\", err, string(output))\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc generateYAML(dst, src string, data map[string]string) error {\n\tb, err := Asset(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttp, err := template.New(\"\").Parse(string(b))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf, err := os.Create(dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\terr = tp.Execute(f, data)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\treturn nil\n}\n\nfunc dumpYAML(src, dst string) error {\n\tb, err := Asset(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(dst, b, 0644)\n}\n\nfunc base64FileContent(filePath string) (encoded string, err error) {\n\tb, err := ioutil.ReadFile(filePath)\n\tif err != nil {\n\t\treturn\n\t}\n\tencoded = base64.StdEncoding.EncodeToString(b)\n\treturn\n}\n\nfunc generateSSLArtificats(ns, dir string) (caFilePath, apiServerCertFilePath, apiServerPKFilePath string, err error) {\n\n\tcsrInputJSON := filepath.Join(dir, \"ca_csr.json\")\n\n\tgenKeyCmd := exec.Command(\"cfssl\", \"genkey\", \"--initca\", csrInputJSON)\n\n\tcaFilePath = filepath.Join(dir, \"ca\")\n\tcmd2 := exec.Command(\"cfssljson\", \"-bare\", caFilePath)\n\n\tout, outErr, err := Pipeline(genKeyCmd, cmd2)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfmt.Printf(\"%s: %s\\n\", out, outErr)\n\n\tcertConfigFilePath := filepath.Join(dir, \"ca_config.json\")\n\tcertGenJSON := filepath.Join(dir, \"gencert_config.json\")\n\n\tcertGenCmd := exec.Command(\"cfssl\", \"gencert\",\n\t\t\"-ca=\"+caFilePath+\".pem\",\n\t\t\"-ca-key=\"+caFilePath+\"-key.pem\",\n\t\t\"-config=\"+certConfigFilePath, certGenJSON)\n\n\tapiServerCertFilePath = filepath.Join(dir, \"apiserver\")\n\tcertSignCmd := exec.Command(\"cfssljson\", \"-bare\", apiServerCertFilePath)\n\n\t_, _, err = Pipeline(certGenCmd, certSignCmd)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error signing api server cert: %v\", err)\n\t\treturn\n\t}\n\n\tcaFilePath = caFilePath + \".pem\"\n\tapiServerPKFilePath = apiServerCertFilePath + \"-key.pem\"\n\tapiServerCertFilePath = apiServerCertFilePath + \".pem\"\n\treturn\n}\n\n\/\/\n\/\/ Note: This code is copied from https:\/\/gist.github.com\/kylelemons\/1525278\n\/\/\n\n\/\/ Pipeline strings together the given exec.Cmd commands in a similar fashion\n\/\/ to the Unix pipeline. 
Each command's standard output is connected to the\n\/\/ standard input of the next command, and the output of the final command in\n\/\/ the pipeline is returned, along with the collected standard error of all\n\/\/ commands and the first error found (if any).\n\/\/\n\/\/ To provide input to the pipeline, assign an io.Reader to the first's Stdin.\nfunc Pipeline(cmds ...*exec.Cmd) (pipeLineOutput, collectedStandardError []byte, pipeLineError error) {\n\t\/\/ Require at least one command\n\tif len(cmds) < 1 {\n\t\treturn nil, nil, nil\n\t}\n\n\t\/\/ Collect the output from the command(s)\n\tvar output bytes.Buffer\n\tvar stderr bytes.Buffer\n\n\tlast := len(cmds) - 1\n\tfor i, cmd := range cmds[:last] {\n\t\tvar err error\n\t\t\/\/ Connect each command's stdin to the previous command's stdout\n\t\tif cmds[i+1].Stdin, err = cmd.StdoutPipe(); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\t\/\/ Connect each command's stderr to a buffer\n\t\tcmd.Stderr = &stderr\n\t}\n\n\t\/\/ Connect the output and error for the last command\n\tcmds[last].Stdout, cmds[last].Stderr = &output, &stderr\n\n\t\/\/ Start each command\n\tfor _, cmd := range cmds {\n\t\tif err := cmd.Start(); err != nil {\n\t\t\treturn output.Bytes(), stderr.Bytes(), err\n\t\t}\n\t}\n\n\t\/\/ Wait for each command to complete\n\tfor _, cmd := range cmds {\n\t\tif err := cmd.Wait(); err != nil {\n\t\t\treturn output.Bytes(), stderr.Bytes(), err\n\t\t}\n\t}\n\n\t\/\/ Return the pipeline output and the collected standard error\n\treturn output.Bytes(), stderr.Bytes(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/go:build linux\n\/\/ +build linux\n\npackage ipvlan\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/docker\/docker\/libnetwork\/driverapi\"\n\t\"github.com\/docker\/docker\/libnetwork\/netlabel\"\n\t\"github.com\/docker\/docker\/libnetwork\/ns\"\n\t\"github.com\/docker\/docker\/libnetwork\/options\"\n\t\"github.com\/docker\/docker\/libnetwork\/osl\"\n\t\"github.com\/docker\/docker\/libnetwork\/types\"\n\t\"github.com\/docker\/docker\/pkg\/parsers\/kernel\"\n\t\"github.com\/docker\/docker\/pkg\/stringid\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ CreateNetwork the network for the specified driver type\nfunc (d *driver) CreateNetwork(nid string, option map[string]interface{}, nInfo driverapi.NetworkInfo, ipV4Data, ipV6Data []driverapi.IPAMData) error {\n\tdefer osl.InitOSContext()()\n\tkv, err := kernel.GetKernelVersion()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to check kernel version for %s driver support: %v\", ipvlanType, err)\n\t}\n\t\/\/ ensure Kernel version is >= v4.2 for ipvlan support\n\tif kv.Kernel < ipvlanKernelVer || (kv.Kernel == ipvlanKernelVer && kv.Major < ipvlanMajorVer) {\n\t\treturn fmt.Errorf(\"kernel version failed to meet the minimum ipvlan kernel requirement of %d.%d, found %d.%d.%d\",\n\t\t\tipvlanKernelVer, ipvlanMajorVer, kv.Kernel, kv.Major, kv.Minor)\n\t}\n\t\/\/ reject a null v4 network\n\tif len(ipV4Data) == 0 || ipV4Data[0].Pool.String() == \"0.0.0.0\/0\" {\n\t\treturn fmt.Errorf(\"ipv4 pool is empty\")\n\t}\n\t\/\/ parse and validate the config and bind to networkConfiguration\n\tconfig, err := parseNetworkOptions(nid, option)\n\tif err != nil {\n\t\treturn err\n\t}\n\tconfig.ID = nid\n\terr = config.processIPAM(nid, ipV4Data, ipV6Data)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ verify the ipvlan mode from -o ipvlan_mode option\n\tswitch config.IpvlanMode {\n\tcase \"\", modeL2:\n\t\t\/\/ default to ipvlan L2 mode if -o ipvlan_mode is empty\n\t\tconfig.IpvlanMode = 
modeL2\n\tcase modeL3:\n\t\tconfig.IpvlanMode = modeL3\n\tcase modeL3S:\n\t\tconfig.IpvlanMode = modeL3S\n\tdefault:\n\t\treturn fmt.Errorf(\"requested ipvlan mode '%s' is not valid, 'l2' mode is the ipvlan driver default\", config.IpvlanMode)\n\t}\n\t\/\/ verify the ipvlan flag from -o ipvlan_flag option\n\tswitch config.IpvlanFlag {\n\tcase \"\", flagBridge:\n\t\t\/\/ default to bridge if -o ipvlan_flag is empty\n\t\tconfig.IpvlanFlag = flagBridge\n\tcase flagPrivate:\n\t\tconfig.IpvlanFlag = flagPrivate\n\tcase flagVepa:\n\t\tconfig.IpvlanFlag = flagVepa\n\tdefault:\n\t\treturn fmt.Errorf(\"requested ipvlan flag '%s' is not valid, 'bridge' is the ipvlan driver default\", config.IpvlanFlag)\n\t}\n\t\/\/ loopback is not a valid parent link\n\tif config.Parent == \"lo\" {\n\t\treturn fmt.Errorf(\"loopback interface is not a valid %s parent link\", ipvlanType)\n\t}\n\t\/\/ if parent interface not specified, create a dummy type link to use named dummy+net_id\n\tif config.Parent == \"\" {\n\t\tconfig.Parent = getDummyName(stringid.TruncateID(config.ID))\n\t}\n\tfoundExisting, err := d.createNetwork(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif foundExisting {\n\t\treturn types.InternalMaskableErrorf(\"restoring existing network %s\", config.ID)\n\t}\n\t\/\/ update persistent db, rollback on fail\n\terr = d.storeUpdate(config)\n\tif err != nil {\n\t\td.deleteNetwork(config.ID)\n\t\tlogrus.Debugf(\"encountered an error rolling back a network create for %s : %v\", config.ID, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ createNetwork is used by new network callbacks and persistent network cache\nfunc (d *driver) createNetwork(config *configuration) (bool, error) {\n\tfoundExisting := false\n\tnetworkList := d.getNetworks()\n\tfor _, nw := range networkList {\n\t\tif config.Parent == nw.config.Parent {\n\t\t\tif config.ID != nw.config.ID {\n\t\t\t\treturn false, fmt.Errorf(\"network %s is already using parent interface %s\",\n\t\t\t\t\tgetDummyName(stringid.TruncateID(nw.config.ID)), config.Parent)\n\t\t\t}\n\t\t\tlogrus.Debugf(\"Create Network for the same ID %s\\n\", config.ID)\n\t\t\tfoundExisting = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !parentExists(config.Parent) {\n\t\t\/\/ Create a dummy link if a dummy name is set for parent\n\t\tif dummyName := getDummyName(stringid.TruncateID(config.ID)); dummyName == config.Parent {\n\t\t\terr := createDummyLink(config.Parent, dummyName)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tconfig.CreatedSlaveLink = true\n\n\t\t\t\/\/ notify the user in logs they have limited communications\n\t\t\tlogrus.Debugf(\"Empty -o parent= flags limit communications to other containers inside of network: %s\",\n\t\t\t\tconfig.Parent)\n\t\t} else {\n\t\t\t\/\/ if the subinterface parent_iface.vlan_id checks do not pass, return err.\n\t\t\t\/\/ a valid example is 'eth0.10' for a parent iface 'eth0' with a vlan id '10'\n\t\t\terr := createVlanLink(config.Parent)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\t\/\/ if driver created the networks slave link, record it for future deletion\n\t\t\tconfig.CreatedSlaveLink = true\n\t\t}\n\t}\n\tif !foundExisting {\n\t\tn := &network{\n\t\t\tid: config.ID,\n\t\t\tdriver: d,\n\t\t\tendpoints: endpointTable{},\n\t\t\tconfig: config,\n\t\t}\n\t\t\/\/ add the network\n\t\td.addNetwork(n)\n\t}\n\n\treturn foundExisting, nil\n}\n\n\/\/ DeleteNetwork the network for the specified driver type\nfunc (d *driver) DeleteNetwork(nid string) error {\n\tdefer osl.InitOSContext()()\n\tn := 
d.network(nid)\n\tif n == nil {\n\t\treturn fmt.Errorf(\"network id %s not found\", nid)\n\t}\n\t\/\/ if the driver created the slave interface, delete it, otherwise leave it\n\tif ok := n.config.CreatedSlaveLink; ok {\n\t\t\/\/ if the interface exists, only delete if it matches iface.vlan or dummy.net_id naming\n\t\tif ok := parentExists(n.config.Parent); ok {\n\t\t\t\/\/ only delete the link if it is named the net_id\n\t\t\tif n.config.Parent == getDummyName(stringid.TruncateID(nid)) {\n\t\t\t\terr := delDummyLink(n.config.Parent)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Debugf(\"link %s was not deleted, continuing the delete network operation: %v\",\n\t\t\t\t\t\tn.config.Parent, err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t\/\/ only delete the link if it matches iface.vlan naming\n\t\t\t\terr := delVlanLink(n.config.Parent)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Debugf(\"link %s was not deleted, continuing the delete network operation: %v\",\n\t\t\t\t\t\tn.config.Parent, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfor _, ep := range n.endpoints {\n\t\tif link, err := ns.NlHandle().LinkByName(ep.srcName); err == nil {\n\t\t\tif err := ns.NlHandle().LinkDel(link); err != nil {\n\t\t\t\tlogrus.WithError(err).Warnf(\"Failed to delete interface (%s)'s link on endpoint (%s) delete\", ep.srcName, ep.id)\n\t\t\t}\n\t\t}\n\n\t\tif err := d.storeDelete(ep); err != nil {\n\t\t\tlogrus.Warnf(\"Failed to remove ipvlan endpoint %.7s from store: %v\", ep.id, err)\n\t\t}\n\t}\n\t\/\/ delete the *network\n\td.deleteNetwork(nid)\n\t\/\/ delete the network record from persistent cache\n\terr := d.storeDelete(n.config)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error deleting id %s from datastore: %v\", nid, err)\n\t}\n\treturn nil\n}\n\n\/\/ parseNetworkOptions parses docker network options\nfunc parseNetworkOptions(id string, option options.Generic) (*configuration, error) {\n\tvar (\n\t\terr error\n\t\tconfig = &configuration{}\n\t)\n\t\/\/ parse generic labels first\n\tif genData, ok := option[netlabel.GenericData]; ok && genData != nil {\n\t\tif config, err = parseNetworkGenericOptions(genData); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif val, ok := option[netlabel.Internal]; ok {\n\t\tif internal, ok := val.(bool); ok && internal {\n\t\t\tconfig.Internal = true\n\t\t}\n\t}\n\treturn config, nil\n}\n\n\/\/ parseNetworkGenericOptions parses generic driver docker network options\nfunc parseNetworkGenericOptions(data interface{}) (*configuration, error) {\n\tvar (\n\t\terr error\n\t\tconfig *configuration\n\t)\n\tswitch opt := data.(type) {\n\tcase *configuration:\n\t\tconfig = opt\n\tcase map[string]string:\n\t\tconfig = &configuration{}\n\t\terr = config.fromOptions(opt)\n\tcase options.Generic:\n\t\tvar opaqueConfig interface{}\n\t\tif opaqueConfig, err = options.GenerateFromModel(opt, config); err == nil {\n\t\t\tconfig = opaqueConfig.(*configuration)\n\t\t}\n\tdefault:\n\t\terr = types.BadRequestErrorf(\"unrecognized network configuration format: %v\", opt)\n\t}\n\treturn config, err\n}\n
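\n\/\/ For illustration (editor's sketch, not part of the original source): the\n\/\/ map form corresponds to '-o' driver options on the CLI, assuming parentOpt\n\/\/ and driverModeOpt hold the option names \"parent\" and \"ipvlan_mode\":\n\/\/\n\/\/\tcfg, err := parseNetworkGenericOptions(map[string]string{\n\/\/\t\t\"parent\": \"eth0.10\",\n\/\/\t\t\"ipvlan_mode\": \"l2\",\n\/\/\t})\n\n\/\/ fromOptions binds the generic options to networkConfiguration to cache\nfunc (config *configuration) fromOptions(labels map[string]string) error {\n\tfor label, value := range labels {\n\t\tswitch label {\n\t\tcase parentOpt:\n\t\t\t\/\/ parse driver option '-o parent'\n\t\t\tconfig.Parent = value\n\t\tcase driverModeOpt:\n\t\t\t\/\/ parse driver option '-o ipvlan_mode'\n\t\t\tconfig.IpvlanMode = value\n\t\tcase driverFlagOpt:\n\t\t\t\/\/ parse driver option '-o 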
ipvlan_flag'\n\t\t\tconfig.IpvlanFlag = value\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ processIPAM parses v4 and v6 IP information and binds it to the network configuration\nfunc (config *configuration) processIPAM(id string, ipamV4Data, ipamV6Data []driverapi.IPAMData) error {\n\tif len(ipamV4Data) > 0 {\n\t\tfor _, ipd := range ipamV4Data {\n\t\t\ts := &ipSubnet{\n\t\t\t\tSubnetIP: ipd.Pool.String(),\n\t\t\t\tGwIP: ipd.Gateway.String(),\n\t\t\t}\n\t\t\tconfig.Ipv4Subnets = append(config.Ipv4Subnets, s)\n\t\t}\n\t}\n\tif len(ipamV6Data) > 0 {\n\t\tfor _, ipd := range ipamV6Data {\n\t\t\ts := &ipSubnet{\n\t\t\t\tSubnetIP: ipd.Pool.String(),\n\t\t\t\tGwIP: ipd.Gateway.String(),\n\t\t\t}\n\t\t\tconfig.Ipv6Subnets = append(config.Ipv6Subnets, s)\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>libnetwork: ipvlan: set network ID as part of parseNetworkOptions<commit_after>\/\/go:build linux\n\/\/ +build linux\n\npackage ipvlan\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/docker\/docker\/libnetwork\/driverapi\"\n\t\"github.com\/docker\/docker\/libnetwork\/netlabel\"\n\t\"github.com\/docker\/docker\/libnetwork\/ns\"\n\t\"github.com\/docker\/docker\/libnetwork\/options\"\n\t\"github.com\/docker\/docker\/libnetwork\/osl\"\n\t\"github.com\/docker\/docker\/libnetwork\/types\"\n\t\"github.com\/docker\/docker\/pkg\/parsers\/kernel\"\n\t\"github.com\/docker\/docker\/pkg\/stringid\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ CreateNetwork the network for the specified driver type\nfunc (d *driver) CreateNetwork(nid string, option map[string]interface{}, nInfo driverapi.NetworkInfo, ipV4Data, ipV6Data []driverapi.IPAMData) error {\n\tdefer osl.InitOSContext()()\n\tkv, err := kernel.GetKernelVersion()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to check kernel version for %s driver support: %v\", ipvlanType, err)\n\t}\n\t\/\/ ensure Kernel version is >= v4.2 for ipvlan support\n\tif kv.Kernel < ipvlanKernelVer || (kv.Kernel == ipvlanKernelVer && kv.Major < ipvlanMajorVer) {\n\t\treturn fmt.Errorf(\"kernel version failed to meet the minimum ipvlan kernel requirement of %d.%d, found %d.%d.%d\",\n\t\t\tipvlanKernelVer, ipvlanMajorVer, kv.Kernel, kv.Major, kv.Minor)\n\t}\n\t\/\/ reject a null v4 network\n\tif len(ipV4Data) == 0 || ipV4Data[0].Pool.String() == \"0.0.0.0\/0\" {\n\t\treturn fmt.Errorf(\"ipv4 pool is empty\")\n\t}\n\t\/\/ parse and validate the config and bind to networkConfiguration\n\tconfig, err := parseNetworkOptions(nid, option)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = config.processIPAM(nid, ipV4Data, ipV6Data)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ verify the ipvlan mode from -o ipvlan_mode option\n\tswitch config.IpvlanMode {\n\tcase \"\", modeL2:\n\t\t\/\/ default to ipvlan L2 mode if -o ipvlan_mode is empty\n\t\tconfig.IpvlanMode = modeL2\n\tcase modeL3:\n\t\tconfig.IpvlanMode = modeL3\n\tcase modeL3S:\n\t\tconfig.IpvlanMode = modeL3S\n\tdefault:\n\t\treturn fmt.Errorf(\"requested ipvlan mode '%s' is not valid, 'l2' mode is the ipvlan driver default\", config.IpvlanMode)\n\t}\n\t\/\/ verify the ipvlan flag from -o ipvlan_flag option\n\tswitch config.IpvlanFlag {\n\tcase \"\", flagBridge:\n\t\t\/\/ default to bridge if -o ipvlan_flag is empty\n\t\tconfig.IpvlanFlag = flagBridge\n\tcase flagPrivate:\n\t\tconfig.IpvlanFlag = flagPrivate\n\tcase flagVepa:\n\t\tconfig.IpvlanFlag = flagVepa\n\tdefault:\n\t\treturn fmt.Errorf(\"requested ipvlan flag '%s' is not valid, 'bridge' is the ipvlan driver default\", config.IpvlanFlag)\n\t}\n\t\/\/ loopback is not a valid parent 
link\n\tif config.Parent == \"lo\" {\n\t\treturn fmt.Errorf(\"loopback interface is not a valid %s parent link\", ipvlanType)\n\t}\n\t\/\/ if parent interface not specified, create a dummy type link to use named dummy+net_id\n\tif config.Parent == \"\" {\n\t\tconfig.Parent = getDummyName(stringid.TruncateID(config.ID))\n\t}\n\tfoundExisting, err := d.createNetwork(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif foundExisting {\n\t\treturn types.InternalMaskableErrorf(\"restoring existing network %s\", config.ID)\n\t}\n\t\/\/ update persistent db, rollback on fail\n\terr = d.storeUpdate(config)\n\tif err != nil {\n\t\td.deleteNetwork(config.ID)\n\t\tlogrus.Debugf(\"encountered an error rolling back a network create for %s : %v\", config.ID, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ createNetwork is used by new network callbacks and persistent network cache\nfunc (d *driver) createNetwork(config *configuration) (bool, error) {\n\tfoundExisting := false\n\tnetworkList := d.getNetworks()\n\tfor _, nw := range networkList {\n\t\tif config.Parent == nw.config.Parent {\n\t\t\tif config.ID != nw.config.ID {\n\t\t\t\treturn false, fmt.Errorf(\"network %s is already using parent interface %s\",\n\t\t\t\t\tgetDummyName(stringid.TruncateID(nw.config.ID)), config.Parent)\n\t\t\t}\n\t\t\tlogrus.Debugf(\"Create Network for the same ID %s\\n\", config.ID)\n\t\t\tfoundExisting = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !parentExists(config.Parent) {\n\t\t\/\/ Create a dummy link if a dummy name is set for parent\n\t\tif dummyName := getDummyName(stringid.TruncateID(config.ID)); dummyName == config.Parent {\n\t\t\terr := createDummyLink(config.Parent, dummyName)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tconfig.CreatedSlaveLink = true\n\n\t\t\t\/\/ notify the user in logs they have limited communications\n\t\t\tlogrus.Debugf(\"Empty -o parent= flags limit communications to other containers inside of network: %s\",\n\t\t\t\tconfig.Parent)\n\t\t} else {\n\t\t\t\/\/ if the subinterface parent_iface.vlan_id checks do not pass, return err.\n\t\t\t\/\/ a valid example is 'eth0.10' for a parent iface 'eth0' with a vlan id '10'\n\t\t\terr := createVlanLink(config.Parent)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\t\/\/ if driver created the networks slave link, record it for future deletion\n\t\t\tconfig.CreatedSlaveLink = true\n\t\t}\n\t}\n\tif !foundExisting {\n\t\tn := &network{\n\t\t\tid: config.ID,\n\t\t\tdriver: d,\n\t\t\tendpoints: endpointTable{},\n\t\t\tconfig: config,\n\t\t}\n\t\t\/\/ add the network\n\t\td.addNetwork(n)\n\t}\n\n\treturn foundExisting, nil\n}\n\n\/\/ DeleteNetwork the network for the specified driver type\nfunc (d *driver) DeleteNetwork(nid string) error {\n\tdefer osl.InitOSContext()()\n\tn := d.network(nid)\n\tif n == nil {\n\t\treturn fmt.Errorf(\"network id %s not found\", nid)\n\t}\n\t\/\/ if the driver created the slave interface, delete it, otherwise leave it\n\tif ok := n.config.CreatedSlaveLink; ok {\n\t\t\/\/ if the interface exists, only delete if it matches iface.vlan or dummy.net_id naming\n\t\tif ok := parentExists(n.config.Parent); ok {\n\t\t\t\/\/ only delete the link if it is named the net_id\n\t\t\tif n.config.Parent == getDummyName(stringid.TruncateID(nid)) {\n\t\t\t\terr := delDummyLink(n.config.Parent)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Debugf(\"link %s was not deleted, continuing the delete network operation: %v\",\n\t\t\t\t\t\tn.config.Parent, err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t\/\/ only delete 
the link if it matches iface.vlan naming\n\t\t\t\terr := delVlanLink(n.config.Parent)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Debugf(\"link %s was not deleted, continuing the delete network operation: %v\",\n\t\t\t\t\t\tn.config.Parent, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfor _, ep := range n.endpoints {\n\t\tif link, err := ns.NlHandle().LinkByName(ep.srcName); err == nil {\n\t\t\tif err := ns.NlHandle().LinkDel(link); err != nil {\n\t\t\t\tlogrus.WithError(err).Warnf(\"Failed to delete interface (%s)'s link on endpoint (%s) delete\", ep.srcName, ep.id)\n\t\t\t}\n\t\t}\n\n\t\tif err := d.storeDelete(ep); err != nil {\n\t\t\tlogrus.Warnf(\"Failed to remove ipvlan endpoint %.7s from store: %v\", ep.id, err)\n\t\t}\n\t}\n\t\/\/ delete the *network\n\td.deleteNetwork(nid)\n\t\/\/ delete the network record from persistent cache\n\terr := d.storeDelete(n.config)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error deleting id %s from datastore: %v\", nid, err)\n\t}\n\treturn nil\n}\n\n\/\/ parseNetworkOptions parses docker network options\nfunc parseNetworkOptions(id string, option options.Generic) (*configuration, error) {\n\tvar (\n\t\terr error\n\t\tconfig = &configuration{}\n\t)\n\t\/\/ parse generic labels first\n\tif genData, ok := option[netlabel.GenericData]; ok && genData != nil {\n\t\tif config, err = parseNetworkGenericOptions(genData); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif val, ok := option[netlabel.Internal]; ok {\n\t\tif internal, ok := val.(bool); ok && internal {\n\t\t\tconfig.Internal = true\n\t\t}\n\t}\n\tconfig.ID = id\n\treturn config, nil\n}\n\n\/\/ parseNetworkGenericOptions parses generic driver docker network options\nfunc parseNetworkGenericOptions(data interface{}) (*configuration, error) {\n\tvar (\n\t\terr error\n\t\tconfig *configuration\n\t)\n\tswitch opt := data.(type) {\n\tcase *configuration:\n\t\tconfig = opt\n\tcase map[string]string:\n\t\tconfig = &configuration{}\n\t\terr = config.fromOptions(opt)\n\tcase options.Generic:\n\t\tvar opaqueConfig interface{}\n\t\tif opaqueConfig, err = options.GenerateFromModel(opt, config); err == nil {\n\t\t\tconfig = opaqueConfig.(*configuration)\n\t\t}\n\tdefault:\n\t\terr = types.BadRequestErrorf(\"unrecognized network configuration format: %v\", opt)\n\t}\n\treturn config, err\n}\n\n\/\/ fromOptions binds the generic options to networkConfiguration to cache\nfunc (config *configuration) fromOptions(labels map[string]string) error {\n\tfor label, value := range labels {\n\t\tswitch label {\n\t\tcase parentOpt:\n\t\t\t\/\/ parse driver option '-o parent'\n\t\t\tconfig.Parent = value\n\t\tcase driverModeOpt:\n\t\t\t\/\/ parse driver option '-o ipvlan_mode'\n\t\t\tconfig.IpvlanMode = value\n\t\tcase driverFlagOpt:\n\t\t\t\/\/ parse driver option '-o ipvlan_flag'\n\t\t\tconfig.IpvlanFlag = value\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ processIPAM parses v4 and v6 IP information and binds it to the network configuration\nfunc (config *configuration) processIPAM(id string, ipamV4Data, ipamV6Data []driverapi.IPAMData) error {\n\tif len(ipamV4Data) > 0 {\n\t\tfor _, ipd := range ipamV4Data {\n\t\t\ts := &ipSubnet{\n\t\t\t\tSubnetIP: ipd.Pool.String(),\n\t\t\t\tGwIP: ipd.Gateway.String(),\n\t\t\t}\n\t\t\tconfig.Ipv4Subnets = append(config.Ipv4Subnets, s)\n\t\t}\n\t}\n\tif len(ipamV6Data) > 0 {\n\t\tfor _, ipd := range ipamV6Data {\n\t\t\ts := &ipSubnet{\n\t\t\t\tSubnetIP: ipd.Pool.String(),\n\t\t\t\tGwIP: ipd.Gateway.String(),\n\t\t\t}\n\t\t\tconfig.Ipv6Subnets = append(config.Ipv6Subnets, 
s)\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage internal\n\nimport (\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/mod\/module\"\n\t\"golang.org\/x\/pkgsite\/internal\/licenses\"\n\t\"golang.org\/x\/pkgsite\/internal\/source\"\n\t\"golang.org\/x\/pkgsite\/internal\/stdlib\"\n)\n\nconst (\n\t\/\/ LatestVersion signifies the latest available version in requests to the\n\t\/\/ proxy client.\n\tLatestVersion = \"latest\"\n\n\t\/\/ MasterVersion signifies the version at master.\n\tMasterVersion = \"master\"\n\n\t\/\/ UnknownModulePath signifies that the module path for a given package\n\t\/\/ path is ambiguous or not known. This is because requests to the\n\t\/\/ frontend can come in the form of <import-path>[@<version>], and it is\n\t\/\/ not clear which part of the import-path is the module path.\n\tUnknownModulePath = \"unknownModulePath\"\n)\n\n\/\/ ModuleInfo holds metadata associated with a module.\ntype ModuleInfo struct {\n\tModulePath string\n\tVersion string\n\tCommitTime time.Time\n\tIsRedistributable bool\n\tHasGoMod bool \/\/ whether the module zip has a go.mod file\n\tSourceInfo *source.Info\n}\n\n\/\/ VersionMap holds metadata associated with module queries for a version.\ntype VersionMap struct {\n\tModulePath string\n\tRequestedVersion string\n\tResolvedVersion string\n\tGoModPath string\n\tStatus int\n\tError string\n\tUpdatedAt time.Time\n}\n\n\/\/ SeriesPath returns the series path for the module.\n\/\/\n\/\/ A series is a group of modules that share the same base path and are assumed\n\/\/ to be major-version variants.\n\/\/\n\/\/ The series path is the module path without the version. For most modules,\n\/\/ this will be the module path for all module versions with major version 0 or\n\/\/ 1. For gopkg.in modules, the series path does not correspond to any module\n\/\/ version.\n\/\/\n\/\/ Examples:\n\/\/ The module paths \"a\/b\" and \"a\/b\/v2\" both have series path \"a\/b\".\n\/\/ The module paths \"gopkg.in\/yaml.v1\" and \"gopkg.in\/yaml.v2\" both have series\n\/\/ path \"gopkg.in\/yaml\".\nfunc (v *ModuleInfo) SeriesPath() string {\n\treturn SeriesPathForModule(v.ModulePath)\n}\n\n\/\/ SeriesPathForModule returns the series path for the provided modulePath.\nfunc SeriesPathForModule(modulePath string) string {\n\tseriesPath, _, _ := module.SplitPathVersion(modulePath)\n\treturn seriesPath\n}\n\n\/\/ Suffix returns the suffix of the fullPath. It assumes that basePath is a\n\/\/ prefix of fullPath. If fullPath and basePath are the same, the empty string\n\/\/ is returned.\nfunc Suffix(fullPath, basePath string) string {\n\treturn strings.TrimPrefix(strings.TrimPrefix(fullPath, basePath), \"\/\")\n}\n\n\/\/ V1Path returns the path for version 1 of the package whose import path\n\/\/ is fullPath. 
If modulePath is the standard library, then V1Path returns\n\/\/ fullPath.\nfunc V1Path(fullPath, modulePath string) string {\n\tif modulePath == stdlib.ModulePath {\n\t\treturn fullPath\n\t}\n\treturn path.Join(SeriesPathForModule(modulePath), Suffix(fullPath, modulePath))\n}\n\n\/\/ A Module is a specific, reproducible build of a module.\ntype Module struct {\n\tLegacyModuleInfo\n\t\/\/ Licenses holds all licenses within this module version, including those\n\t\/\/ that may be contained in nested subdirectories.\n\tLicenses []*licenses.License\n\tUnits []*Unit\n\n\tLegacyPackages []*LegacyPackage\n}\n\n\/\/ IndexVersion holds the version information returned by the module index.\ntype IndexVersion struct {\n\tPath string\n\tVersion string\n\tTimestamp time.Time\n}\n\n\/\/ ModuleVersionState holds a worker module version state.\ntype ModuleVersionState struct {\n\tModulePath string\n\tVersion string\n\n\t\/\/ IndexTimestamp is the timestamp received from the Index for this version,\n\t\/\/ which should correspond to the time this version was committed to the\n\t\/\/ Index.\n\tIndexTimestamp time.Time\n\t\/\/ CreatedAt is the time this version was originally inserted into the\n\t\/\/ module version state table.\n\tCreatedAt time.Time\n\n\t\/\/ Status is the most recent HTTP status code received from the Fetch service\n\t\/\/ for this version, or nil if no request to the fetch service has been made.\n\tStatus int\n\t\/\/ Error is the most recent HTTP response body received from the Fetch\n\t\/\/ service, for a response with an unsuccessful status code. It is used for\n\t\/\/ debugging only, and has no semantic significance.\n\tError string\n\t\/\/ TryCount is the number of times a fetch of this version has been\n\t\/\/ attempted.\n\tTryCount int\n\t\/\/ LastProcessedAt is the last time this version was updated with a result\n\t\/\/ from the fetch service.\n\tLastProcessedAt *time.Time\n\t\/\/ NextProcessedAfter is the next time a fetch for this version should be\n\t\/\/ attempted.\n\tNextProcessedAfter time.Time\n\n\t\/\/ AppVersion is the value of the GAE_VERSION environment variable, which is\n\t\/\/ set by app engine. It is a timestamp in the format 20190709t112655 that\n\t\/\/ is close to, but not the same as, the deployment time. For example, the\n\t\/\/ deployment time for the above timestamp might be Jul 9, 2019, 11:29:59 AM.\n\tAppVersion string\n\n\t\/\/ GoModPath is the path declared in the go.mod file.\n\tGoModPath string\n\n\t\/\/ NumPackages is the number of packages that were processed as part of the\n\t\/\/ module (regardless of whether the processing was successful).\n\tNumPackages *int\n}\n\n\/\/ PackageVersionState holds a worker package version state. It is associated\n\/\/ with a given module version state.\ntype PackageVersionState struct {\n\tPackagePath string\n\tModulePath string\n\tVersion string\n\tStatus int\n\tError string\n}\n\n\/\/ SearchResult represents a single search result from SearchDocuments.\ntype SearchResult struct {\n\tName string\n\tPackagePath string\n\tModulePath string\n\tVersion string\n\tSynopsis string\n\tLicenses []string\n\n\tCommitTime time.Time\n\t\/\/ Score is used to sort items in an array of SearchResult.\n\tScore float64\n\n\t\/\/ NumImportedBy is the number of packages that import PackagePath.\n\tNumImportedBy uint64\n\n\t\/\/ NumResults is the total number of packages that were returned for this\n\t\/\/ search.\n\tNumResults uint64\n\t\/\/ Approximate reports whether NumResults is an approximate count. 
NumResults\n\t\/\/ can be approximate if search scanned only a subset of documents, and\n\t\/\/ result count is estimated using the hyperloglog algorithm.\n\tApproximate bool\n}\n<commit_msg>internal: add Module.Packages<commit_after>\/\/ Copyright 2019 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage internal\n\nimport (\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/mod\/module\"\n\t\"golang.org\/x\/pkgsite\/internal\/licenses\"\n\t\"golang.org\/x\/pkgsite\/internal\/source\"\n\t\"golang.org\/x\/pkgsite\/internal\/stdlib\"\n)\n\nconst (\n\t\/\/ LatestVersion signifies the latest available version in requests to the\n\t\/\/ proxy client.\n\tLatestVersion = \"latest\"\n\n\t\/\/ MasterVersion signifies the version at master.\n\tMasterVersion = \"master\"\n\n\t\/\/ UnknownModulePath signifies that the module path for a given package\n\t\/\/ path is ambiguous or not known. This is because requests to the\n\t\/\/ frontend can come in the form of <import-path>[@<version>], and it is\n\t\/\/ not clear which part of the import-path is the module path.\n\tUnknownModulePath = \"unknownModulePath\"\n)\n\n\/\/ ModuleInfo holds metadata associated with a module.\ntype ModuleInfo struct {\n\tModulePath string\n\tVersion string\n\tCommitTime time.Time\n\tIsRedistributable bool\n\tHasGoMod bool \/\/ whether the module zip has a go.mod file\n\tSourceInfo *source.Info\n}\n\n\/\/ VersionMap holds metadata associated with module queries for a version.\ntype VersionMap struct {\n\tModulePath string\n\tRequestedVersion string\n\tResolvedVersion string\n\tGoModPath string\n\tStatus int\n\tError string\n\tUpdatedAt time.Time\n}\n\n\/\/ SeriesPath returns the series path for the module.\n\/\/\n\/\/ A series is a group of modules that share the same base path and are assumed\n\/\/ to be major-version variants.\n\/\/\n\/\/ The series path is the module path without the version. For most modules,\n\/\/ this will be the module path for all module versions with major version 0 or\n\/\/ 1. For gopkg.in modules, the series path does not correspond to any module\n\/\/ version.\n\/\/\n\/\/ Examples:\n\/\/ The module paths \"a\/b\" and \"a\/b\/v2\" both have series path \"a\/b\".\n\/\/ The module paths \"gopkg.in\/yaml.v1\" and \"gopkg.in\/yaml.v2\" both have series\n\/\/ path \"gopkg.in\/yaml\".\nfunc (v *ModuleInfo) SeriesPath() string {\n\treturn SeriesPathForModule(v.ModulePath)\n}\n\n\/\/ SeriesPathForModule returns the series path for the provided modulePath.\nfunc SeriesPathForModule(modulePath string) string {\n\tseriesPath, _, _ := module.SplitPathVersion(modulePath)\n\treturn seriesPath\n}\n\n\/\/ Suffix returns the suffix of the fullPath. It assumes that basePath is a\n\/\/ prefix of fullPath. If fullPath and basePath are the same, the empty string\n\/\/ is returned.\nfunc Suffix(fullPath, basePath string) string {\n\treturn strings.TrimPrefix(strings.TrimPrefix(fullPath, basePath), \"\/\")\n}\n\n\/\/ V1Path returns the path for version 1 of the package whose import path\n\/\/ is fullPath. 
If modulePath is the standard library, then V1Path returns\n\/\/ fullPath.\nfunc V1Path(fullPath, modulePath string) string {\n\tif modulePath == stdlib.ModulePath {\n\t\treturn fullPath\n\t}\n\treturn path.Join(SeriesPathForModule(modulePath), Suffix(fullPath, modulePath))\n}\n\n\/\/ A Module is a specific, reproducible build of a module.\ntype Module struct {\n\tLegacyModuleInfo\n\t\/\/ Licenses holds all licenses within this module version, including those\n\t\/\/ that may be contained in nested subdirectories.\n\tLicenses []*licenses.License\n\tUnits []*Unit\n\n\tLegacyPackages []*LegacyPackage\n}\n\n\/\/ Packages returns all of the units for a module that are packages.\nfunc (m *Module) Packages() []*Unit {\n\tvar pkgs []*Unit\n\tfor _, u := range m.Units {\n\t\tif u.IsPackage() {\n\t\t\tpkgs = append(pkgs, u)\n\t\t}\n\t}\n\treturn pkgs\n}\n\n\/\/ IndexVersion holds the version information returned by the module index.\ntype IndexVersion struct {\n\tPath string\n\tVersion string\n\tTimestamp time.Time\n}\n\n\/\/ ModuleVersionState holds a worker module version state.\ntype ModuleVersionState struct {\n\tModulePath string\n\tVersion string\n\n\t\/\/ IndexTimestamp is the timestamp received from the Index for this version,\n\t\/\/ which should correspond to the time this version was committed to the\n\t\/\/ Index.\n\tIndexTimestamp time.Time\n\t\/\/ CreatedAt is the time this version was originally inserted into the\n\t\/\/ module version state table.\n\tCreatedAt time.Time\n\n\t\/\/ Status is the most recent HTTP status code received from the Fetch service\n\t\/\/ for this version, or nil if no request to the fetch service has been made.\n\tStatus int\n\t\/\/ Error is the most recent HTTP response body received from the Fetch\n\t\/\/ service, for a response with an unsuccessful status code. It is used for\n\t\/\/ debugging only, and has no semantic significance.\n\tError string\n\t\/\/ TryCount is the number of times a fetch of this version has been\n\t\/\/ attempted.\n\tTryCount int\n\t\/\/ LastProcessedAt is the last time this version was updated with a result\n\t\/\/ from the fetch service.\n\tLastProcessedAt *time.Time\n\t\/\/ NextProcessedAfter is the next time a fetch for this version should be\n\t\/\/ attempted.\n\tNextProcessedAfter time.Time\n\n\t\/\/ AppVersion is the value of the GAE_VERSION environment variable, which is\n\t\/\/ set by app engine. It is a timestamp in the format 20190709t112655 that\n\t\/\/ is close to, but not the same as, the deployment time. For example, the\n\t\/\/ deployment time for the above timestamp might be Jul 9, 2019, 11:29:59 AM.\n\tAppVersion string\n\n\t\/\/ GoModPath is the path declared in the go.mod file.\n\tGoModPath string\n\n\t\/\/ NumPackages is the number of packages that were processed as part of the\n\t\/\/ module (regardless of whether the processing was successful).\n\tNumPackages *int\n}\n
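\n\/\/ For illustration (editor's sketch, not in the original source): Packages,\n\/\/ added above, yields only the units that report IsPackage(), e.g.\n\/\/\n\/\/\tfor _, pkg := range m.Packages() {\n\/\/\t\t_ = pkg \/\/ each pkg is a *Unit representing a package\n\/\/\t}\n\n\/\/ PackageVersionState holds a worker package version state. 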
It is associated\n\/\/ with a given module version state.\ntype PackageVersionState struct {\n\tPackagePath string\n\tModulePath string\n\tVersion string\n\tStatus int\n\tError string\n}\n\n\/\/ SearchResult represents a single search result from SearchDocuments.\ntype SearchResult struct {\n\tName string\n\tPackagePath string\n\tModulePath string\n\tVersion string\n\tSynopsis string\n\tLicenses []string\n\n\tCommitTime time.Time\n\t\/\/ Score is used to sort items in an array of SearchResult.\n\tScore float64\n\n\t\/\/ NumImportedBy is the number of packages that import PackagePath.\n\tNumImportedBy uint64\n\n\t\/\/ NumResults is the total number of packages that were returned for this\n\t\/\/ search.\n\tNumResults uint64\n\t\/\/ Approximate reports whether NumResults is an approximate count. NumResults\n\t\/\/ can be approximate if search scanned only a subset of documents, and\n\t\/\/ result count is estimated using the hyperloglog algorithm.\n\tApproximate bool\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2022 ASoulDocs. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage store\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\tgoldmarktoc \"github.com\/abhinav\/goldmark-toc\"\n\t\"github.com\/pkg\/errors\"\n\t\"gopkg.in\/ini.v1\"\n\n\t\"github.com\/asoul-sig\/asouldocs\/internal\/osutil\"\n)\n\n\/\/ TOC represents documentation hierarchy for a specific language.\ntype TOC struct {\n\tLanguage string \/\/ The language of the documentation\n\tNodes []*Node \/\/ Directories of the documentation\n\tPages []*Node \/\/ Individuals pages of the documentation\n\n\tnodes map[string]*Node \/\/ Key is the Node.Path\n}\n\n\/\/ Node is a node in the documentation hierarchy.\ntype Node struct {\n\tCategory string \/\/ The category (name) of the node, for directories and single pages, categories are empty\n\tPath string \/\/ The URL path\n\tLocalPath string \/\/ Full path with .md extension\n\n\tContent []byte \/\/ The content of the node\n\tTitle string \/\/ The title of the document in the given language\n\tHeadings goldmarktoc.Items \/\/ Headings in the node\n\n\tNodes []*Node \/\/ The list of sub-nodes\n\tPrevious *PageLink \/\/ The previous page\n\tNext *PageLink \/\/ The next page\n}\n\n\/\/ PageLink is a link to another page.\ntype PageLink struct {\n\tTitle string \/\/ The title of the page\n\tPath string \/\/ the path to the page\n}\n\n\/\/ Reload reloads and converts the content from local disk.\nfunc (n *Node) Reload(baseURLPath string) error {\n\tpathPrefix := path.Join(baseURLPath, strings.SplitN(n.Path, \"\/\", 2)[0])\n\tcontent, meta, headings, err := convertFile(pathPrefix, n.LocalPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn.Content = content\n\tn.Title = fmt.Sprintf(\"%v\", meta[\"title\"])\n\tn.Headings = headings\n\n\tprevious, ok := meta[\"previous\"].(map[interface{}]interface{})\n\tif ok {\n\t\tn.Previous = &PageLink{\n\t\t\tTitle: fmt.Sprintf(\"%v\", previous[\"title\"]),\n\t\t\tPath: string(convertRelativeLink(pathPrefix, []byte(fmt.Sprintf(\"%v\", previous[\"path\"])))),\n\t\t}\n\t}\n\tnext, ok := meta[\"next\"].(map[interface{}]interface{})\n\tif ok {\n\t\tn.Next = &PageLink{\n\t\t\tTitle: fmt.Sprintf(\"%v\", next[\"title\"]),\n\t\t\tPath: string(convertRelativeLink(pathPrefix, []byte(fmt.Sprintf(\"%v\", next[\"path\"])))),\n\t\t}\n\t}\n\treturn nil\n}\n\nconst readme = \"README\"\n\n\/\/ initTocs initializes documentation 
hierarchy for given languages in the given\n\/\/ root directory.\nfunc initTocs(root string, languages []string, baseURLPath string) (map[string]*TOC, error) {\n\ttocPath := filepath.Join(root, \"toc.ini\")\n\ttocCfg, err := ini.Load(tocPath)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"load %q\", tocPath)\n\t}\n\n\tvar tocprint bytes.Buffer\n\ttocs := make(map[string]*TOC)\n\tfor i, lang := range languages {\n\t\ttocprint.WriteString(lang)\n\t\ttocprint.WriteString(\":\\n\")\n\n\t\ttoc := &TOC{\n\t\t\tLanguage: lang,\n\t\t\tnodes: make(map[string]*Node),\n\t\t}\n\n\t\tvar previous *Node\n\t\tsetPrevious := func(n *Node) {\n\t\t\tdefer func() {\n\t\t\t\tprevious = n\n\t\t\t}()\n\t\t\tif previous == nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif n.Previous == nil {\n\t\t\t\tn.Previous = &PageLink{\n\t\t\t\t\tTitle: previous.Title,\n\t\t\t\t\tPath: previous.Path,\n\t\t\t\t}\n\t\t\t}\n\t\t\tif previous.Next == nil {\n\t\t\t\tprevious.Next = &PageLink{\n\t\t\t\t\tTitle: n.Title,\n\t\t\t\t\tPath: n.Path,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tdirs := tocCfg.Section(\"\").KeyStrings()\n\t\ttoc.Nodes = make([]*Node, 0, len(dirs))\n\t\tfor _, dir := range dirs {\n\t\t\tdirname := tocCfg.Section(\"\").Key(dir).String()\n\t\t\tfiles := tocCfg.Section(dirname).KeyStrings()\n\t\t\t\/\/ Skip empty directory\n\t\t\tif len(files) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttocprint.WriteString(dirname)\n\t\t\ttocprint.WriteString(\"\/\\n\")\n\n\t\t\tdirNode := &Node{\n\t\t\t\tPath: dirname,\n\t\t\t\tNodes: make([]*Node, 0, len(files)-1),\n\t\t\t}\n\t\t\ttoc.Nodes = append(toc.Nodes, dirNode)\n\t\t\ttoc.nodes[dirNode.Path] = dirNode\n\n\t\t\tif tocCfg.Section(dirname).HasValue(readme) {\n\t\t\t\tlocalpath := filepath.Join(root, lang, dirNode.Path, readme+\".md\")\n\t\t\t\tif i > 0 && !osutil.IsFile(localpath) {\n\t\t\t\t\tcontinue \/\/ It is OK to have missing file for non-default language\n\t\t\t\t}\n\n\t\t\t\tdirNode.LocalPath = localpath\n\t\t\t\terr = dirNode.Reload(baseURLPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, errors.Wrapf(err, \"reload node from %q\", dirNode.LocalPath)\n\t\t\t\t}\n\n\t\t\t\tif len(dirNode.Content) > 0 {\n\t\t\t\t\tsetPrevious(dirNode)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, file := range files {\n\t\t\t\tfilename := tocCfg.Section(dirname).Key(file).String()\n\t\t\t\tif filename == readme {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tlocalpath := filepath.Join(root, lang, dirname, filename) + \".md\"\n\t\t\t\tif i > 0 && !osutil.IsFile(localpath) {\n\t\t\t\t\tcontinue \/\/ It is OK to have missing file for non-default language\n\t\t\t\t}\n\n\t\t\t\tnode := &Node{\n\t\t\t\t\tCategory: dirNode.Title,\n\t\t\t\t\tPath: path.Join(dirname, filename),\n\t\t\t\t\tLocalPath: localpath,\n\t\t\t\t}\n\t\t\t\tdirNode.Nodes = append(dirNode.Nodes, node)\n\t\t\t\ttoc.nodes[node.Path] = node\n\n\t\t\t\terr = node.Reload(baseURLPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, errors.Wrapf(err, \"reload node from %q\", node.LocalPath)\n\t\t\t\t}\n\n\t\t\t\tsetPrevious(node)\n\t\t\t\ttocprint.WriteString(strings.Repeat(\" \", len(dirname)))\n\t\t\t\ttocprint.WriteString(\"|__\")\n\t\t\t\ttocprint.WriteString(filename)\n\t\t\t\ttocprint.WriteString(\"\\n\")\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Single pages\n\t\tpages := tocCfg.Section(\"pages\").KeysHash()\n\t\ttoc.Pages = make([]*Node, 0, len(pages))\n\t\tfor _, page := range pages {\n\t\t\ttocprint.WriteString(page)\n\t\t\ttocprint.WriteString(\"\\n\")\n\n\t\t\tnode := &Node{\n\t\t\t\tPath: page,\n\t\t\t\tLocalPath: filepath.Join(root, lang, 
page) + \".md\",\n\t\t\t}\n\t\t\ttoc.Pages = append(toc.Pages, node)\n\t\t\ttoc.nodes[node.Path] = node\n\n\t\t\terr = node.Reload(\"\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"reload node from %q\", node.LocalPath)\n\t\t\t}\n\t\t}\n\n\t\ttocs[lang] = toc\n\t}\n\n\tfmt.Print(tocprint.String())\n\treturn tocs, nil\n}\n<commit_msg>store: fix relative link of auto previous and next<commit_after>\/\/ Copyright 2022 ASoulDocs. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage store\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\tgoldmarktoc \"github.com\/abhinav\/goldmark-toc\"\n\t\"github.com\/pkg\/errors\"\n\t\"gopkg.in\/ini.v1\"\n\n\t\"github.com\/asoul-sig\/asouldocs\/internal\/osutil\"\n)\n\n\/\/ TOC represents documentation hierarchy for a specific language.\ntype TOC struct {\n\tLanguage string \/\/ The language of the documentation\n\tNodes []*Node \/\/ Directories of the documentation\n\tPages []*Node \/\/ Individuals pages of the documentation\n\n\tnodes map[string]*Node \/\/ Key is the Node.Path\n}\n\n\/\/ Node is a node in the documentation hierarchy.\ntype Node struct {\n\tCategory string \/\/ The category (name) of the node, for directories and single pages, categories are empty\n\tPath string \/\/ The URL path\n\tLocalPath string \/\/ Full path with .md extension\n\n\tContent []byte \/\/ The content of the node\n\tTitle string \/\/ The title of the document in the given language\n\tHeadings goldmarktoc.Items \/\/ Headings in the node\n\n\tNodes []*Node \/\/ The list of sub-nodes\n\tPrevious *PageLink \/\/ The previous page\n\tNext *PageLink \/\/ The next page\n}\n\n\/\/ PageLink is a link to another page.\ntype PageLink struct {\n\tTitle string \/\/ The title of the page\n\tPath string \/\/ the path to the page\n}\n\n\/\/ Reload reloads and converts the content from local disk.\nfunc (n *Node) Reload(baseURLPath string) error {\n\tpathPrefix := path.Join(baseURLPath, strings.SplitN(n.Path, \"\/\", 2)[0])\n\tcontent, meta, headings, err := convertFile(pathPrefix, n.LocalPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn.Content = content\n\tn.Title = fmt.Sprintf(\"%v\", meta[\"title\"])\n\tn.Headings = headings\n\n\tprevious, ok := meta[\"previous\"].(map[interface{}]interface{})\n\tif ok {\n\t\tn.Previous = &PageLink{\n\t\t\tTitle: fmt.Sprintf(\"%v\", previous[\"title\"]),\n\t\t\tPath: string(convertRelativeLink(pathPrefix, []byte(fmt.Sprintf(\"%v\", previous[\"path\"])))),\n\t\t}\n\t}\n\tnext, ok := meta[\"next\"].(map[interface{}]interface{})\n\tif ok {\n\t\tn.Next = &PageLink{\n\t\t\tTitle: fmt.Sprintf(\"%v\", next[\"title\"]),\n\t\t\tPath: string(convertRelativeLink(pathPrefix, []byte(fmt.Sprintf(\"%v\", next[\"path\"])))),\n\t\t}\n\t}\n\treturn nil\n}\n\nconst readme = \"README\"\n\n\/\/ initTocs initializes documentation hierarchy for given languages in the given\n\/\/ root directory.\nfunc initTocs(root string, languages []string, baseURLPath string) (map[string]*TOC, error) {\n\ttocPath := filepath.Join(root, \"toc.ini\")\n\ttocCfg, err := ini.Load(tocPath)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"load %q\", tocPath)\n\t}\n\n\tvar tocprint bytes.Buffer\n\ttocs := make(map[string]*TOC)\n\tfor i, lang := range languages {\n\t\ttocprint.WriteString(lang)\n\t\ttocprint.WriteString(\":\\n\")\n\n\t\ttoc := &TOC{\n\t\t\tLanguage: lang,\n\t\t\tnodes: make(map[string]*Node),\n\t\t}\n\n\t\tvar previous 
*Node\n\t\tsetPrevious := func(n *Node) {\n\t\t\tdefer func() {\n\t\t\t\tprevious = n\n\t\t\t}()\n\t\t\tif previous == nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif n.Previous == nil {\n\t\t\t\tn.Previous = &PageLink{\n\t\t\t\t\tTitle: previous.Title,\n\t\t\t\t\tPath: string(convertRelativeLink(baseURLPath, []byte(previous.Path))),\n\t\t\t\t}\n\t\t\t}\n\t\t\tif previous.Next == nil {\n\t\t\t\tprevious.Next = &PageLink{\n\t\t\t\t\tTitle: n.Title,\n\t\t\t\t\tPath: string(convertRelativeLink(baseURLPath, []byte(n.Path))),\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tdirs := tocCfg.Section(\"\").KeyStrings()\n\t\ttoc.Nodes = make([]*Node, 0, len(dirs))\n\t\tfor _, dir := range dirs {\n\t\t\tdirname := tocCfg.Section(\"\").Key(dir).String()\n\t\t\tfiles := tocCfg.Section(dirname).KeyStrings()\n\t\t\t\/\/ Skip empty directory\n\t\t\tif len(files) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttocprint.WriteString(dirname)\n\t\t\ttocprint.WriteString(\"\/\\n\")\n\n\t\t\tdirNode := &Node{\n\t\t\t\tPath: dirname,\n\t\t\t\tNodes: make([]*Node, 0, len(files)-1),\n\t\t\t}\n\t\t\ttoc.Nodes = append(toc.Nodes, dirNode)\n\t\t\ttoc.nodes[dirNode.Path] = dirNode\n\n\t\t\tif tocCfg.Section(dirname).HasValue(readme) {\n\t\t\t\tlocalpath := filepath.Join(root, lang, dirNode.Path, readme+\".md\")\n\t\t\t\tif i > 0 && !osutil.IsFile(localpath) {\n\t\t\t\t\tcontinue \/\/ It is OK to have missing file for non-default language\n\t\t\t\t}\n\n\t\t\t\tdirNode.LocalPath = localpath\n\t\t\t\terr = dirNode.Reload(baseURLPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, errors.Wrapf(err, \"reload node from %q\", dirNode.LocalPath)\n\t\t\t\t}\n\n\t\t\t\tif len(dirNode.Content) > 0 {\n\t\t\t\t\tsetPrevious(dirNode)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, file := range files {\n\t\t\t\tfilename := tocCfg.Section(dirname).Key(file).String()\n\t\t\t\tif filename == readme {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tlocalpath := filepath.Join(root, lang, dirname, filename) + \".md\"\n\t\t\t\tif i > 0 && !osutil.IsFile(localpath) {\n\t\t\t\t\tcontinue \/\/ It is OK to have missing file for non-default language\n\t\t\t\t}\n\n\t\t\t\tnode := &Node{\n\t\t\t\t\tCategory: dirNode.Title,\n\t\t\t\t\tPath: path.Join(dirname, filename),\n\t\t\t\t\tLocalPath: localpath,\n\t\t\t\t}\n\t\t\t\tdirNode.Nodes = append(dirNode.Nodes, node)\n\t\t\t\ttoc.nodes[node.Path] = node\n\n\t\t\t\terr = node.Reload(baseURLPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, errors.Wrapf(err, \"reload node from %q\", node.LocalPath)\n\t\t\t\t}\n\n\t\t\t\tsetPrevious(node)\n\t\t\t\ttocprint.WriteString(strings.Repeat(\" \", len(dirname)))\n\t\t\t\ttocprint.WriteString(\"|__\")\n\t\t\t\ttocprint.WriteString(filename)\n\t\t\t\ttocprint.WriteString(\"\\n\")\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Single pages\n\t\tpages := tocCfg.Section(\"pages\").KeysHash()\n\t\ttoc.Pages = make([]*Node, 0, len(pages))\n\t\tfor _, page := range pages {\n\t\t\ttocprint.WriteString(page)\n\t\t\ttocprint.WriteString(\"\\n\")\n\n\t\t\tnode := &Node{\n\t\t\t\tPath: page,\n\t\t\t\tLocalPath: filepath.Join(root, lang, page) + \".md\",\n\t\t\t}\n\t\t\ttoc.Pages = append(toc.Pages, node)\n\t\t\ttoc.nodes[node.Path] = node\n\n\t\t\terr = node.Reload(\"\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"reload node from %q\", node.LocalPath)\n\t\t\t}\n\t\t}\n\n\t\ttocs[lang] = toc\n\t}\n\n\tfmt.Print(tocprint.String())\n\treturn tocs, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2018 Circonus, Inc. 
<support@circonus.com>\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\/\/\n\npackage tags\n\nimport (\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/circonus-labs\/circonus-agent\/internal\/config\"\n\tcgm \"github.com\/circonus-labs\/circonus-gometrics\/v3\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ Tag aliases cgm's Tag to centralize definition\ntype Tag = cgm.Tag\n\n\/\/ Tags aliases cgm's Tags to centralize definition\ntype Tags = cgm.Tags\n\n\/\/ TaggedMetric defines a tagged metric\ntype TaggedMetric struct {\n\tTags *Tags `json:\"_tags\"`\n\tType string `json:\"_type\"`\n\tValue interface{} `json:\"_value\"`\n}\n\n\/\/ TaggedMetrics is a list of metrics with tags\ntype TaggedMetrics map[string]TaggedMetric\n\n\/\/ JSONMetric defines an individual metric received in JSON\ntype JSONMetric struct {\n\tTags []string `json:\"_tags\"`\n\tType string `json:\"_type\"`\n\tValue interface{} `json:\"_value\"`\n}\n\n\/\/ JSONMetrics holds list of JSON metrics received at \/write receiver interface\ntype JSONMetrics map[string]JSONMetric\n\nconst (\n\t\/\/ Delimiter defines character separating category from value in a tag e.g. location:london\n\tDelimiter = \":\"\n\t\/\/ Separator defines character separating tags in a list e.g. os:centos,location:sfo\n\tSeparator = \",\"\n\treplacementChar = \"_\"\n)\n\nvar (\n\tvalid = regexp.MustCompile(`^[^:,]+:[^:,]+(,[^:,]+:[^:,]+)*$`)\n\tcleaner = regexp.MustCompile(`[\\[\\]'\"` + \"`]\")\n\tbaseTags *[]string\n)\n\n\/\/ GetBaseTags returns the check.tags as a list if check.metric_streamtags is true\n\/\/ ensuring that all metrics have, at a minimum, the same base set of tags\nfunc GetBaseTags() []string {\n\tif baseTags != nil {\n\t\treturn *baseTags\n\t}\n\n\tbaseTags = &[]string{}\n\n\tif !viper.GetBool(config.KeyCheckMetricStreamtags) {\n\t\treturn *baseTags\n\t}\n\n\t\/\/ check.tags is a comma separated list of key:value pairs\n\t\/\/ a) backwards support for how tags were specified in NAD and the JS version of cosi\n\t\/\/ b) viper (at the moment) handles stringSlices different between command line and environment (sigh)\n\t\/\/ command line takes comma separated list, environment only takes space separated list (tags can contain spaces...)\n\ttagSpec := viper.GetString(config.KeyCheckTags)\n\tif tagSpec == \"\" {\n\t\treturn *baseTags\n\t}\n\n    \/\/ if systemd ExecStart=circonus-agentd --check-tags=\"c1:v1,c2:v1\" syntax is\n    \/\/ used, tagSpec will literally be `\"c1:v1,c2:v1\"` with the quotes included\n    \/\/ resulting in the first tag having a leading '\"' and the last tag having\n    \/\/ a trailing '\"'...\n    if tagSpec[0:1] == `\"` {\n        tagSpec = tagSpec[1:]\n        if tagSpec[len(tagSpec)-1:] == `\"` {\n            tagSpec = tagSpec[0:len(tagSpec)-1]\n        }\n    }\n\n\tcheckTags := strings.Split(tagSpec, Separator)\n\tif len(checkTags) == 0 {\n\t\treturn *baseTags\n\t}\n\n\ttags := make([]string, 0, len(checkTags))\n\ttags = append(tags, checkTags...)\n\tbaseTags = &tags\n\n\treturn *baseTags\n}\n\n\/\/ FromString converts old style tag string spec \"cat:val,cat:val,...\" into a Tags structure\nfunc FromString(tags string) Tags {\n\tif tags == \"\" || !valid.MatchString(tags) {\n\t\treturn Tags{}\n\t}\n\ttagList := strings.Split(tags, Separator)\n\treturn FromList(tagList)\n}\n\n\/\/ FromList converts old style list of tags []string{\"cat:val\",\"cat:val\",...} into a Tags structure\nfunc FromList(tagList []string) Tags {\n\tif len(tagList) == 0 {\n\t\treturn 
Tags{}\n\t}\n\n\ttags := make(Tags, 0, len(tagList))\n\tfor _, tag := range tagList {\n\t\tt := strings.Split(tag, Delimiter)\n\t\tif len(t) != 2 {\n\t\t\tcontinue \/\/ must be *only* two; if there are multiple occurrences of Delimiter, the tag is invalid\n\t\t}\n\t\ttags = append(tags, Tag{t[0], t[1]})\n\t}\n\n\treturn tags\n}\n\n\/\/ PrepStreamTags accepts a comma delimited list of key:value pairs\n\/\/ and returns a stream tag formatted spec or an error if there are\n\/\/ issues with the format of the supplied tag list.\nfunc PrepStreamTags(tagList string) (string, error) {\n\tif tagList == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\tif !valid.MatchString(tagList) {\n\t\treturn \"\", errors.Errorf(\"invalid tag format\")\n\t}\n\n\tt := strings.Split(cleaner.ReplaceAllString(tagList, replacementChar), Separator)\n\n\t\/\/ so that components which treat metric names as simple strings\n\t\/\/ receive a consistent, predictive metric name\n\tsort.Strings(t)\n\n\treturn \"|ST[\" + strings.Join(t, Separator) + \"]\", nil\n}\n<commit_msg>upd: support colon in tag values<commit_after>\/\/ Copyright © 2018 Circonus, Inc. <support@circonus.com>\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\/\/\n\npackage tags\n\nimport (\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/circonus-labs\/circonus-agent\/internal\/config\"\n\tcgm \"github.com\/circonus-labs\/circonus-gometrics\/v3\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ Tag aliases cgm's Tag to centralize definition\ntype Tag = cgm.Tag\n\n\/\/ Tags aliases cgm's Tags to centralize definition\ntype Tags = cgm.Tags\n\n\/\/ TaggedMetric defines a tagged metric\ntype TaggedMetric struct {\n\tTags *Tags `json:\"_tags\"`\n\tType string `json:\"_type\"`\n\tValue interface{} `json:\"_value\"`\n}\n\n\/\/ TaggedMetrics is a list of metrics with tags\ntype TaggedMetrics map[string]TaggedMetric\n\n\/\/ JSONMetric defines an individual metric received in JSON\ntype JSONMetric struct {\n\tTags []string `json:\"_tags\"`\n\tType string `json:\"_type\"`\n\tValue interface{} `json:\"_value\"`\n}\n\n\/\/ JSONMetrics holds list of JSON metrics received at \/write receiver interface\ntype JSONMetrics map[string]JSONMetric\n\nconst (\n\t\/\/ Delimiter defines character separating category from value in a tag e.g. location:london\n\tDelimiter = \":\"\n\t\/\/ Separator defines character separating tags in a list e.g. 
os:centos,location:sfo\n\tSeparator = \",\"\n\treplacementChar = \"_\"\n)\n\nvar (\n\tvalid = regexp.MustCompile(`^[^:,]+:[^:,]+(,[^:,]+:[^:,]+)*$`)\n\tcleaner = regexp.MustCompile(`[\\[\\]'\"` + \"`]\")\n\tbaseTags *[]string\n)\n\n\/\/ GetBaseTags returns the check.tags as a list if check.metric_streamtags is true\n\/\/ ensuring that all metrics have, at a minimum, the same base set of tags\nfunc GetBaseTags() []string {\n\tif baseTags != nil {\n\t\treturn *baseTags\n\t}\n\n\tbaseTags = &[]string{}\n\n\tif !viper.GetBool(config.KeyCheckMetricStreamtags) {\n\t\treturn *baseTags\n\t}\n\n\t\/\/ check.tags is a comma separated list of key:value pairs\n\t\/\/ a) backwards support for how tags were specified in NAD and the JS version of cosi\n\t\/\/ b) viper (at the moment) handles stringSlices different between command line and environment (sigh)\n\t\/\/ command line takes comma separated list, environment only takes space separated list (tags can contain spaces...)\n\ttagSpec := viper.GetString(config.KeyCheckTags)\n\tif tagSpec == \"\" {\n\t\treturn *baseTags\n\t}\n\n\t\/\/ if systemd ExecStart=circonus-agentd --check-tags=\"c1:v1,c2:v1\" syntax is\n\t\/\/ used, tagSpec will literally be `\"c1:v1,c2:v1\"` with the quotes included\n\t\/\/ resulting in the first tag having a leading '\"' and the last tag having\n\t\/\/ a trailing '\"'...\n\tif tagSpec[0:1] == `\"` {\n\t\ttagSpec = tagSpec[1:]\n\t\tif tagSpec[len(tagSpec)-1:] == `\"` {\n\t\t\ttagSpec = tagSpec[0 : len(tagSpec)-1]\n\t\t}\n\t}\n\n\tcheckTags := strings.Split(tagSpec, Separator)\n\tif len(checkTags) == 0 {\n\t\treturn *baseTags\n\t}\n\n\ttags := make([]string, 0, len(checkTags))\n\ttags = append(tags, checkTags...)\n\tbaseTags = &tags\n\n\treturn *baseTags\n}\n\n\/\/ FromString converts old style tag string spec \"cat:val,cat:val,...\" into a Tags structure\nfunc FromString(tags string) Tags {\n\tif tags == \"\" || !valid.MatchString(tags) {\n\t\treturn Tags{}\n\t}\n\ttagList := strings.Split(tags, Separator)\n\treturn FromList(tagList)\n}\n\n\/\/ FromList converts old style list of tags []string{\"cat:val\",\"cat:val\",...} into a Tags structure\nfunc FromList(tagList []string) Tags {\n\tif len(tagList) == 0 {\n\t\treturn Tags{}\n\t}\n\n\ttags := make(Tags, 0, len(tagList))\n\tfor _, tag := range tagList {\n\t\tt := strings.SplitN(tag, Delimiter, 2)\n\t\tif len(t) != 2 {\n\t\t\tcontinue \/\/ must be *only* two\n\t\t}\n\t\ttags = append(tags, Tag{t[0], t[1]})\n\t}\n\n\treturn tags\n}\n\n\/\/ PrepStreamTags accepts a comma delimited list of key:value pairs\n\/\/ and returns a stream tag formatted spec or an error if there are\n\/\/ issues with the format of the supplied tag list.\nfunc PrepStreamTags(tagList string) (string, error) {\n\tif tagList == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\tif !valid.MatchString(tagList) {\n\t\treturn \"\", errors.Errorf(\"invalid tag format\")\n\t}\n\n\tt := strings.Split(cleaner.ReplaceAllString(tagList, replacementChar), Separator)\n\n\t\/\/ so that components which treat metric names as simple strings\n\t\/\/ receive a consistent, predictive metric name\n\tsort.Strings(t)\n\n\treturn \"|ST[\" + strings.Join(t, Separator) + \"]\", nil\n}\n
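\n\/\/ For illustration (editor's sketch, not in the original source): with the\n\/\/ SplitN change above, a tag value may now itself contain the delimiter, e.g.\n\/\/\n\/\/\ttags := FromList([]string{\"url:http:\/\/example.com\", \"os:centos\"})\n\/\/\t\/\/ -> Tags{{\"url\", \"http:\/\/example.com\"}, {\"os\", \"centos\"}}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Ceph-CSI Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in 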
writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage util\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/ceph\/ceph-csi\/internal\/util\/log\"\n\n\t\"golang.org\/x\/sys\/unix\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/validation\"\n\t\"k8s.io\/cloud-provider\/volume\/helpers\"\n\t\"k8s.io\/utils\/mount\"\n)\n\n\/\/ RoundOffVolSize rounds up given quantity up to chunks of MiB\/GiB.\nfunc RoundOffVolSize(size int64) int64 {\n\tsize = RoundOffBytes(size)\n\t\/\/ convert size back to MiB for rbd CLI\n\treturn size \/ helpers.MiB\n}\n\n\/\/ RoundOffBytes rounds off the size:\n\/\/ 1.1MiB will be rounded up to 2MiB, and the same applies to GiB;\n\/\/ a size of less than 1MiB will be rounded up to 1MiB.\nfunc RoundOffBytes(bytes int64) int64 {\n\tvar num int64\n\t\/\/ round off the value if it's in decimal\n\tif floatBytes := float64(bytes); floatBytes < helpers.GiB {\n\t\tnum = int64(math.Ceil(floatBytes \/ helpers.MiB))\n\t\tnum *= helpers.MiB\n\t} else {\n\t\tnum = int64(math.Ceil(floatBytes \/ helpers.GiB))\n\t\tnum *= helpers.GiB\n\t}\n\n\treturn num\n}\n\n\/\/ variables which will be set during the build time.\nvar (\n\t\/\/ GitCommit is the latest git commit the image is built from.\n\tGitCommit string\n\t\/\/ DriverVersion is the driver version.\n\tDriverVersion string\n)\n\n\/\/ Config holds the parameters list which can be configured.\ntype Config struct {\n\tVtype string \/\/ driver type [rbd|cephfs|liveness|controller]\n\tEndpoint string \/\/ CSI endpoint\n\tDriverName string \/\/ name of the driver\n\tDriverNamespace string \/\/ namespace in which driver is deployed\n\tNodeID string \/\/ node id\n\tInstanceID string \/\/ unique ID distinguishing this instance of Ceph CSI\n\tPluginPath string \/\/ location of cephcsi plugin\n\tStagingPath string \/\/ location of cephcsi staging path\n\tDomainLabels string \/\/ list of domain labels to read from the node\n\n\t\/\/ metrics related flags\n\tMetricsPath string \/\/ path of prometheus endpoint where metrics will be available\n\tHistogramOption string \/\/ Histogram option for grpc metrics, should be comma separated value,\n\t\/\/ ex: \"0.5,2,6\" where start=0.5, factor=2, count=6\n\tMetricsIP string \/\/ IP address for liveness\/metrics requests\n\tPidLimit int \/\/ PID limit to configure through cgroups\n\tMetricsPort int \/\/ TCP port for liveness\/grpc metrics requests\n\tPollTime time.Duration \/\/ time interval in seconds between each poll\n\tPoolTimeout time.Duration \/\/ probe timeout in seconds\n\tEnableGRPCMetrics bool \/\/ option to enable grpc metrics\n\n\tEnableProfiling bool \/\/ flag to enable profiling\n\tIsControllerServer bool \/\/ if set to true start provisioner server\n\tIsNodeServer bool \/\/ if set to true start node server\n\tVersion bool \/\/ cephcsi version\n\n\t\/\/ SkipForceFlatten is set to false if the kernel supports mounting of\n\t\/\/ rbd image or the image chain has the deep-flatten feature.\n\tSkipForceFlatten bool\n\n\t\/\/ cephfs related flags\n\tForceKernelCephFS bool \/\/ force to use the ceph kernel client even if the kernel is < 4.17\n\n\t\/\/ RbdHardMaxCloneDepth is the hard limit for maximum number of nested volume clones that are taken before a flatten\n\t\/\/ occurs\n\tRbdHardMaxCloneDepth uint\n\n\t\/\/ 
RbdSoftMaxCloneDepth is the soft limit for maximum number of nested volume clones that are taken before a flatten\n\t\/\/ occurs\n\tRbdSoftMaxCloneDepth uint\n\n\t\/\/ MaxSnapshotsOnImage represents the maximum number of snapshots allowed\n\t\/\/ on rbd image without flattening, once the limit is reached cephcsi will\n\t\/\/ start flattening the older rbd images to allow more snapshots\n\tMaxSnapshotsOnImage uint\n\n\t\/\/ MinSnapshotsOnImage represents the soft limit for maximum number of\n\t\/\/ snapshots allowed on rbd image without flattening, once the soft limit is\n\t\/\/ reached cephcsi will start flattening the older rbd images.\n\tMinSnapshotsOnImage uint\n}\n\n\/\/ ValidateDriverName validates the driver name.\nfunc ValidateDriverName(driverName string) error {\n\tif driverName == \"\" {\n\t\treturn errors.New(\"driver name is empty\")\n\t}\n\n\tconst reqDriverNameLen = 63\n\tif len(driverName) > reqDriverNameLen {\n\t\treturn errors.New(\"driver name length should be less than 63 chars\")\n\t}\n\tvar err error\n\tfor _, msg := range validation.IsDNS1123Subdomain(strings.ToLower(driverName)) {\n\t\tif err == nil {\n\t\t\terr = errors.New(msg)\n\n\t\t\tcontinue\n\t\t}\n\t\terr = fmt.Errorf(\"%s: %w\", msg, err)\n\t}\n\n\treturn err\n}\n\n\/\/ GetKernelVersion returns the version of the running Unix (like) system from the\n\/\/ 'utsname' structs 'release' component.\nfunc GetKernelVersion() (string, error) {\n\tutsname := unix.Utsname{}\n\tif err := unix.Uname(&utsname); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn strings.TrimRight(string(utsname.Release[:]), \"\\x00\"), nil\n}\n\n\/\/ KernelVersion holds kernel related information.\ntype KernelVersion struct {\n\tVersion int\n\tPatchLevel int\n\tSubLevel int\n\tExtraVersion int \/\/ prefix of the part after the first \"-\"\n\tDistribution string \/\/ component of full extraversion\n\tBackport bool \/\/ backports have a fixed version\/patchlevel\/sublevel\n}\n\n\/\/ parseKernelRelease parses a kernel release version string into:\n\/\/ version, patch version, sub version and extra version.\nfunc parseKernelRelease(release string) (int, int, int, int, error) {\n\tversion := 0\n\tpatchlevel := 0\n\tminVersions := 2\n\n\textra := \"\"\n\tn, err := fmt.Sscanf(release, \"%d.%d%s\", &version, &patchlevel, &extra)\n\tif n < minVersions && err != nil {\n\t\treturn 0, 0, 0, 0, fmt.Errorf(\"failed to parse version and patchlevel from %s: %w\", release, err)\n\t}\n\n\tsublevel := 0\n\textraversion := 0\n\tif n > minVersions {\n\t\tn, err = fmt.Sscanf(extra, \".%d%s\", &sublevel, &extra)\n\t\tif err != nil && n == 0 && len(extra) > 0 && extra[0] != '-' && extra[0] == '.' {\n\t\t\treturn 0, 0, 0, 0, fmt.Errorf(\"failed to parse subversion from %s: %w\", release, err)\n\t\t}\n\n\t\textra = strings.TrimPrefix(extra, \"-\")\n\t\t\/\/ ignore errors, 1st component of extraversion does not need to be an int\n\t\t_, err = fmt.Sscanf(extra, \"%d\", &extraversion)\n\t\tif err != nil {\n\t\t\t\/\/ \"go lint\" wants err to be checked...\n\t\t\textraversion = 0\n\t\t}\n\t}\n\n\treturn version, patchlevel, sublevel, extraversion, nil\n}\n\n\/\/ CheckKernelSupport checks the running kernel and comparing it to known\n\/\/ versions that have support for required features . Distributors of\n\/\/ enterprise Linux have backported quota support to previous versions. 
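[Editor's note — an illustrative example, not part of the original ceph-csi source; the release strings and version numbers below are assumptions chosen to exercise both matching modes described in this comment, using the KernelVersion struct and CheckKernelSupport declared in this file:]

	// A mainline (non-Backport) entry matches any kernel >= 4.17.0. A Backport
	// entry pins version/patchlevel/sublevel, requires the Distribution
	// substring in the release string, and compares only the extraversion.
	supported := []KernelVersion{
		{Version: 4, PatchLevel: 17, SubLevel: 0},
		{Version: 3, PatchLevel: 10, SubLevel: 0, ExtraVersion: 1062, Distribution: ".el7", Backport: true},
	}
	CheckKernelSupport("5.4.0-generic", supported)          // true: 5.4.0 >= 4.17.0
	CheckKernelSupport("3.10.0-1127.el7.x86_64", supported) // true: backport match, 1127 >= 1062
	CheckKernelSupport("3.10.0-957.fc30", supported)        // false: no ".el7", and 3.10.0 < 4.17.0
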
This\n\/\/ function checks if the running kernel is one of the versions that have the\n\/\/ feature\/fixes backported.\n\/\/\n\/\/ `uname -r` (or Uname().Utsname.Release has a format like 1.2.3-rc.vendor\n\/\/ This can be slit up in the following components: - version (1) - patchlevel\n\/\/ (2) - sublevel (3) - optional, defaults to 0 - extraversion (rc) - optional,\n\/\/ matching integers only - distribution (.vendor) - optional, match against\n\/\/ whole `uname -r` string\n\/\/\n\/\/ For matching multiple versions, the kernelSupport type contains a backport\n\/\/ bool, which will cause matching\n\/\/ version+patchlevel+sublevel+(>=extraversion)+(~distribution)\n\/\/\n\/\/ In case the backport bool is false, a simple check for higher versions than\n\/\/ version+patchlevel+sublevel is done.\nfunc CheckKernelSupport(release string, supportedVersions []KernelVersion) bool {\n\tversion, patchlevel, sublevel, extraversion, err := parseKernelRelease(release)\n\tif err != nil {\n\t\tlog.ErrorLogMsg(\"%v\", err)\n\n\t\treturn false\n\t}\n\n\t\/\/ compare running kernel against known versions\n\tfor _, kernel := range supportedVersions {\n\t\tif !kernel.Backport {\n\t\t\t\/\/ deal with the default case(s), find >= match for version, patchlevel, sublevel\n\t\t\tif version > kernel.Version || (version == kernel.Version && patchlevel > kernel.PatchLevel) ||\n\t\t\t\t(version == kernel.Version && patchlevel == kernel.PatchLevel && sublevel >= kernel.SubLevel) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ specific backport, match distribution initially\n\t\t\tif !strings.Contains(release, kernel.Distribution) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ strict match version, patchlevel, sublevel, and >= match extraversion\n\t\t\tif version == kernel.Version && patchlevel == kernel.PatchLevel &&\n\t\t\t\tsublevel == kernel.SubLevel && extraversion >= kernel.ExtraVersion {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\tlog.ErrorLogMsg(\"kernel %s does not support required features\", release)\n\n\treturn false\n}\n\n\/\/ GenerateVolID generates a volume ID based on passed in parameters and version, to be returned\n\/\/ to the CO system.\nfunc GenerateVolID(\n\tctx context.Context,\n\tmonitors string,\n\tcr *Credentials,\n\tlocationID int64,\n\tpool, clusterID, objUUID string,\n\tvolIDVersion uint16) (string, error) {\n\tvar err error\n\n\tif locationID == InvalidPoolID {\n\t\tlocationID, err = GetPoolID(monitors, cr, pool)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\t\/\/ generate the volume ID to return to the CO system\n\tvi := CSIIdentifier{\n\t\tLocationID: locationID,\n\t\tEncodingVersion: volIDVersion,\n\t\tClusterID: clusterID,\n\t\tObjectUUID: objUUID,\n\t}\n\n\tvolID, err := vi.ComposeCSIID()\n\n\treturn volID, err\n}\n\n\/\/ CreateMountPoint creates the directory with given path.\nfunc CreateMountPoint(mountPath string) error {\n\treturn os.MkdirAll(mountPath, 0o750)\n}\n\n\/\/ checkDirExists checks directory exists or not.\nfunc checkDirExists(p string) bool {\n\tif _, err := os.Stat(p); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n\/\/ IsMountPoint checks if the given path is mountpoint or not.\nfunc IsMountPoint(p string) (bool, error) {\n\tdummyMount := mount.New(\"\")\n\tnotMnt, err := dummyMount.IsLikelyNotMountPoint(p)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn !notMnt, nil\n}\n\n\/\/ Mount mounts the source to target path.\nfunc Mount(source, target, fstype string, options []string) error {\n\tdummyMount := 
mount.New(\"\")\n\n\treturn dummyMount.Mount(source, target, fstype, options)\n}\n\n\/\/ MountOptionsAdd adds the `add` mount options to the `options` and returns a\n\/\/ new string. In case `add` is already present in the `options`, `add` is not\n\/\/ added again.\nfunc MountOptionsAdd(options string, add ...string) string {\n\topts := strings.Split(options, \",\")\n\tnewOpts := []string{}\n\t\/\/ clean original options from empty strings\n\tfor _, opt := range opts {\n\t\tif opt != \"\" {\n\t\t\tnewOpts = append(newOpts, opt)\n\t\t}\n\t}\n\n\tfor _, opt := range add {\n\t\tif opt != \"\" && !contains(newOpts, opt) {\n\t\t\tnewOpts = append(newOpts, opt)\n\t\t}\n\t}\n\n\treturn strings.Join(newOpts, \",\")\n}\n\nfunc contains(s []string, key string) bool {\n\tfor _, v := range s {\n\t\tif v == key {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n\/\/ CallStack returns the stack of the calls in the current goroutine. Useful\n\/\/ for debugging or reporting errors. This is a friendly alternative to\n\/\/ assert() or panic().\nfunc CallStack() string {\n\tstack := make([]byte, 2048)\n\t_ = runtime.Stack(stack, false)\n\n\treturn string(stack)\n}\n<commit_msg>cleanup: fix log level<commit_after>\/*\nCopyright 2019 The Ceph-CSI Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage util\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/ceph\/ceph-csi\/internal\/util\/log\"\n\n\t\"golang.org\/x\/sys\/unix\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/validation\"\n\t\"k8s.io\/cloud-provider\/volume\/helpers\"\n\t\"k8s.io\/utils\/mount\"\n)\n\n\/\/ RoundOffVolSize rounds the given quantity up to chunks of MiB\/GiB.\nfunc RoundOffVolSize(size int64) int64 {\n\tsize = RoundOffBytes(size)\n\t\/\/ convert size back to MiB for rbd CLI\n\treturn size \/ helpers.MiB\n}\n\n\/\/ RoundOffBytes rounds off the size:\n\/\/ 1.1MiB will be rounded off to 2MiB, same for GiB;\n\/\/ a size less than 1MiB will be rounded off to 1MiB.\nfunc RoundOffBytes(bytes int64) int64 {\n\tvar num int64\n\t\/\/ round off the value if it's a decimal\n\tif floatBytes := float64(bytes); floatBytes < helpers.GiB {\n\t\tnum = int64(math.Ceil(floatBytes \/ helpers.MiB))\n\t\tnum *= helpers.MiB\n\t} else {\n\t\tnum = int64(math.Ceil(floatBytes \/ helpers.GiB))\n\t\tnum *= helpers.GiB\n\t}\n\n\treturn num\n}\n\n\/\/ Variables which will be set at build time.\nvar (\n\t\/\/ GitCommit tells the latest git commit the image is built from.\n\tGitCommit string\n\t\/\/ DriverVersion is the version of the driver.\n\tDriverVersion string\n)\n\n\/\/ Config holds the parameters list which can be configured.\ntype Config struct {\n\tVtype string \/\/ driver type [rbd|cephfs|liveness|controller]\n\tEndpoint string \/\/ CSI endpoint\n\tDriverName string \/\/ name of the driver\n\tDriverNamespace string \/\/ namespace in which driver is deployed\n\tNodeID string \/\/ node id\n\tInstanceID string \/\/ unique ID distinguishing this instance of Ceph CSI\n\tPluginPath string \/\/ location of cephcsi plugin\n\tStagingPath string \/\/ location of cephcsi staging path\n\tDomainLabels string \/\/ list of domain labels to read from the node\n\n\t\/\/ metrics related flags\n\tMetricsPath string \/\/ path of prometheus endpoint where metrics will be available\n\tHistogramOption string \/\/ Histogram option for grpc metrics, should be comma separated value,\n\t\/\/ ex:= \"0.5,2,6\" where start=0.5 factor=2, count=6\n\tMetricsIP string \/\/ TCP IP for liveness\/metrics requests\n\tPidLimit int \/\/ PID limit to configure through cgroups\n\tMetricsPort int \/\/ TCP port for liveness\/grpc metrics requests\n\tPollTime time.Duration \/\/ time interval in seconds between each poll\n\tPoolTimeout time.Duration \/\/ probe timeout in seconds\n\tEnableGRPCMetrics bool \/\/ option to enable grpc metrics\n\n\tEnableProfiling bool \/\/ flag to enable profiling\n\tIsControllerServer bool \/\/ if set to true start provisioner server\n\tIsNodeServer bool \/\/ if set to true start node server\n\tVersion bool \/\/ cephcsi version\n\n\t\/\/ SkipForceFlatten is set to false if the kernel supports mounting of\n\t\/\/ rbd image or the image chain has the deep-flatten feature.\n\tSkipForceFlatten bool\n\n\t\/\/ cephfs related flags\n\tForceKernelCephFS bool \/\/ force to use the ceph kernel client even if the kernel is < 4.17\n\n\t\/\/ RbdHardMaxCloneDepth is the hard limit for maximum number of nested volume clones that are taken before a flatten\n\t\/\/ occurs\n\tRbdHardMaxCloneDepth uint\n\n\t\/\/ RbdSoftMaxCloneDepth is the soft limit for maximum number of nested volume clones that are taken before a flatten\n\t\/\/ occurs\n\tRbdSoftMaxCloneDepth uint\n\n\t\/\/ MaxSnapshotsOnImage represents the maximum number of snapshots allowed\n\t\/\/ on rbd image without flattening, once the limit is reached cephcsi will\n\t\/\/ start flattening the older rbd images to allow more snapshots\n\tMaxSnapshotsOnImage uint\n\n\t\/\/ MinSnapshotsOnImage represents the soft limit for maximum number of\n\t\/\/ snapshots allowed on rbd image without flattening, once the soft limit is\n\t\/\/ reached cephcsi will start flattening the older rbd images.\n\tMinSnapshotsOnImage uint\n}\n\n\/\/ ValidateDriverName validates the driver name.\nfunc ValidateDriverName(driverName string) error {\n\tif driverName == \"\" {\n\t\treturn errors.New(\"driver name is empty\")\n\t}\n\n\tconst reqDriverNameLen = 63\n\tif len(driverName) > reqDriverNameLen {\n\t\treturn errors.New(\"driver name length should be less than 63 chars\")\n\t}\n\tvar err error\n\tfor _, msg := range validation.IsDNS1123Subdomain(strings.ToLower(driverName)) {\n\t\tif err == nil {\n\t\t\terr = errors.New(msg)\n\n\t\t\tcontinue\n\t\t}\n\t\terr = fmt.Errorf(\"%s: %w\", msg, err)\n\t}\n\n\treturn err\n}\n\n\/\/ GetKernelVersion returns the version of the running Unix (like) system from the\n\/\/ 'utsname' structs 'release' component.\nfunc GetKernelVersion() (string, error) {\n\tutsname := unix.Utsname{}\n\tif err := unix.Uname(&utsname); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn strings.TrimRight(string(utsname.Release[:]), \"\\x00\"), nil\n}\n\n\/\/ KernelVersion holds kernel related information.\ntype KernelVersion struct {\n\tVersion int\n\tPatchLevel int\n\tSubLevel int\n\tExtraVersion int \/\/ prefix of the part after the first \"-\"\n\tDistribution string \/\/ component of full extraversion\n\tBackport bool \/\/ backports have a fixed version\/patchlevel\/sublevel\n}\n\n\/\/ parseKernelRelease parses a 
kernel release version string into:\n\/\/ version, patch version, sub version and extra version.\nfunc parseKernelRelease(release string) (int, int, int, int, error) {\n\tversion := 0\n\tpatchlevel := 0\n\tminVersions := 2\n\n\textra := \"\"\n\tn, err := fmt.Sscanf(release, \"%d.%d%s\", &version, &patchlevel, &extra)\n\tif n < minVersions && err != nil {\n\t\treturn 0, 0, 0, 0, fmt.Errorf(\"failed to parse version and patchlevel from %s: %w\", release, err)\n\t}\n\n\tsublevel := 0\n\textraversion := 0\n\tif n > minVersions {\n\t\tn, err = fmt.Sscanf(extra, \".%d%s\", &sublevel, &extra)\n\t\tif err != nil && n == 0 && len(extra) > 0 && extra[0] != '-' && extra[0] == '.' {\n\t\t\treturn 0, 0, 0, 0, fmt.Errorf(\"failed to parse subversion from %s: %w\", release, err)\n\t\t}\n\n\t\textra = strings.TrimPrefix(extra, \"-\")\n\t\t\/\/ ignore errors, 1st component of extraversion does not need to be an int\n\t\t_, err = fmt.Sscanf(extra, \"%d\", &extraversion)\n\t\tif err != nil {\n\t\t\t\/\/ \"go lint\" wants err to be checked...\n\t\t\textraversion = 0\n\t\t}\n\t}\n\n\treturn version, patchlevel, sublevel, extraversion, nil\n}\n\n\/\/ CheckKernelSupport checks the running kernel and comparing it to known\n\/\/ versions that have support for required features . Distributors of\n\/\/ enterprise Linux have backported quota support to previous versions. This\n\/\/ function checks if the running kernel is one of the versions that have the\n\/\/ feature\/fixes backported.\n\/\/\n\/\/ `uname -r` (or Uname().Utsname.Release has a format like 1.2.3-rc.vendor\n\/\/ This can be slit up in the following components: - version (1) - patchlevel\n\/\/ (2) - sublevel (3) - optional, defaults to 0 - extraversion (rc) - optional,\n\/\/ matching integers only - distribution (.vendor) - optional, match against\n\/\/ whole `uname -r` string\n\/\/\n\/\/ For matching multiple versions, the kernelSupport type contains a backport\n\/\/ bool, which will cause matching\n\/\/ version+patchlevel+sublevel+(>=extraversion)+(~distribution)\n\/\/\n\/\/ In case the backport bool is false, a simple check for higher versions than\n\/\/ version+patchlevel+sublevel is done.\nfunc CheckKernelSupport(release string, supportedVersions []KernelVersion) bool {\n\tversion, patchlevel, sublevel, extraversion, err := parseKernelRelease(release)\n\tif err != nil {\n\t\tlog.ErrorLogMsg(\"%v\", err)\n\n\t\treturn false\n\t}\n\n\t\/\/ compare running kernel against known versions\n\tfor _, kernel := range supportedVersions {\n\t\tif !kernel.Backport {\n\t\t\t\/\/ deal with the default case(s), find >= match for version, patchlevel, sublevel\n\t\t\tif version > kernel.Version || (version == kernel.Version && patchlevel > kernel.PatchLevel) ||\n\t\t\t\t(version == kernel.Version && patchlevel == kernel.PatchLevel && sublevel >= kernel.SubLevel) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ specific backport, match distribution initially\n\t\t\tif !strings.Contains(release, kernel.Distribution) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ strict match version, patchlevel, sublevel, and >= match extraversion\n\t\t\tif version == kernel.Version && patchlevel == kernel.PatchLevel &&\n\t\t\t\tsublevel == kernel.SubLevel && extraversion >= kernel.ExtraVersion {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\tlog.WarningLogMsg(\"kernel %s does not support required features\", release)\n\n\treturn false\n}\n\n\/\/ GenerateVolID generates a volume ID based on passed in parameters and version, to be returned\n\/\/ to the CO system.\nfunc 
GenerateVolID(\n\tctx context.Context,\n\tmonitors string,\n\tcr *Credentials,\n\tlocationID int64,\n\tpool, clusterID, objUUID string,\n\tvolIDVersion uint16) (string, error) {\n\tvar err error\n\n\tif locationID == InvalidPoolID {\n\t\tlocationID, err = GetPoolID(monitors, cr, pool)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\t\/\/ generate the volume ID to return to the CO system\n\tvi := CSIIdentifier{\n\t\tLocationID: locationID,\n\t\tEncodingVersion: volIDVersion,\n\t\tClusterID: clusterID,\n\t\tObjectUUID: objUUID,\n\t}\n\n\tvolID, err := vi.ComposeCSIID()\n\n\treturn volID, err\n}\n\n\/\/ CreateMountPoint creates the directory with given path.\nfunc CreateMountPoint(mountPath string) error {\n\treturn os.MkdirAll(mountPath, 0o750)\n}\n\n\/\/ checkDirExists checks directory exists or not.\nfunc checkDirExists(p string) bool {\n\tif _, err := os.Stat(p); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n\/\/ IsMountPoint checks if the given path is mountpoint or not.\nfunc IsMountPoint(p string) (bool, error) {\n\tdummyMount := mount.New(\"\")\n\tnotMnt, err := dummyMount.IsLikelyNotMountPoint(p)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn !notMnt, nil\n}\n\n\/\/ Mount mounts the source to target path.\nfunc Mount(source, target, fstype string, options []string) error {\n\tdummyMount := mount.New(\"\")\n\n\treturn dummyMount.Mount(source, target, fstype, options)\n}\n\n\/\/ MountOptionsAdd adds the `add` mount options to the `options` and returns a\n\/\/ new string. In case `add` is already present in the `options`, `add` is not\n\/\/ added again.\nfunc MountOptionsAdd(options string, add ...string) string {\n\topts := strings.Split(options, \",\")\n\tnewOpts := []string{}\n\t\/\/ clean original options from empty strings\n\tfor _, opt := range opts {\n\t\tif opt != \"\" {\n\t\t\tnewOpts = append(newOpts, opt)\n\t\t}\n\t}\n\n\tfor _, opt := range add {\n\t\tif opt != \"\" && !contains(newOpts, opt) {\n\t\t\tnewOpts = append(newOpts, opt)\n\t\t}\n\t}\n\n\treturn strings.Join(newOpts, \",\")\n}\n\nfunc contains(s []string, key string) bool {\n\tfor _, v := range s {\n\t\tif v == key {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n\/\/ CallStack returns the stack of the calls in the current goroutine. Useful\n\/\/ for debugging or reporting errors. 
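[Editor's note — a hedged usage sketch, not part of the original file; the deferred-recover call site is an assumed example, though log.ErrorLogMsg is the same helper already used elsewhere in this file:]

	defer func() {
		if r := recover(); r != nil {
			log.ErrorLogMsg("recovered from %v at:\n%s", r, CallStack())
		}
	}()
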
This is a friendly alternative to\n\/\/ assert() or panic().\nfunc CallStack() string {\n\tstack := make([]byte, 2048)\n\t_ = runtime.Stack(stack, false)\n\n\treturn string(stack)\n}\n<|endoftext|>"} {"text":"<commit_before>package util\n\nimport (\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\n\t\"github.com\/jdkato\/prose\/internal\/model\"\n)\n\n\/\/ ReadDataFile reads data from a file, panicking on any errors.\nfunc ReadDataFile(path string) []byte {\n\tp, err := filepath.Abs(path)\n\tCheckError(err)\n\n\tdata, ferr := ioutil.ReadFile(p)\n\tCheckError(ferr)\n\n\treturn data\n}\n\n\/\/ CheckError panics if `err` is not `nil`.\nfunc CheckError(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/ Min returns the minimum of `a` and `b`.\nfunc Min(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\n\/\/ IsPunct determines if a character is a punctuation symbol.\nfunc IsPunct(c byte) bool {\n\tfor _, r := range []byte(\"!\\\"#$%&'()*+,-.\/:;<=>?@[\\\\]^_`{|}~\") {\n\t\tif c == r {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ IsSpace determines if a character is a whitespace character.\nfunc IsSpace(c byte) bool {\n\tfor _, r := range []byte(\"\\t\\n\\r\\f\\v\") {\n\t\tif c == r {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ IsLetter determines if a character is letter.\nfunc IsLetter(c byte) bool {\n\treturn (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')\n}\n\n\/\/ IsAlnum determines if a character is a letter or a digit.\nfunc IsAlnum(c byte) bool {\n\treturn (c >= '0' && c <= '9') || IsLetter(c)\n}\n\n\/\/ GetAsset returns the named Asset.\nfunc GetAsset(name string) []byte {\n\tb, err := model.Asset(\"internal\/model\/\" + name)\n\tCheckError(err)\n\treturn b\n}\n<commit_msg>[internal]: add util function<commit_after>package util\n\nimport (\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\n\t\"github.com\/jdkato\/prose\/internal\/model\"\n)\n\n\/\/ ReadDataFile reads data from a file, panicking on any errors.\nfunc ReadDataFile(path string) []byte {\n\tp, err := filepath.Abs(path)\n\tCheckError(err)\n\n\tdata, ferr := ioutil.ReadFile(p)\n\tCheckError(ferr)\n\n\treturn data\n}\n\n\/\/ CheckError panics if `err` is not `nil`.\nfunc CheckError(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/ Min returns the minimum of `a` and `b`.\nfunc Min(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\n\/\/ IsPunct determines if a character is a punctuation symbol.\nfunc IsPunct(c byte) bool {\n\tfor _, r := range []byte(\"!\\\"#$%&'()*+,-.\/:;<=>?@[\\\\]^_`{|}~\") {\n\t\tif c == r {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ IsSpace determines if a character is a whitespace character.\nfunc IsSpace(c byte) bool {\n\tfor _, r := range []byte(\"\\t\\n\\r\\f\\v\") {\n\t\tif c == r {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ IsLetter determines if a character is letter.\nfunc IsLetter(c byte) bool {\n\treturn (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')\n}\n\n\/\/ IsAlnum determines if a character is a letter or a digit.\nfunc IsAlnum(c byte) bool {\n\treturn (c >= '0' && c <= '9') || IsLetter(c)\n}\n\n\/\/ GetAsset returns the named Asset.\nfunc GetAsset(name string) []byte {\n\tb, err := model.Asset(\"internal\/model\/\" + name)\n\tCheckError(err)\n\treturn b\n}\n\n\/\/ StringInSlice determines if `slice` contains the string `a`.\nfunc StringInSlice(a string, slice []string) bool {\n\tfor _, b := range slice {\n\t\tif a == b {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<|endoftext|>"} 
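[Editor's note — a self-contained sketch of two small helpers from the util file above; the expected outputs in the comments follow directly from the quoted definitions, and the package wrapper is added here only to make the snippet runnable on its own:]

	package main

	import "fmt"

	// Min returns the minimum of `a` and `b` (behavior copied from the file above).
	func Min(a, b int) int {
		if a < b {
			return a
		}
		return b
	}

	// StringInSlice reports whether `slice` contains the string `a`
	// (behavior copied from the commit_after version above).
	func StringInSlice(a string, slice []string) bool {
		for _, b := range slice {
			if a == b {
				return true
			}
		}
		return false
	}

	func main() {
		fmt.Println(Min(3, 7))                                   // 3
		fmt.Println(StringInSlice("go", []string{"go", "lisp"})) // true
		fmt.Println(StringInSlice("rb", []string{"go", "lisp"})) // false
	}
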
{"text":"<commit_before>package sudoku\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n)\n\ntype SolveDirections []*SolveStep\n\nconst (\n\tNAKED_SINGLE = iota\n\tHIDDEN_SINGLE_IN_ROW\n\tHIDDEN_SINGLE_IN_COL\n\tHIDDEN_SINGLE_IN_BLOCK\n)\n\ntype SolveStep struct {\n\tTargetCells CellList\n\tPointerCells CellList\n\tNums []int\n\tTechnique SolveTechnique\n}\n\ntype SolveTechnique interface {\n\tName() string\n\tDescription(*SolveStep) string\n\tFind(*Grid) *SolveStep\n\tIsFill() bool\n}\n\ntype fillSolveTechnique struct {\n}\n\ntype cullSolveTechnique struct {\n}\n\nvar fillTechniques []SolveTechnique\nvar cullTechniques []SolveTechnique\n\nfunc init() {\n\t\/\/TODO: init techniques with enough space\n\tfillTechniques = append(fillTechniques, nakedSingleTechnique{})\n\tfillTechniques = append(fillTechniques, hiddenSingleInRow{})\n\tfillTechniques = append(fillTechniques, hiddenSingleInCol{})\n\tfillTechniques = append(fillTechniques, hiddenSingleInBlock{})\n\tcullTechniques = append(cullTechniques, pointingPair{})\n}\n\ntype nakedSingleTechnique struct {\n\t*fillSolveTechnique\n}\n\ntype hiddenSingleInRow struct {\n\t*fillSolveTechnique\n}\n\ntype hiddenSingleInCol struct {\n\t*fillSolveTechnique\n}\n\ntype hiddenSingleInBlock struct {\n\t*fillSolveTechnique\n}\n\ntype pointingPair struct {\n\t*cullSolveTechnique\n}\n\nfunc (self *fillSolveTechnique) IsFill() bool {\n\treturn true\n}\n\nfunc (self *cullSolveTechnique) IsFill() bool {\n\treturn false\n}\n\nfunc newFillSolveStep(cell *Cell, num int, technique SolveTechnique) *SolveStep {\n\t\/\/TODO: why do these need to be pulled out separately?\n\tcellArr := [...]*Cell{cell}\n\tnumArr := [...]int{num}\n\treturn &SolveStep{cellArr[:], nil, numArr[:], technique}\n}\n\nfunc (self *SolveStep) Apply(grid *Grid) {\n\t\/\/TODO: also handle non isFill items.\n\tif self.Technique.IsFill() {\n\t\tif len(self.TargetCells) == 0 || len(self.Nums) == 0 {\n\t\t\treturn\n\t\t}\n\t\tcell := self.TargetCells[0].InGrid(grid)\n\t\tcell.SetNumber(self.Nums[0])\n\t}\n}\n\nfunc (self nakedSingleTechnique) Name() string {\n\treturn \"Only Legal Number\"\n}\n\nfunc (self nakedSingleTechnique) Description(step *SolveStep) string {\n\tif len(step.Nums) == 0 {\n\t\treturn \"\"\n\t}\n\tnum := step.Nums[0]\n\treturn fmt.Sprintf(\"%d is the only remaining valid number for that cell\", num)\n}\n\nfunc (self nakedSingleTechnique) Find(grid *Grid) *SolveStep {\n\t\/\/This will be a random item\n\tobj := grid.queue.NewGetter().GetSmallerThan(2)\n\tif obj == nil {\n\t\t\/\/There weren't any cells with one option.\n\t\treturn nil\n\t}\n\tcell := obj.(*Cell)\n\treturn newFillSolveStep(cell, cell.implicitNumber(), self)\n}\n\nfunc (self hiddenSingleInRow) Name() string {\n\treturn \"Necessary In Row\"\n}\n\nfunc (self hiddenSingleInRow) Description(step *SolveStep) string {\n\t\/\/TODO: format the text to say \"first\/second\/third\/etc\"\n\tif len(step.TargetCells) == 0 || len(step.Nums) == 0 {\n\t\treturn \"\"\n\t}\n\tcell := step.TargetCells[0]\n\tnum := step.Nums[0]\n\treturn fmt.Sprintf(\"%d is required in the %d row, and %d is the only column it fits\", num, cell.Row+1, cell.Col+1)\n}\n\nfunc (self hiddenSingleInRow) Find(grid *Grid) *SolveStep {\n\tgetter := func(index int) []*Cell {\n\t\treturn grid.Row(index)\n\t}\n\treturn necessaryInCollection(grid, self, getter)\n}\n\nfunc (self hiddenSingleInCol) Name() string {\n\treturn \"Necessary In Col\"\n}\n\nfunc (self hiddenSingleInCol) Description(step *SolveStep) string {\n\t\/\/TODO: format the text to say 
\"first\/second\/third\/etc\"\n\tif len(step.TargetCells) == 0 || len(step.Nums) == 0 {\n\t\treturn \"\"\n\t}\n\tcell := step.TargetCells[0]\n\tnum := step.Nums[0]\n\treturn fmt.Sprintf(\"%d is required in the %d column, and %d is the only row it fits\", num, cell.Row+1, cell.Col+1)\n}\n\nfunc (self hiddenSingleInCol) Find(grid *Grid) *SolveStep {\n\tgetter := func(index int) []*Cell {\n\t\treturn grid.Col(index)\n\t}\n\treturn necessaryInCollection(grid, self, getter)\n}\n\nfunc (self hiddenSingleInBlock) Name() string {\n\treturn \"Necessary In Block\"\n}\n\nfunc (self hiddenSingleInBlock) Description(step *SolveStep) string {\n\t\/\/TODO: format the text to say \"first\/second\/third\/etc\"\n\tif len(step.TargetCells) == 0 || len(step.Nums) == 0 {\n\t\treturn \"\"\n\t}\n\tcell := step.TargetCells[0]\n\tnum := step.Nums[0]\n\treturn fmt.Sprintf(\"%d is required in the %d block, and %d, %d is the only cell it fits\", num, cell.Block+1, cell.Row+1, cell.Col+1)\n}\n\nfunc (self hiddenSingleInBlock) Find(grid *Grid) *SolveStep {\n\tgetter := func(index int) []*Cell {\n\t\treturn grid.Block(index)\n\t}\n\treturn necessaryInCollection(grid, self, getter)\n}\n\nfunc necessaryInCollection(grid *Grid, technique SolveTechnique, collectionGetter func(index int) []*Cell) *SolveStep {\n\t\/\/This will be a random item\n\tindexes := rand.Perm(DIM)\n\n\tfor _, i := range indexes {\n\t\tseenInCollection := make([]int, DIM)\n\t\tcollection := collectionGetter(i)\n\t\tfor _, cell := range collection {\n\t\t\tfor _, possibility := range cell.Possibilities() {\n\t\t\t\tseenInCollection[possibility-1]++\n\t\t\t}\n\t\t}\n\t\tseenIndexes := rand.Perm(DIM)\n\t\tfor _, index := range seenIndexes {\n\t\t\tseen := seenInCollection[index]\n\t\t\tif seen == 1 {\n\t\t\t\t\/\/Okay, we know our target number. Which cell was it?\n\t\t\t\tfor _, cell := range collection {\n\t\t\t\t\tif cell.Possible(index + 1) {\n\t\t\t\t\t\t\/\/Found it!\n\t\t\t\t\t\treturn newFillSolveStep(cell, index+1, technique)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\/\/Nope.\n\treturn nil\n}\n\nfunc (self pointingPair) Name() string {\n\treturn \"Pointing pair\"\n}\n\nfunc (self pointingPair) Description(step *SolveStep) string {\n\t\/\/TODO: implement this\n\treturn \"\"\n}\n\nfunc (self pointingPair) Find(grid *Grid) *SolveStep {\n\t\/\/Within each block, for each number, see if all items that allow it are aligned in a row or column.\n\t\/\/TODO: randomize order of blocks.\n\t\/\/TODO: should we create a FilterByLegal method on lists of cells?\n\t\/\/TODO: finish this!\n\tfor i := 0; i < DIM; i++ {\n\t\tblock := grid.Block(i)\n\t\t\/\/TODO: randomize order of numbers to test for.\n\t\tfor num := 0; num < DIM; num++ {\n\t\t\tcells := block.FilterByPossible(num + 1)\n\t\t\t\/\/cellList is now a list of all cells that have that number.\n\t\t\tif len(cells) == 0 || len(cells) > BLOCK_DIM {\n\t\t\t\t\/\/Meh, not a match.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/Okay, it's possible it's a match. 
Are all rows the same?\n\t\t\tif cells.SameRow() {\n\t\t\t\t\/\/Yup!\n\t\t\t\treturn &SolveStep{grid.Row(cells.Row()).RemoveCells(cells), cells, []int{num + 1}, self}\n\t\t\t}\n\t\t\t\/\/Okay, are all cols?\n\t\t\tif cells.SameCol() {\n\t\t\t\t\/\/Yup!\n\t\t\t\treturn &SolveStep{grid.Col(cells.Col()).RemoveCells(cells), cells, []int{num + 1}, self}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (self *Grid) HumanSolve() SolveDirections {\n\tvar results []*SolveStep\n\tfor !self.Solved() {\n\t\t\/\/TODO: try the techniques in parallel\n\t\t\/\/TODO: pick the technique based on a weighting of how common a human is to pick each one.\n\t\t\/\/TODO: provide hints to the techniques of where to look based on the last filled cell\n\t\t\/\/TODO: if no fill techniques work, use a culltechnique.\n\t\ttechniqueOrder := rand.Perm(len(fillTechniques))\n\t\tfor _, index := range techniqueOrder {\n\t\t\ttechnique := fillTechniques[index]\n\t\t\tstep := technique.Find(self)\n\t\t\tif step != nil {\n\t\t\t\tresults = append(results, step)\n\t\t\t\tstep.Apply(self)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif !self.Solved() {\n\t\t\/\/We couldn't solve the puzzle.\n\t\treturn nil\n\t}\n\treturn results\n}\n<commit_msg>solveStep.Apply() should work for cull steps, too (not tested).<commit_after>package sudoku\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n)\n\ntype SolveDirections []*SolveStep\n\nconst (\n\tNAKED_SINGLE = iota\n\tHIDDEN_SINGLE_IN_ROW\n\tHIDDEN_SINGLE_IN_COL\n\tHIDDEN_SINGLE_IN_BLOCK\n)\n\ntype SolveStep struct {\n\tTargetCells CellList\n\tPointerCells CellList\n\tNums []int\n\tTechnique SolveTechnique\n}\n\ntype SolveTechnique interface {\n\tName() string\n\tDescription(*SolveStep) string\n\tFind(*Grid) *SolveStep\n\tIsFill() bool\n}\n\ntype fillSolveTechnique struct {\n}\n\ntype cullSolveTechnique struct {\n}\n\nvar fillTechniques []SolveTechnique\nvar cullTechniques []SolveTechnique\n\nfunc init() {\n\t\/\/TODO: init techniques with enough space\n\tfillTechniques = append(fillTechniques, nakedSingleTechnique{})\n\tfillTechniques = append(fillTechniques, hiddenSingleInRow{})\n\tfillTechniques = append(fillTechniques, hiddenSingleInCol{})\n\tfillTechniques = append(fillTechniques, hiddenSingleInBlock{})\n\tcullTechniques = append(cullTechniques, pointingPair{})\n}\n\ntype nakedSingleTechnique struct {\n\t*fillSolveTechnique\n}\n\ntype hiddenSingleInRow struct {\n\t*fillSolveTechnique\n}\n\ntype hiddenSingleInCol struct {\n\t*fillSolveTechnique\n}\n\ntype hiddenSingleInBlock struct {\n\t*fillSolveTechnique\n}\n\ntype pointingPair struct {\n\t*cullSolveTechnique\n}\n\nfunc (self *fillSolveTechnique) IsFill() bool {\n\treturn true\n}\n\nfunc (self *cullSolveTechnique) IsFill() bool {\n\treturn false\n}\n\nfunc newFillSolveStep(cell *Cell, num int, technique SolveTechnique) *SolveStep {\n\t\/\/TODO: why do these need to be pulled out separately?\n\tcellArr := [...]*Cell{cell}\n\tnumArr := [...]int{num}\n\treturn &SolveStep{cellArr[:], nil, numArr[:], technique}\n}\n\nfunc (self *SolveStep) Apply(grid *Grid) {\n\tif self.Technique.IsFill() {\n\t\tif len(self.TargetCells) == 0 || len(self.Nums) == 0 {\n\t\t\treturn\n\t\t}\n\t\tcell := self.TargetCells[0].InGrid(grid)\n\t\tcell.SetNumber(self.Nums[0])\n\t} else {\n\t\tfor _, cell := range self.TargetCells {\n\t\t\tgridCell := cell.InGrid(grid)\n\t\t\tfor _, exclude := range self.Nums {\n\t\t\t\tgridCell.setExcluded(exclude, true)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (self nakedSingleTechnique) Name() string {\n\treturn \"Only Legal Number\"\n}\n\nfunc (self 
nakedSingleTechnique) Description(step *SolveStep) string {\n\tif len(step.Nums) == 0 {\n\t\treturn \"\"\n\t}\n\tnum := step.Nums[0]\n\treturn fmt.Sprintf(\"%d is the only remaining valid number for that cell\", num)\n}\n\nfunc (self nakedSingleTechnique) Find(grid *Grid) *SolveStep {\n\t\/\/This will be a random item\n\tobj := grid.queue.NewGetter().GetSmallerThan(2)\n\tif obj == nil {\n\t\t\/\/There weren't any cells with one option.\n\t\treturn nil\n\t}\n\tcell := obj.(*Cell)\n\treturn newFillSolveStep(cell, cell.implicitNumber(), self)\n}\n\nfunc (self hiddenSingleInRow) Name() string {\n\treturn \"Necessary In Row\"\n}\n\nfunc (self hiddenSingleInRow) Description(step *SolveStep) string {\n\t\/\/TODO: format the text to say \"first\/second\/third\/etc\"\n\tif len(step.TargetCells) == 0 || len(step.Nums) == 0 {\n\t\treturn \"\"\n\t}\n\tcell := step.TargetCells[0]\n\tnum := step.Nums[0]\n\treturn fmt.Sprintf(\"%d is required in the %d row, and %d is the only column it fits\", num, cell.Row+1, cell.Col+1)\n}\n\nfunc (self hiddenSingleInRow) Find(grid *Grid) *SolveStep {\n\tgetter := func(index int) []*Cell {\n\t\treturn grid.Row(index)\n\t}\n\treturn necessaryInCollection(grid, self, getter)\n}\n\nfunc (self hiddenSingleInCol) Name() string {\n\treturn \"Necessary In Col\"\n}\n\nfunc (self hiddenSingleInCol) Description(step *SolveStep) string {\n\t\/\/TODO: format the text to say \"first\/second\/third\/etc\"\n\tif len(step.TargetCells) == 0 || len(step.Nums) == 0 {\n\t\treturn \"\"\n\t}\n\tcell := step.TargetCells[0]\n\tnum := step.Nums[0]\n\treturn fmt.Sprintf(\"%d is required in the %d column, and %d is the only row it fits\", num, cell.Row+1, cell.Col+1)\n}\n\nfunc (self hiddenSingleInCol) Find(grid *Grid) *SolveStep {\n\tgetter := func(index int) []*Cell {\n\t\treturn grid.Col(index)\n\t}\n\treturn necessaryInCollection(grid, self, getter)\n}\n\nfunc (self hiddenSingleInBlock) Name() string {\n\treturn \"Necessary In Block\"\n}\n\nfunc (self hiddenSingleInBlock) Description(step *SolveStep) string {\n\t\/\/TODO: format the text to say \"first\/second\/third\/etc\"\n\tif len(step.TargetCells) == 0 || len(step.Nums) == 0 {\n\t\treturn \"\"\n\t}\n\tcell := step.TargetCells[0]\n\tnum := step.Nums[0]\n\treturn fmt.Sprintf(\"%d is required in the %d block, and %d, %d is the only cell it fits\", num, cell.Block+1, cell.Row+1, cell.Col+1)\n}\n\nfunc (self hiddenSingleInBlock) Find(grid *Grid) *SolveStep {\n\tgetter := func(index int) []*Cell {\n\t\treturn grid.Block(index)\n\t}\n\treturn necessaryInCollection(grid, self, getter)\n}\n\nfunc necessaryInCollection(grid *Grid, technique SolveTechnique, collectionGetter func(index int) []*Cell) *SolveStep {\n\t\/\/This will be a random item\n\tindexes := rand.Perm(DIM)\n\n\tfor _, i := range indexes {\n\t\tseenInCollection := make([]int, DIM)\n\t\tcollection := collectionGetter(i)\n\t\tfor _, cell := range collection {\n\t\t\tfor _, possibility := range cell.Possibilities() {\n\t\t\t\tseenInCollection[possibility-1]++\n\t\t\t}\n\t\t}\n\t\tseenIndexes := rand.Perm(DIM)\n\t\tfor _, index := range seenIndexes {\n\t\t\tseen := seenInCollection[index]\n\t\t\tif seen == 1 {\n\t\t\t\t\/\/Okay, we know our target number. 
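// [Editor's note, not in the original source:] seen == 1 means the candidate
// index+1 fits in exactly one cell of this row/column/block — a classic
// "hidden single" — so that cell is forced to take it. The loop below simply
// locates that one cell so a fill step can be emitted.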
Which cell was it?\n\t\t\t\tfor _, cell := range collection {\n\t\t\t\t\tif cell.Possible(index + 1) {\n\t\t\t\t\t\t\/\/Found it!\n\t\t\t\t\t\treturn newFillSolveStep(cell, index+1, technique)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\/\/Nope.\n\treturn nil\n}\n\nfunc (self pointingPair) Name() string {\n\treturn \"Pointing pair\"\n}\n\nfunc (self pointingPair) Description(step *SolveStep) string {\n\t\/\/TODO: implement this\n\treturn \"\"\n}\n\nfunc (self pointingPair) Find(grid *Grid) *SolveStep {\n\t\/\/Within each block, for each number, see if all items that allow it are aligned in a row or column.\n\t\/\/TODO: randomize order of blocks.\n\t\/\/TODO: should we create a FilterByLegal method on lists of cells?\n\t\/\/TODO: finish this!\n\tfor i := 0; i < DIM; i++ {\n\t\tblock := grid.Block(i)\n\t\t\/\/TODO: randomize order of numbers to test for.\n\t\tfor num := 0; num < DIM; num++ {\n\t\t\tcells := block.FilterByPossible(num + 1)\n\t\t\t\/\/cellList is now a list of all cells that have that number.\n\t\t\tif len(cells) == 0 || len(cells) > BLOCK_DIM {\n\t\t\t\t\/\/Meh, not a match.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/Okay, it's possible it's a match. Are all rows the same?\n\t\t\tif cells.SameRow() {\n\t\t\t\t\/\/Yup!\n\t\t\t\treturn &SolveStep{grid.Row(cells.Row()).RemoveCells(cells), cells, []int{num + 1}, self}\n\t\t\t}\n\t\t\t\/\/Okay, are all cols?\n\t\t\tif cells.SameCol() {\n\t\t\t\t\/\/Yup!\n\t\t\t\treturn &SolveStep{grid.Col(cells.Col()).RemoveCells(cells), cells, []int{num + 1}, self}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (self *Grid) HumanSolve() SolveDirections {\n\tvar results []*SolveStep\n\tfor !self.Solved() {\n\t\t\/\/TODO: try the techniques in parallel\n\t\t\/\/TODO: pick the technique based on a weighting of how common a human is to pick each one.\n\t\t\/\/TODO: provide hints to the techniques of where to look based on the last filled cell\n\t\t\/\/TODO: if no fill techniques work, use a culltechnique.\n\t\ttechniqueOrder := rand.Perm(len(fillTechniques))\n\t\tfor _, index := range techniqueOrder {\n\t\t\ttechnique := fillTechniques[index]\n\t\t\tstep := technique.Find(self)\n\t\t\tif step != nil {\n\t\t\t\tresults = append(results, step)\n\t\t\t\tstep.Apply(self)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif !self.Solved() {\n\t\t\/\/We couldn't solve the puzzle.\n\t\treturn nil\n\t}\n\treturn results\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n *\n * k6 - a next-generation load testing tool\n * Copyright (C) 2017 Load Impact\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as\n * published by the Free Software Foundation, either version 3 of the\n * License, or (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with this program. 
If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\n *\/\n\npackage cloud\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\nconst (\n\t\/\/ RequestTimeout is the default cloud request timeout\n\tRequestTimeout = 20 * time.Second\n\t\/\/ RetryInterval is the default cloud request retry interval\n\tRetryInterval = 500 * time.Millisecond\n\t\/\/ MaxRetries specifies max retry attempts\n\tMaxRetries = 3\n\n\tk6IdempotencyKeyHeader = \"k6-Idempotency-Key\"\n)\n\n\/\/ Client handles communication with Load Impact cloud API.\ntype Client struct {\n\tclient *http.Client\n\ttoken string\n\tbaseURL string\n\tversion string\n\n\tretries int\n\tretryInterval time.Duration\n}\n\nfunc NewClient(token, host, version string) *Client {\n\tc := &Client{\n\t\tclient: &http.Client{Timeout: RequestTimeout},\n\t\ttoken: token,\n\t\tbaseURL: fmt.Sprintf(\"%s\/v1\", host),\n\t\tversion: version,\n\t\tretries: MaxRetries,\n\t\tretryInterval: RetryInterval,\n\t}\n\treturn c\n}\n\nfunc (c *Client) NewRequest(method, url string, data interface{}) (*http.Request, error) {\n\tvar buf io.Reader\n\n\tif data != nil {\n\t\tb, err := json.Marshal(&data)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tbuf = bytes.NewBuffer(b)\n\t}\n\n\treq, err := http.NewRequest(method, url, buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc.prepareHeaders(req)\n\n\treturn req, nil\n}\n\nfunc (c *Client) Do(req *http.Request, v interface{}) error {\n\tvar originalBody []byte\n\tvar err error\n\n\tif req.Body != nil {\n\t\toriginalBody, err = ioutil.ReadAll(req.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif cerr := req.Body.Close(); cerr != nil {\n\t\t\terr = cerr\n\t\t}\n\t}\n\n\tfor i := 1; i <= c.retries; i++ {\n\t\tif len(originalBody) > 0 {\n\t\t\treq.Body = ioutil.NopCloser(bytes.NewBuffer(originalBody))\n\t\t}\n\n\t\tretry, err := c.do(req, v, i)\n\n\t\tif retry {\n\t\t\ttime.Sleep(c.retryInterval)\n\t\t\tcontinue\n\t\t}\n\n\t\treturn err\n\t}\n\n\treturn err\n}\n\nfunc (c *Client) prepareHeaders(req *http.Request) {\n\tif req.Header.Get(\"Content-Type\") == \"\" {\n\t\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\t}\n\n\tif c.token != \"\" {\n\t\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Token %s\", c.token))\n\t}\n\n\tif shouldAddIdempotencyKey(req) {\n\t\treq.Header.Set(k6IdempotencyKeyHeader, randomStrHex())\n\t}\n\n\treq.Header.Set(\"User-Agent\", \"k6cloud\/\"+c.version)\n}\n\nfunc (c *Client) do(req *http.Request, v interface{}, attempt int) (retry bool, err error) {\n\tresp, err := c.client.Do(req)\n\n\tdefer func() {\n\t\tif resp != nil {\n\t\t\tif cerr := resp.Body.Close(); cerr != nil && err == nil {\n\t\t\t\terr = cerr\n\t\t\t}\n\t\t}\n\t}()\n\n\tif shouldRetry(resp, err, attempt, c.retries) {\n\t\treturn true, err\n\t}\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif err = checkResponse(resp); err != nil {\n\t\treturn false, err\n\t}\n\n\tif v != nil {\n\t\tif err = json.NewDecoder(resp.Body).Decode(v); err == io.EOF {\n\t\t\terr = nil \/\/ Ignore EOF from empty body\n\t\t}\n\t}\n\n\treturn false, err\n}\n\nfunc checkResponse(r *http.Response) error {\n\tif r == nil {\n\t\treturn ErrUnknown\n\t}\n\n\tif c := r.StatusCode; c >= 200 && c <= 299 {\n\t\treturn nil\n\t}\n\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar payload struct {\n\t\tError ErrorResponse `json:\"error\"`\n\t}\n\tif 
err := json.Unmarshal(data, &payload); err != nil {\n\t\tif r.StatusCode == http.StatusUnauthorized {\n\t\t\treturn ErrNotAuthenticated\n\t\t}\n\t\tif r.StatusCode == http.StatusForbidden {\n\t\t\treturn ErrNotAuthorized\n\t\t}\n\t\treturn errors.Errorf(\n\t\t\t\"Unexpected HTTP error from %s: %d %s\",\n\t\t\tr.Request.URL,\n\t\t\tr.StatusCode,\n\t\t\thttp.StatusText(r.StatusCode),\n\t\t)\n\t}\n\tpayload.Error.Response = r\n\treturn payload.Error\n}\n\nfunc shouldRetry(resp *http.Response, err error, attempt, maxAttempts int) bool {\n\tif attempt >= maxAttempts {\n\t\treturn false\n\t}\n\n\tif resp == nil || err != nil {\n\t\treturn true\n\t}\n\n\tif resp.StatusCode >= 500 || resp.StatusCode == 429 {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc shouldAddIdempotencyKey(req *http.Request) bool {\n\tswitch req.Method {\n\tcase http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodTrace:\n\t\treturn false\n\tdefault:\n\t\treturn req.Header.Get(k6IdempotencyKeyHeader) == \"\"\n\t}\n}\n\n\/\/ randomStrHex returns a hex string which can be used\n\/\/ for session token id or idempotency key.\n\/\/nolint:gosec\nfunc randomStrHex() string {\n\t\/\/ 16 hex characters\n\tb := make([]byte, 8)\n\t_, _ = rand.Read(b)\n\treturn hex.EncodeToString(b)\n}\n\nfunc init() {\n\trand.Seed(time.Now().UTC().UnixNano())\n}\n<commit_msg>stats\/cloud: move prepareHeaders inside Do<commit_after>\/*\n *\n * k6 - a next-generation load testing tool\n * Copyright (C) 2017 Load Impact\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as\n * published by the Free Software Foundation, either version 3 of the\n * License, or (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with this program. 
If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\n *\/\n\npackage cloud\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\nconst (\n\t\/\/ RequestTimeout is the default cloud request timeout\n\tRequestTimeout = 20 * time.Second\n\t\/\/ RetryInterval is the default cloud request retry interval\n\tRetryInterval = 500 * time.Millisecond\n\t\/\/ MaxRetries specifies max retry attempts\n\tMaxRetries = 3\n\n\tk6IdempotencyKeyHeader = \"k6-Idempotency-Key\"\n)\n\n\/\/ Client handles communication with Load Impact cloud API.\ntype Client struct {\n\tclient *http.Client\n\ttoken string\n\tbaseURL string\n\tversion string\n\n\tretries int\n\tretryInterval time.Duration\n}\n\nfunc NewClient(token, host, version string) *Client {\n\tc := &Client{\n\t\tclient: &http.Client{Timeout: RequestTimeout},\n\t\ttoken: token,\n\t\tbaseURL: fmt.Sprintf(\"%s\/v1\", host),\n\t\tversion: version,\n\t\tretries: MaxRetries,\n\t\tretryInterval: RetryInterval,\n\t}\n\treturn c\n}\n\nfunc (c *Client) NewRequest(method, url string, data interface{}) (*http.Request, error) {\n\tvar buf io.Reader\n\n\tif data != nil {\n\t\tb, err := json.Marshal(&data)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tbuf = bytes.NewBuffer(b)\n\t}\n\n\treq, err := http.NewRequest(method, url, buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}\n\nfunc (c *Client) Do(req *http.Request, v interface{}) error {\n\tvar originalBody []byte\n\tvar err error\n\n\tif req.Body != nil {\n\t\toriginalBody, err = ioutil.ReadAll(req.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif cerr := req.Body.Close(); cerr != nil {\n\t\t\terr = cerr\n\t\t}\n\t}\n\n\t\/\/ TODO(cuonglm): finding away to move this back to NewRequest\n\tc.prepareHeaders(req)\n\n\tfor i := 1; i <= c.retries; i++ {\n\t\tif len(originalBody) > 0 {\n\t\t\treq.Body = ioutil.NopCloser(bytes.NewBuffer(originalBody))\n\t\t}\n\n\t\tretry, err := c.do(req, v, i)\n\n\t\tif retry {\n\t\t\ttime.Sleep(c.retryInterval)\n\t\t\tcontinue\n\t\t}\n\n\t\treturn err\n\t}\n\n\treturn err\n}\n\nfunc (c *Client) prepareHeaders(req *http.Request) {\n\tif req.Header.Get(\"Content-Type\") == \"\" {\n\t\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\t}\n\n\tif c.token != \"\" {\n\t\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Token %s\", c.token))\n\t}\n\n\tif shouldAddIdempotencyKey(req) {\n\t\treq.Header.Set(k6IdempotencyKeyHeader, randomStrHex())\n\t}\n\n\treq.Header.Set(\"User-Agent\", \"k6cloud\/\"+c.version)\n}\n\nfunc (c *Client) do(req *http.Request, v interface{}, attempt int) (retry bool, err error) {\n\tresp, err := c.client.Do(req)\n\n\tdefer func() {\n\t\tif resp != nil {\n\t\t\tif cerr := resp.Body.Close(); cerr != nil && err == nil {\n\t\t\t\terr = cerr\n\t\t\t}\n\t\t}\n\t}()\n\n\tif shouldRetry(resp, err, attempt, c.retries) {\n\t\treturn true, err\n\t}\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif err = checkResponse(resp); err != nil {\n\t\treturn false, err\n\t}\n\n\tif v != nil {\n\t\tif err = json.NewDecoder(resp.Body).Decode(v); err == io.EOF {\n\t\t\terr = nil \/\/ Ignore EOF from empty body\n\t\t}\n\t}\n\n\treturn false, err\n}\n\nfunc checkResponse(r *http.Response) error {\n\tif r == nil {\n\t\treturn ErrUnknown\n\t}\n\n\tif c := r.StatusCode; c >= 200 && c <= 299 {\n\t\treturn nil\n\t}\n\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar 
payload struct {\n\t\tError ErrorResponse `json:\"error\"`\n\t}\n\tif err := json.Unmarshal(data, &payload); err != nil {\n\t\tif r.StatusCode == http.StatusUnauthorized {\n\t\t\treturn ErrNotAuthenticated\n\t\t}\n\t\tif r.StatusCode == http.StatusForbidden {\n\t\t\treturn ErrNotAuthorized\n\t\t}\n\t\treturn errors.Errorf(\n\t\t\t\"Unexpected HTTP error from %s: %d %s\",\n\t\t\tr.Request.URL,\n\t\t\tr.StatusCode,\n\t\t\thttp.StatusText(r.StatusCode),\n\t\t)\n\t}\n\tpayload.Error.Response = r\n\treturn payload.Error\n}\n\nfunc shouldRetry(resp *http.Response, err error, attempt, maxAttempts int) bool {\n\tif attempt >= maxAttempts {\n\t\treturn false\n\t}\n\n\tif resp == nil || err != nil {\n\t\treturn true\n\t}\n\n\tif resp.StatusCode >= 500 || resp.StatusCode == 429 {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc shouldAddIdempotencyKey(req *http.Request) bool {\n\tswitch req.Method {\n\tcase http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodTrace:\n\t\treturn false\n\tdefault:\n\t\treturn req.Header.Get(k6IdempotencyKeyHeader) == \"\"\n\t}\n}\n\n\/\/ randomStrHex returns a hex string which can be used\n\/\/ for session token id or idempotency key.\n\/\/nolint:gosec\nfunc randomStrHex() string {\n\t\/\/ 16 hex characters\n\tb := make([]byte, 8)\n\t_, _ = rand.Read(b)\n\treturn hex.EncodeToString(b)\n}\n\nfunc init() {\n\trand.Seed(time.Now().UTC().UnixNano())\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 SteelSeries ApS. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This package implements a basic LISP interpretor for embedding in a go program for scripting.\n\/\/ This file implements the symbol table.\n\npackage golisp\n\nimport (\n\t\"container\/list\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n)\n\nconst (\n\tREAD_LOCK = iota\n\tWRITE_LOCK\n)\n\ntype SymbolTableFrame struct {\n\tName string\n\tParent *SymbolTableFrame\n\tPrevious *SymbolTableFrame\n\tFrame *FrameMap\n\tBindings map[string]*Binding\n\tCurrentCode *list.List\n\tIsRestricted bool\n}\n\ntype symbolsTable struct {\n\tSymbols map[string]*Data\n\tMutex sync.RWMutex\n}\n\ntype environmentsTable struct {\n\tEnvironments map[string]*SymbolTableFrame\n\tMutex sync.RWMutex\n}\n\nvar Global *SymbolTableFrame\nvar TopLevelEnvironments environmentsTable = environmentsTable{make(map[string]*SymbolTableFrame, 5), sync.RWMutex{}}\n\nvar internedSymbols symbolsTable = symbolsTable{make(map[string]*Data, 256), sync.RWMutex{}}\n\nfunc Intern(name string) (sym *Data) {\n\t\/\/ Naked symbols do not need to be added to the symbol table\n\tif strings.HasSuffix(name, \":\") {\n\t\treturn SymbolWithName(name)\n\t}\n\n\tinternedSymbols.Mutex.RLock()\n\tlock := READ_LOCK\n\tdefer func() {\n\t\tif lock == READ_LOCK {\n\t\t\tinternedSymbols.Mutex.RUnlock()\n\t\t} else {\n\t\t\tinternedSymbols.Mutex.Unlock()\n\t\t}\n\t}()\n\n\tsym = internedSymbols.Symbols[name]\n\tif sym == nil {\n\t\tinternedSymbols.Mutex.RUnlock()\n\t\tinternedSymbols.Mutex.Lock()\n\t\tlock = WRITE_LOCK\n\t\tsym = SymbolWithName(name)\n\t\tinternedSymbols.Symbols[name] = sym\n\t}\n\treturn\n}\n\nfunc (self *SymbolTableFrame) Depth() int {\n\tif self.Previous == nil {\n\t\treturn 1\n\t} else {\n\t\treturn 1 + self.Previous.Depth()\n\t}\n}\n\nfunc (self *SymbolTableFrame) CurrentCodeString() string {\n\tif self.CurrentCode.Len() > 0 {\n\t\treturn self.CurrentCode.Front().Value.(string)\n\t} else {\n\t\treturn \"Unknown 
code\"\n\t}\n}\n\nfunc (self *SymbolTableFrame) InternalDump(frameNumber int) {\n\tfmt.Printf(\"Frame %d: %s\\n\", frameNumber, self.CurrentCodeString())\n\tfor _, b := range self.Bindings {\n\t\tif b.Val == nil || TypeOf(b.Val) != PrimitiveType {\n\t\t\tb.Dump()\n\t\t}\n\t}\n\tfmt.Printf(\"\\n\")\n\tif self.Previous != nil {\n\t\tself.Previous.InternalDump(frameNumber + 1)\n\t}\n}\n\nfunc (self *SymbolTableFrame) Dump() {\n\tprintln()\n\tself.InternalDump(0)\n}\n\nfunc (self *SymbolTableFrame) DumpSingleFrame(frameNumber int) {\n\tif frameNumber == 0 {\n\t\tfmt.Printf(\"%s\\n\", self.CurrentCodeString())\n\t\tfor _, b := range self.Bindings {\n\t\t\tif b.Val == nil || TypeOf(b.Val) != PrimitiveType {\n\t\t\t\tb.Dump()\n\t\t\t}\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t} else if self.Previous != nil {\n\t\tself.Previous.DumpSingleFrame(frameNumber - 1)\n\t} else {\n\t\tfmt.Printf(\"Invalid frame selected.\\n\")\n\t}\n}\n\nfunc (self *SymbolTableFrame) InternalDumpHeaders(frameNumber int) {\n\tfmt.Printf(\"Frame %d: %s\\n\", frameNumber, self.CurrentCodeString())\n\tif self.Previous != nil {\n\t\tself.Previous.InternalDumpHeaders(frameNumber + 1)\n\t}\n}\n\nfunc (self *SymbolTableFrame) DumpHeaders() {\n\tprintln()\n\tself.InternalDumpHeaders(0)\n}\n\nfunc (self *SymbolTableFrame) DumpHeader() {\n\tfmt.Printf(\"%s\\n\", self.CurrentCodeString())\n}\n\nfunc NewSymbolTableFrameBelow(p *SymbolTableFrame, name string) *SymbolTableFrame {\n\tvar f *FrameMap = nil\n\tif p != nil {\n\t\tf = p.Frame\n\t}\n\trestricted := p != nil && p.IsRestricted\n\tenv := &SymbolTableFrame{Name: name, Parent: p, Bindings: make(map[string]*Binding), Frame: f, CurrentCode: list.New(), IsRestricted: restricted}\n\tif p == nil || p == Global {\n\t\tTopLevelEnvironments.Mutex.Lock()\n\t\tdefer TopLevelEnvironments.Mutex.Unlock()\n\n\t\tTopLevelEnvironments.Environments[name] = env\n\t}\n\treturn env\n}\n\nfunc NewSymbolTableFrameBelowWithFrame(p *SymbolTableFrame, f *FrameMap, name string) *SymbolTableFrame {\n\tif f == nil {\n\t\tf = p.Frame\n\t}\n\trestricted := p != nil && p.IsRestricted\n\tenv := &SymbolTableFrame{Name: name, Parent: p, Bindings: make(map[string]*Binding, 10), Frame: f, CurrentCode: list.New(), IsRestricted: restricted}\n\tif p == nil || p == Global {\n\t\tTopLevelEnvironments.Mutex.Lock()\n\t\tdefer TopLevelEnvironments.Mutex.Unlock()\n\n\t\tTopLevelEnvironments.Environments[name] = env\n\t}\n\treturn env\n}\n\nfunc (self *SymbolTableFrame) HasFrame() bool {\n\treturn self.Frame != nil\n}\n\nfunc (self *SymbolTableFrame) BindingNamed(name string) (b *Binding, present bool) {\n\tb, present = self.Bindings[name]\n\treturn\n}\n\nfunc (self *SymbolTableFrame) SetBindingAt(name string, b *Binding) {\n\tself.Bindings[name] = b\n}\n\nfunc (self *SymbolTableFrame) DeleteBinding(name string) {\n\tdelete(self.Bindings, name)\n}\n\nfunc (self *SymbolTableFrame) findSymbol(name string) (symbol *Data, found bool) {\n\tbinding, found := self.BindingNamed(name)\n\tif found {\n\t\treturn binding.Sym, true\n\t} else if self.Parent != nil {\n\t\treturn self.Parent.findSymbol(name)\n\t} else {\n\t\treturn nil, false\n\t}\n}\n\nfunc (self *SymbolTableFrame) FindBindingFor(symbol *Data) (binding *Binding, found bool) {\n\tname := StringValue(symbol)\n\tbinding, found = self.BindingNamed(name)\n\tif found {\n\t\treturn\n\t} else if self.Parent != nil {\n\t\treturn self.Parent.FindBindingFor(symbol)\n\t} else {\n\t\treturn nil, false\n\t}\n}\n\nfunc (self *SymbolTableFrame) Intern(name string) (sym *Data) {\n\tsym = 
Intern(name)\n\tself.BindTo(sym, nil)\n\treturn\n}\n\nfunc (self *SymbolTableFrame) BindTo(symbol *Data, value *Data) *Data {\n\tbinding, found := self.FindBindingFor(symbol)\n\tif found {\n\t\tbinding.Val = value\n\t} else {\n\t\tbinding := BindingWithSymbolAndValue(symbol, value)\n\t\tself.SetBindingAt(StringValue(symbol), binding)\n\t}\n\treturn value\n}\n\nfunc (self *SymbolTableFrame) SetTo(symbol *Data, value *Data) (result *Data, err error) {\n\tlocalBinding, found := self.findBindingInLocalFrameFor(symbol)\n\tif found {\n\t\tlocalBinding.Val = value\n\t\treturn value, nil\n\t}\n\n\tnaked := StringValue(NakedSymbolFrom(symbol))\n\tif self.HasFrame() && self.Frame.HasSlot(naked) {\n\t\tself.Frame.Set(naked, value)\n\t\treturn value, nil\n\t}\n\n\tbinding, found := self.FindBindingFor(symbol)\n\tif found {\n\t\tbinding.Val = value\n\t\treturn value, nil\n\t}\n\n\treturn nil, errors.New(fmt.Sprintf(\"%s is undefined\", StringValue(symbol)))\n}\n\nfunc (self *SymbolTableFrame) findBindingInLocalFrameFor(symbol *Data) (b *Binding, found bool) {\n\treturn self.BindingNamed(StringValue(symbol))\n}\n\nfunc (self *SymbolTableFrame) BindLocallyTo(symbol *Data, value *Data) *Data {\n\tbinding, found := self.findBindingInLocalFrameFor(symbol)\n\tif found {\n\t\tbinding.Val = value\n\t} else {\n\t\tbinding := BindingWithSymbolAndValue(symbol, value)\n\t\tself.SetBindingAt(StringValue(symbol), binding)\n\t}\n\treturn value\n}\n\nfunc (self *SymbolTableFrame) ValueOfWithFunctionSlotCheck(symbol *Data, needFunction bool) *Data {\n\tlocalBinding, found := self.findBindingInLocalFrameFor(symbol)\n\tif found {\n\t\tif FunctionP(localBinding.Val) {\n\t\t\tatomic.StoreInt32(&FunctionValue(localBinding.Val).SlotFunction, 1)\n\t\t}\n\t\treturn localBinding.Val\n\t}\n\n\tif self.HasFrame() {\n\t\tf := self.Frame\n\t\tnaked := StringValue(NakedSymbolFrom(symbol))\n\t\tif f.HasSlot(naked) {\n\t\t\tslotValue := f.Get(naked)\n\t\t\tif !needFunction {\n\t\t\t\treturn slotValue\n\t\t\t}\n\t\t\tif FunctionP(slotValue) {\n\t\t\t\tatomic.StoreInt32(&FunctionValue(slotValue).SlotFunction, 1)\n\t\t\t\treturn slotValue\n\t\t\t}\n\t\t}\n\t}\n\n\tbinding, found := self.FindBindingFor(symbol)\n\tif found {\n\t\tif FunctionP(binding.Val) {\n\t\t\tatomic.StoreInt32(&FunctionValue(binding.Val).SlotFunction, 0)\n\t\t}\n\t\treturn binding.Val\n\t} else {\n\t\treturn EmptyCons()\n\t}\n}\n\nfunc (self *SymbolTableFrame) ValueOf(symbol *Data) *Data {\n\treturn self.ValueOfWithFunctionSlotCheck(symbol, false)\n}\n<commit_msg>Revert \"don't add naked symbols to the internal symbol map, they don't need to\"<commit_after>\/\/ Copyright 2014 SteelSeries ApS. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This package implements a basic LISP interpretor for embedding in a go program for scripting.\n\/\/ This file implements the symbol table.\n\npackage golisp\n\nimport (\n\t\"container\/list\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"sync\/atomic\"\n)\n\nconst (\n\tREAD_LOCK = iota\n\tWRITE_LOCK\n)\n\ntype SymbolTableFrame struct {\n\tName string\n\tParent *SymbolTableFrame\n\tPrevious *SymbolTableFrame\n\tFrame *FrameMap\n\tBindings map[string]*Binding\n\tCurrentCode *list.List\n\tIsRestricted bool\n}\n\ntype symbolsTable struct {\n\tSymbols map[string]*Data\n\tMutex sync.RWMutex\n}\n\ntype environmentsTable struct {\n\tEnvironments map[string]*SymbolTableFrame\n\tMutex sync.RWMutex\n}\n\nvar Global *SymbolTableFrame\nvar TopLevelEnvironments environmentsTable = environmentsTable{make(map[string]*SymbolTableFrame, 5), sync.RWMutex{}}\n\nvar internedSymbols symbolsTable = symbolsTable{make(map[string]*Data, 256), sync.RWMutex{}}\n\nfunc Intern(name string) (sym *Data) {\n\tinternedSymbols.Mutex.RLock()\n\tlock := READ_LOCK\n\tdefer func() {\n\t\tif lock == READ_LOCK {\n\t\t\tinternedSymbols.Mutex.RUnlock()\n\t\t} else {\n\t\t\tinternedSymbols.Mutex.Unlock()\n\t\t}\n\t}()\n\n\tsym = internedSymbols.Symbols[name]\n\tif sym == nil {\n\t\tinternedSymbols.Mutex.RUnlock()\n\t\tinternedSymbols.Mutex.Lock()\n\t\tlock = WRITE_LOCK\n\t\tsym = SymbolWithName(name)\n\t\tinternedSymbols.Symbols[name] = sym\n\t}\n\treturn\n}\n\nfunc (self *SymbolTableFrame) Depth() int {\n\tif self.Previous == nil {\n\t\treturn 1\n\t} else {\n\t\treturn 1 + self.Previous.Depth()\n\t}\n}\n\nfunc (self *SymbolTableFrame) CurrentCodeString() string {\n\tif self.CurrentCode.Len() > 0 {\n\t\treturn self.CurrentCode.Front().Value.(string)\n\t} else {\n\t\treturn \"Unknown code\"\n\t}\n}\n\nfunc (self *SymbolTableFrame) InternalDump(frameNumber int) {\n\tfmt.Printf(\"Frame %d: %s\\n\", frameNumber, self.CurrentCodeString())\n\tfor _, b := range self.Bindings {\n\t\tif b.Val == nil || TypeOf(b.Val) != PrimitiveType {\n\t\t\tb.Dump()\n\t\t}\n\t}\n\tfmt.Printf(\"\\n\")\n\tif self.Previous != nil {\n\t\tself.Previous.InternalDump(frameNumber + 1)\n\t}\n}\n\nfunc (self *SymbolTableFrame) Dump() {\n\tprintln()\n\tself.InternalDump(0)\n}\n\nfunc (self *SymbolTableFrame) DumpSingleFrame(frameNumber int) {\n\tif frameNumber == 0 {\n\t\tfmt.Printf(\"%s\\n\", self.CurrentCodeString())\n\t\tfor _, b := range self.Bindings {\n\t\t\tif b.Val == nil || TypeOf(b.Val) != PrimitiveType {\n\t\t\t\tb.Dump()\n\t\t\t}\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t} else if self.Previous != nil {\n\t\tself.Previous.DumpSingleFrame(frameNumber - 1)\n\t} else {\n\t\tfmt.Printf(\"Invalid frame selected.\\n\")\n\t}\n}\n\nfunc (self *SymbolTableFrame) InternalDumpHeaders(frameNumber int) {\n\tfmt.Printf(\"Frame %d: %s\\n\", frameNumber, self.CurrentCodeString())\n\tif self.Previous != nil {\n\t\tself.Previous.InternalDumpHeaders(frameNumber + 1)\n\t}\n}\n\nfunc (self *SymbolTableFrame) DumpHeaders() {\n\tprintln()\n\tself.InternalDumpHeaders(0)\n}\n\nfunc (self *SymbolTableFrame) DumpHeader() {\n\tfmt.Printf(\"%s\\n\", self.CurrentCodeString())\n}\n\nfunc NewSymbolTableFrameBelow(p *SymbolTableFrame, name string) *SymbolTableFrame {\n\tvar f *FrameMap = nil\n\tif p != nil {\n\t\tf = p.Frame\n\t}\n\trestricted := p != nil && p.IsRestricted\n\tenv := &SymbolTableFrame{Name: name, Parent: p, Bindings: make(map[string]*Binding), 
Frame: f, CurrentCode: list.New(), IsRestricted: restricted}\n\tif p == nil || p == Global {\n\t\tTopLevelEnvironments.Mutex.Lock()\n\t\tdefer TopLevelEnvironments.Mutex.Unlock()\n\n\t\tTopLevelEnvironments.Environments[name] = env\n\t}\n\treturn env\n}\n\nfunc NewSymbolTableFrameBelowWithFrame(p *SymbolTableFrame, f *FrameMap, name string) *SymbolTableFrame {\n\tif f == nil {\n\t\tf = p.Frame\n\t}\n\trestricted := p != nil && p.IsRestricted\n\tenv := &SymbolTableFrame{Name: name, Parent: p, Bindings: make(map[string]*Binding, 10), Frame: f, CurrentCode: list.New(), IsRestricted: restricted}\n\tif p == nil || p == Global {\n\t\tTopLevelEnvironments.Mutex.Lock()\n\t\tdefer TopLevelEnvironments.Mutex.Unlock()\n\n\t\tTopLevelEnvironments.Environments[name] = env\n\t}\n\treturn env\n}\n\nfunc (self *SymbolTableFrame) HasFrame() bool {\n\treturn self.Frame != nil\n}\n\nfunc (self *SymbolTableFrame) BindingNamed(name string) (b *Binding, present bool) {\n\tb, present = self.Bindings[name]\n\treturn\n}\n\nfunc (self *SymbolTableFrame) SetBindingAt(name string, b *Binding) {\n\tself.Bindings[name] = b\n}\n\nfunc (self *SymbolTableFrame) DeleteBinding(name string) {\n\tdelete(self.Bindings, name)\n}\n\nfunc (self *SymbolTableFrame) findSymbol(name string) (symbol *Data, found bool) {\n\tbinding, found := self.BindingNamed(name)\n\tif found {\n\t\treturn binding.Sym, true\n\t} else if self.Parent != nil {\n\t\treturn self.Parent.findSymbol(name)\n\t} else {\n\t\treturn nil, false\n\t}\n}\n\nfunc (self *SymbolTableFrame) FindBindingFor(symbol *Data) (binding *Binding, found bool) {\n\tname := StringValue(symbol)\n\tbinding, found = self.BindingNamed(name)\n\tif found {\n\t\treturn\n\t} else if self.Parent != nil {\n\t\treturn self.Parent.FindBindingFor(symbol)\n\t} else {\n\t\treturn nil, false\n\t}\n}\n\nfunc (self *SymbolTableFrame) Intern(name string) (sym *Data) {\n\tsym = Intern(name)\n\tself.BindTo(sym, nil)\n\treturn\n}\n\nfunc (self *SymbolTableFrame) BindTo(symbol *Data, value *Data) *Data {\n\tbinding, found := self.FindBindingFor(symbol)\n\tif found {\n\t\tbinding.Val = value\n\t} else {\n\t\tbinding := BindingWithSymbolAndValue(symbol, value)\n\t\tself.SetBindingAt(StringValue(symbol), binding)\n\t}\n\treturn value\n}\n\nfunc (self *SymbolTableFrame) SetTo(symbol *Data, value *Data) (result *Data, err error) {\n\tlocalBinding, found := self.findBindingInLocalFrameFor(symbol)\n\tif found {\n\t\tlocalBinding.Val = value\n\t\treturn value, nil\n\t}\n\n\tnaked := StringValue(NakedSymbolFrom(symbol))\n\tif self.HasFrame() && self.Frame.HasSlot(naked) {\n\t\tself.Frame.Set(naked, value)\n\t\treturn value, nil\n\t}\n\n\tbinding, found := self.FindBindingFor(symbol)\n\tif found {\n\t\tbinding.Val = value\n\t\treturn value, nil\n\t}\n\n\treturn nil, errors.New(fmt.Sprintf(\"%s is undefined\", StringValue(symbol)))\n}\n\nfunc (self *SymbolTableFrame) findBindingInLocalFrameFor(symbol *Data) (b *Binding, found bool) {\n\treturn self.BindingNamed(StringValue(symbol))\n}\n\nfunc (self *SymbolTableFrame) BindLocallyTo(symbol *Data, value *Data) *Data {\n\tbinding, found := self.findBindingInLocalFrameFor(symbol)\n\tif found {\n\t\tbinding.Val = value\n\t} else {\n\t\tbinding := BindingWithSymbolAndValue(symbol, value)\n\t\tself.SetBindingAt(StringValue(symbol), binding)\n\t}\n\treturn value\n}\n\nfunc (self *SymbolTableFrame) ValueOfWithFunctionSlotCheck(symbol *Data, needFunction bool) *Data {\n\tlocalBinding, found := self.findBindingInLocalFrameFor(symbol)\n\tif found {\n\t\tif 
FunctionP(localBinding.Val) {\n\t\t\tatomic.StoreInt32(&FunctionValue(localBinding.Val).SlotFunction, 1)\n\t\t}\n\t\treturn localBinding.Val\n\t}\n\n\tif self.HasFrame() {\n\t\tf := self.Frame\n\t\tnaked := StringValue(NakedSymbolFrom(symbol))\n\t\tif f.HasSlot(naked) {\n\t\t\tslotValue := f.Get(naked)\n\t\t\tif !needFunction {\n\t\t\t\treturn slotValue\n\t\t\t}\n\t\t\tif FunctionP(slotValue) {\n\t\t\t\tatomic.StoreInt32(&FunctionValue(slotValue).SlotFunction, 1)\n\t\t\t\treturn slotValue\n\t\t\t}\n\t\t}\n\t}\n\n\tbinding, found := self.FindBindingFor(symbol)\n\tif found {\n\t\tif FunctionP(binding.Val) {\n\t\t\tatomic.StoreInt32(&FunctionValue(binding.Val).SlotFunction, 0)\n\t\t}\n\t\treturn binding.Val\n\t} else {\n\t\treturn EmptyCons()\n\t}\n}\n\nfunc (self *SymbolTableFrame) ValueOf(symbol *Data) *Data {\n\treturn self.ValueOfWithFunctionSlotCheck(symbol, false)\n}\n<|endoftext|>"} {"text":"<commit_before>package requestHandler\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/zwirec\/TGChatScanner\/models\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n)\n\ntype UserJSON struct {\n\tErr string `json:\"error,omitempty\"`\n\tModel *models.User `json:\"entity,omitempty\"`\n}\n\ntype ImagesJSON struct {\n\tErr string `json:\"error\"`\n\tImages []models.Image `json:\"images\"`\n}\n\ntype ChatsJSON struct {\n\tErr string `json:\"error\"`\n\tChats []models.Chat `json:\"chats\"`\n}\n\ntype TagsJSON struct {\n\tErr string `json:\"error\"`\n\tTags []models.Tag `json:\"tags\"`\n}\n\nvar user_key = \"user\"\n\nfunc getImages(w http.ResponseWriter, req *http.Request) {\n\terr_l := appContext.SysLogger\n\tacc_l := appContext.AccessLogger\n\n\tvalues := req.URL.Query()\n\timg := models.Image{}\n\n\timgs, err := img.GetImgByParams(appContext.Db, values)\n\n\tif err != nil {\n\t\tresponse := ImagesJSON{Err: \"server error\",\n\t\t\tImages: nil}\n\t\tresponseJSON, err := json.Marshal(response)\n\n\t\tif err == nil {\n\t\t\twriteResponse(w, string(responseJSON), http.StatusInternalServerError)\n\t\t\tlogHttpRequest(acc_l, req, http.StatusInternalServerError)\n\t\t\treturn\n\t\t} else {\n\t\t\twriteResponse(w, nil, http.StatusInternalServerError)\n\t\t\tlogHttpRequest(acc_l, req, http.StatusInternalServerError)\n\t\t\terr_l.Println(err)\n\t\t\treturn\n\t\t}\n\t\twriteResponse(w, string(responseJSON), http.StatusOK)\n\t\tlogHttpRequest(acc_l, req, http.StatusOK)\n\t}\n\n\tresponse := ImagesJSON{Err: \"\",\n\t\tImages: imgs}\n\tresponseJSON, err := json.Marshal(response)\n\n\tif err == nil {\n\t\twriteResponse(w, string(responseJSON), http.StatusOK)\n\t\tlogHttpRequest(acc_l, req, http.StatusOK)\n\t\treturn\n\t} else {\n\t\terr_l.Println(err)\n\t\tlogHttpRequest(acc_l, req, http.StatusOK)\n\t\treturn\n\t}\n\treturn\n}\n\nfunc getChatTags(w http.ResponseWriter, req *http.Request) {\n\terr_l := appContext.SysLogger\n\tacc_l := appContext.AccessLogger\n\n\tvalues := req.URL.Query()\n\n\tchat_id, err := strconv.ParseInt(values[\"chat_id\"][0], 10, 64)\n\n\tif err != nil {\n\t\tresponse := TagsJSON{Err: \"invalid chat_id\",\n\t\t\tTags: nil}\n\t\tresponseJSON, err := json.Marshal(response)\n\t\tif err != nil {\n\t\t\twriteResponse(w, nil, http.StatusInternalServerError)\n\t\t\terr_l.Println(err)\n\t\t\tlogHttpRequest(acc_l, req, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\twriteResponse(w, string(responseJSON), http.StatusBadRequest)\n\t\tlogHttpRequest(acc_l, req, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tchat := models.Chat{TGID: chat_id}\n\n\ttags, err := 
chat.GetTags(appContext.Db)\n\n\tif err != nil {\n\t\terr_l.Println(err)\n\t\tresponse := TagsJSON{Err: \"system error\",\n\t\t\tTags: nil}\n\t\tresponseJSON, err := json.Marshal(response)\n\t\tif err != nil {\n\t\t\twriteResponse(w, nil, http.StatusInternalServerError)\n\t\t\terr_l.Println(err)\n\t\t\tlogHttpRequest(acc_l, req, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\twriteResponse(w, string(responseJSON), http.StatusBadRequest)\n\n\t\tlogHttpRequest(acc_l, req, http.StatusBadRequest)\n\t\treturn\n\t} else {\n\t\tresponse := TagsJSON{Err: \"\",\n\t\t\tTags: tags}\n\t\tresponseJSON, err := json.Marshal(response)\n\t\tif err != nil {\n\t\t\twriteResponse(w, nil, http.StatusInternalServerError)\n\t\t\tlogHttpRequest(acc_l, req, http.StatusInternalServerError)\n\t\t\terr_l.Println(err)\n\t\t\treturn\n\t\t}\n\t\twriteResponse(w, string(responseJSON), http.StatusOK)\n\t\tlogHttpRequest(acc_l, req, http.StatusOK)\n\t\treturn\n\t}\n\n}\n\nfunc getUserTags(w http.ResponseWriter, req *http.Request) {\n\terr_l := appContext.SysLogger\n\tacc_l := appContext.AccessLogger\n\n\tvalues := req.URL.Query()\n\n\tuser_id, err := strconv.ParseInt(values[\"user_id\"][0], 10, 32)\n\n\tif err != nil {\n\t\tresponse := TagsJSON{Err: \"invalid chat_id\",\n\t\t\tTags: nil}\n\t\tresponseJSON, err := json.Marshal(response)\n\t\tif err != nil {\n\t\t\twriteResponse(w, nil, http.StatusInternalServerError)\n\t\t\terr_l.Println(err)\n\t\t\tlogHttpRequest(acc_l, req, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\twriteResponse(w, string(responseJSON), http.StatusBadRequest)\n\t\tlogHttpRequest(acc_l, req, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tuser := models.User{TGID: int(user_id)}\n\n\ttags, err := user.GetTags(appContext.Db)\n\n\tif err != nil {\n\t\terr_l.Println(err)\n\t\tresponse := TagsJSON{Err: \"system error\",\n\t\t\tTags: nil}\n\t\tresponseJSON, err := json.Marshal(response)\n\t\tif err != nil {\n\t\t\twriteResponse(w, nil, http.StatusInternalServerError)\n\t\t\terr_l.Println(err)\n\t\t\tlogHttpRequest(acc_l, req, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\twriteResponse(w, string(responseJSON), http.StatusBadRequest)\n\n\t\tlogHttpRequest(acc_l, req, http.StatusBadRequest)\n\t\treturn\n\t} else {\n\t\tresponse := TagsJSON{Err: \"\",\n\t\t\tTags: tags}\n\t\tresponseJSON, err := json.Marshal(response)\n\t\tif err != nil {\n\t\t\twriteResponse(w, nil, http.StatusInternalServerError)\n\t\t\tlogHttpRequest(acc_l, req, http.StatusInternalServerError)\n\t\t\terr_l.Println(err)\n\t\t\treturn\n\t\t}\n\t\twriteResponse(w, string(responseJSON), http.StatusOK)\n\t\tlogHttpRequest(acc_l, req, http.StatusOK)\n\t\treturn\n\t}\n\n}\n\nfunc getChats(w http.ResponseWriter, req *http.Request) {\n\terr_l := appContext.SysLogger\n\tacc_l := appContext.AccessLogger\n\n\tuser := req.Context().Value(user_key).(*models.User)\n\n\tif err := user.GetUsersChats(appContext.Db); err != nil {\n\t\terr_l.Println(err)\n\t\tresponse := ChatsJSON{Err: \"system error\",\n\t\t\tChats: nil}\n\t\tresponseJSON, _ := json.Marshal(response)\n\t\twriteResponse(w, string(responseJSON), http.StatusInternalServerError)\n\n\t\tlogHttpRequest(acc_l, req, http.StatusInternalServerError)\n\t\treturn\n\t} else {\n\t\tresponse := ChatsJSON{Err: \"\",\n\t\t\tChats: user.Chats}\n\t\tresponseJSON, _ := json.Marshal(response)\n\t\twriteResponse(w, string(responseJSON), http.StatusOK)\n\t\tlogHttpRequest(acc_l, req, http.StatusOK)\n\t\treturn\n\t}\n}\n\nfunc getTags(w http.ResponseWriter, req *http.Request) 
{\n\t\/\/TODO\n\tfmt.Fprint(w, \"tags.get\")\n\treturn\n}\n\nfunc removeSubs(w http.ResponseWriter, req *http.Request) {\n\t\/\/TODO\n\tfmt.Fprint(w, \"subs.remove\")\n\treturn\n}\n\nfunc validateLoginParams(values map[string]interface{}) (ok bool) {\n\tif values[\"username\"] == nil || values[\"password\"] == nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc writeResponse(w http.ResponseWriter, data interface{}, status int) error {\n\tw.WriteHeader(status)\n\tif data != nil {\n\t\t_, err := fmt.Fprint(w, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc logHttpRequest(l *log.Logger, req *http.Request, code int) {\n\tl.Printf(`%s \"%s %s %s %d\"`, req.RemoteAddr, req.Method, req.URL.Path, req.Proto, code)\n}\n<commit_msg>update<commit_after>package requestHandler\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/zwirec\/TGChatScanner\/models\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n)\n\ntype UserJSON struct {\n\tErr string `json:\"error,omitempty\"`\n\tModel *models.User `json:\"entity,omitempty\"`\n}\n\ntype ImagesJSON struct {\n\tErr string `json:\"error\"`\n\tImages []models.Image `json:\"images\"`\n}\n\ntype ChatsJSON struct {\n\tErr string `json:\"error\"`\n\tChats []models.Chat `json:\"chats\"`\n}\n\ntype TagsJSON struct {\n\tErr string `json:\"error\"`\n\tTags []models.Tag `json:\"tags\"`\n}\n\nvar user_key = \"user\"\n\nfunc getImages(w http.ResponseWriter, req *http.Request) {\n\terr_l := appContext.SysLogger\n\tacc_l := appContext.AccessLogger\n\n\tvalues := req.URL.Query()\n\timg := models.Image{}\n\n\timgs, err := img.GetImgByParams(appContext.Db, values)\n\n\tif err != nil {\n\t\tresponse := ImagesJSON{Err: \"server error\",\n\t\t\tImages: nil}\n\t\tresponseJSON, err := json.Marshal(response)\n\n\t\tif err == nil {\n\t\t\twriteResponse(w, string(responseJSON), http.StatusInternalServerError)\n\t\t\tlogHttpRequest(acc_l, req, http.StatusInternalServerError)\n\t\t\treturn\n\t\t} else {\n\t\t\twriteResponse(w, nil, http.StatusInternalServerError)\n\t\t\tlogHttpRequest(acc_l, req, http.StatusInternalServerError)\n\t\t\terr_l.Println(err)\n\t\t\treturn\n\t\t}\n\t\twriteResponse(w, string(responseJSON), http.StatusOK)\n\t\tlogHttpRequest(acc_l, req, http.StatusOK)\n\t}\n\n\tresponse := ImagesJSON{Err: \"\",\n\t\tImages: imgs}\n\tresponseJSON, err := json.Marshal(response)\n\n\tif err == nil {\n\t\twriteResponse(w, string(responseJSON), http.StatusOK)\n\t\tlogHttpRequest(acc_l, req, http.StatusOK)\n\t\treturn\n\t} else {\n\t\terr_l.Println(err)\n\t\tlogHttpRequest(acc_l, req, http.StatusOK)\n\t\treturn\n\t}\n\treturn\n}\n\nfunc getChatTags(w http.ResponseWriter, req *http.Request) {\n\terr_l := appContext.SysLogger\n\tacc_l := appContext.AccessLogger\n\n\tvalues := req.URL.Query()\n\n\tchatid, ok := values[\"chat_id\"]\n\n\tif !ok {\n\t\tresponse := TagsJSON{Err: \"invalid chat_id\",\n\t\t\tTags: nil}\n\t\tresponseJSON, err := json.Marshal(response)\n\t\tif err != nil {\n\t\t\twriteResponse(w, nil, http.StatusInternalServerError)\n\t\t\terr_l.Println(err)\n\t\t\tlogHttpRequest(acc_l, req, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\twriteResponse(w, string(responseJSON), http.StatusBadRequest)\n\t\tlogHttpRequest(acc_l, req, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tchat_id, err := strconv.ParseInt(chatid[0], 10, 64)\n\n\tif err != nil {\n\t\tresponse := TagsJSON{Err: \"invalid chat_id\",\n\t\t\tTags: nil}\n\t\tresponseJSON, err := json.Marshal(response)\n\t\tif err != nil {\n\t\t\twriteResponse(w, nil, 
http.StatusInternalServerError)\n\t\t\terr_l.Println(err)\n\t\t\tlogHttpRequest(acc_l, req, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\twriteResponse(w, string(responseJSON), http.StatusBadRequest)\n\t\tlogHttpRequest(acc_l, req, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tchat := models.Chat{TGID: chat_id}\n\n\ttags, err := chat.GetTags(appContext.Db)\n\n\tif err != nil {\n\t\terr_l.Println(err)\n\t\tresponse := TagsJSON{Err: \"system error\",\n\t\t\tTags: nil}\n\t\tresponseJSON, err := json.Marshal(response)\n\t\tif err != nil {\n\t\t\twriteResponse(w, nil, http.StatusInternalServerError)\n\t\t\terr_l.Println(err)\n\t\t\tlogHttpRequest(acc_l, req, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\twriteResponse(w, string(responseJSON), http.StatusBadRequest)\n\t\tlogHttpRequest(acc_l, req, http.StatusBadRequest)\n\t\treturn\n\t} else {\n\t\tresponse := TagsJSON{Err: \"\",\n\t\t\tTags: tags}\n\t\tresponseJSON, err := json.Marshal(response)\n\t\tif err != nil {\n\t\t\twriteResponse(w, nil, http.StatusInternalServerError)\n\t\t\tlogHttpRequest(acc_l, req, http.StatusInternalServerError)\n\t\t\terr_l.Println(err)\n\t\t\treturn\n\t\t}\n\t\twriteResponse(w, string(responseJSON), http.StatusOK)\n\t\tlogHttpRequest(acc_l, req, http.StatusOK)\n\t\treturn\n\t}\n\n}\n\nfunc getUserTags(w http.ResponseWriter, req *http.Request) {\n\terr_l := appContext.SysLogger\n\tacc_l := appContext.AccessLogger\n\n\tuser := req.Context().Value(user_key).(*models.User)\n\n\ttags, err := user.GetTags(appContext.Db)\n\n\tif err != nil {\n\t\terr_l.Println(err)\n\t\tresponse := TagsJSON{Err: \"system error\",\n\t\t\tTags: nil}\n\t\tresponseJSON, err := json.Marshal(response)\n\t\tif err != nil {\n\t\t\twriteResponse(w, nil, http.StatusInternalServerError)\n\t\t\terr_l.Println(err)\n\t\t\tlogHttpRequest(acc_l, req, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\twriteResponse(w, string(responseJSON), http.StatusBadRequest)\n\n\t\tlogHttpRequest(acc_l, req, http.StatusBadRequest)\n\t\treturn\n\t} else {\n\t\tresponse := TagsJSON{Err: \"\",\n\t\t\tTags: tags}\n\t\tresponseJSON, err := json.Marshal(response)\n\t\tif err != nil {\n\t\t\twriteResponse(w, nil, http.StatusInternalServerError)\n\t\t\tlogHttpRequest(acc_l, req, http.StatusInternalServerError)\n\t\t\terr_l.Println(err)\n\t\t\treturn\n\t\t}\n\t\twriteResponse(w, string(responseJSON), http.StatusOK)\n\t\tlogHttpRequest(acc_l, req, http.StatusOK)\n\t\treturn\n\t}\n\n}\n\nfunc getChats(w http.ResponseWriter, req *http.Request) {\n\terr_l := appContext.SysLogger\n\tacc_l := appContext.AccessLogger\n\n\tuser := req.Context().Value(user_key).(*models.User)\n\n\tif err := user.GetUsersChats(appContext.Db); err != nil {\n\t\terr_l.Println(err)\n\t\tresponse := ChatsJSON{Err: \"system error\",\n\t\t\tChats: nil}\n\t\tresponseJSON, _ := json.Marshal(response)\n\t\twriteResponse(w, string(responseJSON), http.StatusInternalServerError)\n\n\t\tlogHttpRequest(acc_l, req, http.StatusInternalServerError)\n\t\treturn\n\t} else {\n\t\tresponse := ChatsJSON{Err: \"\",\n\t\t\tChats: user.Chats}\n\t\tresponseJSON, _ := json.Marshal(response)\n\t\twriteResponse(w, string(responseJSON), http.StatusOK)\n\t\tlogHttpRequest(acc_l, req, http.StatusOK)\n\t\treturn\n\t}\n}\n\nfunc getTags(w http.ResponseWriter, req *http.Request) {\n\t\/\/TODO\n\tfmt.Fprint(w, \"tags.get\")\n\treturn\n}\n\nfunc removeSubs(w http.ResponseWriter, req *http.Request) {\n\t\/\/TODO\n\tfmt.Fprint(w, \"subs.remove\")\n\treturn\n}\n\nfunc validateLoginParams(values 
map[string]interface{}) (ok bool) {\n\tif values[\"username\"] == nil || values[\"password\"] == nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc writeResponse(w http.ResponseWriter, data interface{}, status int) error {\n\tw.WriteHeader(status)\n\tif data != nil {\n\t\t_, err := fmt.Fprint(w, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc logHttpRequest(l *log.Logger, req *http.Request, code int) {\n\tl.Printf(`%s \"%s %s %s %d\"`, req.RemoteAddr, req.Method, req.URL.Path, req.Proto, code)\n}\n<|endoftext|>"} {"text":"<commit_before>package lfs\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/github\/git-lfs\/vendor\/_nuts\/github.com\/cheggaaa\/pb\"\n\t\"github.com\/github\/git-lfs\/vendor\/_nuts\/github.com\/rubyist\/tracerx\"\n\tcontentaddressable \"github.com\/github\/git-lfs\/vendor\/_nuts\/github.com\/technoweenie\/go-contentaddressable\"\n)\n\nfunc PointerSmudgeToFile(filename string, ptr *Pointer, download bool, cb CopyCallback) error {\n\tos.MkdirAll(filepath.Dir(filename), 0755)\n\tfile, err := os.Create(filename)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not create working directory file: %v\", err)\n\t}\n\tdefer file.Close()\n\tif err := PointerSmudge(file, ptr, filename, download, cb); err != nil {\n\t\tif IsDownloadDeclinedError(err) {\n\t\t\t\/\/ write placeholder data instead\n\t\t\tfile.Seek(0, os.SEEK_SET)\n\t\t\tptr.Encode(file)\n\t\t\treturn err\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"Could not write working directory file: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc PointerSmudge(writer io.Writer, ptr *Pointer, workingfile string, download bool, cb CopyCallback) error {\n\tmediafile, err := LocalMediaPath(ptr.Oid)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstat, statErr := os.Stat(mediafile)\n\tif statErr == nil && stat != nil {\n\t\tfileSize := stat.Size()\n\t\tif fileSize == 0 || fileSize != ptr.Size {\n\t\t\ttracerx.Printf(\"Removing %s, size %d is invalid\", mediafile, fileSize)\n\t\t\tos.RemoveAll(mediafile)\n\t\t\tstat = nil\n\t\t}\n\t}\n\n\tif statErr != nil || stat == nil {\n\t\tif download {\n\t\t\terr = downloadFile(writer, ptr, workingfile, mediafile, cb)\n\t\t} else {\n\t\t\treturn newDownloadDeclinedError(nil)\n\t\t}\n\t} else {\n\t\terr = readLocalFile(writer, ptr, mediafile, workingfile, cb)\n\t}\n\n\tif err != nil {\n\t\treturn newSmudgeError(err, ptr.Oid, mediafile)\n\t}\n\n\treturn nil\n}\n\n\/\/ PointerSmudgeObject uses a Pointer and objectResource to download the object to the\n\/\/ media directory. 
It does not write the file to the working directory.\nfunc PointerSmudgeObject(ptr *Pointer, obj *objectResource, cb CopyCallback) error {\n\tmediafile, err := LocalMediaPath(obj.Oid)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstat, statErr := os.Stat(mediafile)\n\tif statErr == nil && stat != nil {\n\t\tfileSize := stat.Size()\n\t\tif fileSize == 0 || fileSize != obj.Size {\n\t\t\ttracerx.Printf(\"Removing %s, size %d is invalid\", mediafile, fileSize)\n\t\t\tos.RemoveAll(mediafile)\n\t\t\tstat = nil\n\t\t}\n\t}\n\n\tif statErr != nil || stat == nil {\n\t\terr := downloadObject(ptr, obj, mediafile, cb)\n\n\t\tif err != nil {\n\t\t\treturn newSmudgeError(err, obj.Oid, mediafile)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc downloadObject(ptr *Pointer, obj *objectResource, mediafile string, cb CopyCallback) error {\n\treader, size, err := DownloadObject(obj)\n\tif reader != nil {\n\t\tdefer reader.Close()\n\t}\n\n\t\/\/ TODO this can be unified with the same code in downloadFile\n\tif err != nil {\n\t\t\/\/ err.Errorf(\"Error downloading %s.\", mediafile) ERRTODO\n\t\treturn err\n\t}\n\n\tif ptr.Size == 0 {\n\t\tptr.Size = size\n\t}\n\n\tmediaFile, err := contentaddressable.NewFile(mediafile)\n\tif err != nil {\n\t\treturn Errorf(err, \"Error opening media file buffer.\")\n\t}\n\n\t_, err = CopyWithCallback(mediaFile, reader, ptr.Size, cb)\n\tif err == nil {\n\t\terr = mediaFile.Accept()\n\t}\n\tmediaFile.Close()\n\n\tif err != nil {\n\t\treturn Errorf(err, \"Error buffering media file.\")\n\t}\n\n\treturn nil\n}\n\nfunc downloadFile(writer io.Writer, ptr *Pointer, workingfile, mediafile string, cb CopyCallback) error {\n\tfmt.Fprintf(os.Stderr, \"Downloading %s (%s)\\n\", workingfile, pb.FormatBytes(ptr.Size))\n\treader, size, err := Download(filepath.Base(mediafile))\n\tif reader != nil {\n\t\tdefer reader.Close()\n\t}\n\n\tif err != nil {\n\t\t\/\/err.Errorf(\"Error downloading %s.\", mediafile) ERRTODO\n\t\treturn err\n\t}\n\n\tif ptr.Size == 0 {\n\t\tptr.Size = size\n\t}\n\n\tmediaFile, err := contentaddressable.NewFile(mediafile)\n\tif err != nil {\n\t\treturn Errorf(err, \"Error opening media file buffer.\")\n\t}\n\n\t_, err = CopyWithCallback(mediaFile, reader, ptr.Size, cb)\n\tif err == nil {\n\t\terr = mediaFile.Accept()\n\t}\n\tmediaFile.Close()\n\n\tif err != nil {\n\t\treturn Errorf(err, \"Error buffering media file.\")\n\t}\n\n\treturn readLocalFile(writer, ptr, mediafile, workingfile, nil)\n}\n\nfunc readLocalFile(writer io.Writer, ptr *Pointer, mediafile string, workingfile string, cb CopyCallback) error {\n\treader, err := os.Open(mediafile)\n\tif err != nil {\n\t\treturn Errorf(err, \"Error opening media file.\")\n\t}\n\tdefer reader.Close()\n\n\tif ptr.Size == 0 {\n\t\tif stat, _ := os.Stat(mediafile); stat != nil {\n\t\t\tptr.Size = stat.Size()\n\t\t}\n\t}\n\n\tif len(ptr.Extensions) > 0 {\n\t\tregisteredExts := Config.Extensions()\n\t\textensions := make(map[string]Extension)\n\t\tfor _, ptrExt := range ptr.Extensions {\n\t\t\text, ok := registeredExts[ptrExt.Name]\n\t\t\tif !ok {\n\t\t\t\terr := fmt.Errorf(\"Extension '%s' is not configured.\", ptrExt.Name)\n\t\t\t\treturn Error(err)\n\t\t\t}\n\t\t\text.Priority = ptrExt.Priority\n\t\t\textensions[ext.Name] = ext\n\t\t}\n\t\texts, err := SortExtensions(extensions)\n\t\tif err != nil {\n\t\t\treturn Error(err)\n\t\t}\n\n\t\t\/\/ pipe extensions in reverse order\n\t\tvar extsR []Extension\n\t\tfor i, _ := range exts {\n\t\t\text := exts[len(exts)-1-i]\n\t\t\textsR = append(extsR, ext)\n\t\t}\n\n\t\trequest := 
&pipeRequest{\"smudge\", reader, workingfile, extsR}\n\n\t\tresponse, err := pipeExtensions(request)\n\t\tif err != nil {\n\t\t\treturn Error(err)\n\t\t}\n\n\t\tactualExts := make(map[string]*pipeExtResult)\n\t\tfor _, result := range response.results {\n\t\t\tactualExts[result.name] = result\n\t\t}\n\n\t\t\/\/ verify name, order, and oids\n\t\toid := response.results[0].oidIn\n\t\tif ptr.Oid != oid {\n\t\t\terr = fmt.Errorf(\"Actual oid %s during smudge does not match expected %s\", oid, ptr.Oid)\n\t\t\treturn Error(err)\n\t\t}\n\n\t\tfor _, expected := range ptr.Extensions {\n\t\t\tactual := actualExts[expected.Name]\n\t\t\tif actual.name != expected.Name {\n\t\t\t\terr = fmt.Errorf(\"Actual extension name '%s' does not match expected '%s'\", actual.name, expected.Name)\n\t\t\t\treturn Error(err)\n\t\t\t}\n\t\t\tif actual.oidOut != expected.Oid {\n\t\t\t\terr = fmt.Errorf(\"Actual oid %s for extension '%s' does not match expected %s\", actual.oidOut, expected.Name, expected.Oid)\n\t\t\t\treturn Error(err)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ setup reader\n\t\treader, err = os.Open(response.file.Name())\n\t\tif err != nil {\n\t\t\treturn Errorf(err, \"Error opening smudged file.\")\n\t\t}\n\t\tdefer reader.Close()\n\t}\n\n\t_, err = CopyWithCallback(writer, reader, ptr.Size, cb)\n\tif err != nil {\n\t\treturn Errorf(err, \"Error reading from media file.\")\n\t}\n\n\treturn nil\n}\n<commit_msg>ンンンンン ンンンン<commit_after>package lfs\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/github\/git-lfs\/vendor\/_nuts\/github.com\/cheggaaa\/pb\"\n\t\"github.com\/github\/git-lfs\/vendor\/_nuts\/github.com\/rubyist\/tracerx\"\n\tcontentaddressable \"github.com\/github\/git-lfs\/vendor\/_nuts\/github.com\/technoweenie\/go-contentaddressable\"\n)\n\nfunc PointerSmudgeToFile(filename string, ptr *Pointer, download bool, cb CopyCallback) error {\n\tos.MkdirAll(filepath.Dir(filename), 0755)\n\tfile, err := os.Create(filename)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not create working directory file: %v\", err)\n\t}\n\tdefer file.Close()\n\tif err := PointerSmudge(file, ptr, filename, download, cb); err != nil {\n\t\tif IsDownloadDeclinedError(err) {\n\t\t\t\/\/ write placeholder data instead\n\t\t\tfile.Seek(0, os.SEEK_SET)\n\t\t\tptr.Encode(file)\n\t\t\treturn err\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"Could not write working directory file: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc PointerSmudge(writer io.Writer, ptr *Pointer, workingfile string, download bool, cb CopyCallback) error {\n\tmediafile, err := LocalMediaPath(ptr.Oid)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstat, statErr := os.Stat(mediafile)\n\tif statErr == nil && stat != nil {\n\t\tfileSize := stat.Size()\n\t\tif fileSize == 0 || fileSize != ptr.Size {\n\t\t\ttracerx.Printf(\"Removing %s, size %d is invalid\", mediafile, fileSize)\n\t\t\tos.RemoveAll(mediafile)\n\t\t\tstat = nil\n\t\t}\n\t}\n\n\tif statErr != nil || stat == nil {\n\t\tif download {\n\t\t\terr = downloadFile(writer, ptr, workingfile, mediafile, cb)\n\t\t} else {\n\t\t\treturn newDownloadDeclinedError(nil)\n\t\t}\n\t} else {\n\t\terr = readLocalFile(writer, ptr, mediafile, workingfile, cb)\n\t}\n\n\tif err != nil {\n\t\treturn newSmudgeError(err, ptr.Oid, mediafile)\n\t}\n\n\treturn nil\n}\n\n\/\/ PointerSmudgeObject uses a Pointer and objectResource to download the object to the\n\/\/ media directory. 
It does not write the file to the working directory.\nfunc PointerSmudgeObject(ptr *Pointer, obj *objectResource, cb CopyCallback) error {\n\tmediafile, err := LocalMediaPath(obj.Oid)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstat, statErr := os.Stat(mediafile)\n\tif statErr == nil && stat != nil {\n\t\tfileSize := stat.Size()\n\t\tif fileSize == 0 || fileSize != obj.Size {\n\t\t\ttracerx.Printf(\"Removing %s, size %d is invalid\", mediafile, fileSize)\n\t\t\tos.RemoveAll(mediafile)\n\t\t\tstat = nil\n\t\t}\n\t}\n\n\tif statErr != nil || stat == nil {\n\t\terr := downloadObject(ptr, obj, mediafile, cb)\n\n\t\tif err != nil {\n\t\t\treturn newSmudgeError(err, obj.Oid, mediafile)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc downloadObject(ptr *Pointer, obj *objectResource, mediafile string, cb CopyCallback) error {\n\treader, size, err := DownloadObject(obj)\n\tif reader != nil {\n\t\tdefer reader.Close()\n\t}\n\n\t\/\/ TODO this can be unified with the same code in downloadFile\n\tif err != nil {\n\t\treturn Errorf(err, \"Error downloading %s\", mediafile)\n\t}\n\n\tif ptr.Size == 0 {\n\t\tptr.Size = size\n\t}\n\n\tmediaFile, err := contentaddressable.NewFile(mediafile)\n\tif err != nil {\n\t\treturn Errorf(err, \"Error opening media file buffer.\")\n\t}\n\n\t_, err = CopyWithCallback(mediaFile, reader, ptr.Size, cb)\n\tif err == nil {\n\t\terr = mediaFile.Accept()\n\t}\n\tmediaFile.Close()\n\n\tif err != nil {\n\t\treturn Errorf(err, \"Error buffering media file.\")\n\t}\n\n\treturn nil\n}\n\nfunc downloadFile(writer io.Writer, ptr *Pointer, workingfile, mediafile string, cb CopyCallback) error {\n\tfmt.Fprintf(os.Stderr, \"Downloading %s (%s)\\n\", workingfile, pb.FormatBytes(ptr.Size))\n\treader, size, err := Download(filepath.Base(mediafile))\n\tif reader != nil {\n\t\tdefer reader.Close()\n\t}\n\n\tif err != nil {\n\t\treturn Errorf(err, \"Error downloading %s.\", mediafile)\n\t}\n\n\tif ptr.Size == 0 {\n\t\tptr.Size = size\n\t}\n\n\tmediaFile, err := contentaddressable.NewFile(mediafile)\n\tif err != nil {\n\t\treturn Errorf(err, \"Error opening media file buffer.\")\n\t}\n\n\t_, err = CopyWithCallback(mediaFile, reader, ptr.Size, cb)\n\tif err == nil {\n\t\terr = mediaFile.Accept()\n\t}\n\tmediaFile.Close()\n\n\tif err != nil {\n\t\treturn Errorf(err, \"Error buffering media file.\")\n\t}\n\n\treturn readLocalFile(writer, ptr, mediafile, workingfile, nil)\n}\n\nfunc readLocalFile(writer io.Writer, ptr *Pointer, mediafile string, workingfile string, cb CopyCallback) error {\n\treader, err := os.Open(mediafile)\n\tif err != nil {\n\t\treturn Errorf(err, \"Error opening media file.\")\n\t}\n\tdefer reader.Close()\n\n\tif ptr.Size == 0 {\n\t\tif stat, _ := os.Stat(mediafile); stat != nil {\n\t\t\tptr.Size = stat.Size()\n\t\t}\n\t}\n\n\tif len(ptr.Extensions) > 0 {\n\t\tregisteredExts := Config.Extensions()\n\t\textensions := make(map[string]Extension)\n\t\tfor _, ptrExt := range ptr.Extensions {\n\t\t\text, ok := registeredExts[ptrExt.Name]\n\t\t\tif !ok {\n\t\t\t\terr := fmt.Errorf(\"Extension '%s' is not configured.\", ptrExt.Name)\n\t\t\t\treturn Error(err)\n\t\t\t}\n\t\t\text.Priority = ptrExt.Priority\n\t\t\textensions[ext.Name] = ext\n\t\t}\n\t\texts, err := SortExtensions(extensions)\n\t\tif err != nil {\n\t\t\treturn Error(err)\n\t\t}\n\n\t\t\/\/ pipe extensions in reverse order\n\t\tvar extsR []Extension\n\t\tfor i, _ := range exts {\n\t\t\text := exts[len(exts)-1-i]\n\t\t\textsR = append(extsR, ext)\n\t\t}\n\n\t\trequest := &pipeRequest{\"smudge\", reader, workingfile, 
extsR}\n\n\t\tresponse, err := pipeExtensions(request)\n\t\tif err != nil {\n\t\t\treturn Error(err)\n\t\t}\n\n\t\tactualExts := make(map[string]*pipeExtResult)\n\t\tfor _, result := range response.results {\n\t\t\tactualExts[result.name] = result\n\t\t}\n\n\t\t\/\/ verify name, order, and oids\n\t\toid := response.results[0].oidIn\n\t\tif ptr.Oid != oid {\n\t\t\terr = fmt.Errorf(\"Actual oid %s during smudge does not match expected %s\", oid, ptr.Oid)\n\t\t\treturn Error(err)\n\t\t}\n\n\t\tfor _, expected := range ptr.Extensions {\n\t\t\tactual := actualExts[expected.Name]\n\t\t\tif actual.name != expected.Name {\n\t\t\t\terr = fmt.Errorf(\"Actual extension name '%s' does not match expected '%s'\", actual.name, expected.Name)\n\t\t\t\treturn Error(err)\n\t\t\t}\n\t\t\tif actual.oidOut != expected.Oid {\n\t\t\t\terr = fmt.Errorf(\"Actual oid %s for extension '%s' does not match expected %s\", actual.oidOut, expected.Name, expected.Oid)\n\t\t\t\treturn Error(err)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ setup reader\n\t\treader, err = os.Open(response.file.Name())\n\t\tif err != nil {\n\t\t\treturn Errorf(err, \"Error opening smudged file.\")\n\t\t}\n\t\tdefer reader.Close()\n\t}\n\n\t_, err = CopyWithCallback(writer, reader, ptr.Size, cb)\n\tif err != nil {\n\t\treturn Errorf(err, \"Error reading from media file.\")\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017-present Kirill Danshin and Gramework contributors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\npackage gramework\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"github.com\/valyala\/fasthttp\"\n)\n\ntype reqHandlerDefault interface {\n\tHandler(*Context)\n}\n\ntype reqHandlerWithError interface {\n\tHandler(*Context) error\n}\n\ntype reqHandlerWithEfaceError interface {\n\tHandler(*Context) (interface{}, error)\n}\n\ntype reqHandlerWithEface interface {\n\tHandler(*Context) interface{}\n}\n\ntype reqHandlerNoCtx interface {\n\tHandler()\n}\n\ntype reqHandlerWithErrorNoCtx interface {\n\tHandler() error\n}\n\ntype reqHandlerWithEfaceErrorNoCtx interface {\n\tHandler() (interface{}, error)\n}\n\ntype reqHandlerWithEfaceNoCtx interface {\n\tHandler() interface{}\n}\n\nfunc (r *Router) determineHandler(handler interface{}) func(*Context) {\n\t\/\/ copy handler, we don't want to mutate our arguments\n\trawHandler := handler\n\n\t\/\/ prepare handler in case if it one of our supported interfaces\n\tswitch h := handler.(type) {\n\tcase reqHandlerDefault:\n\t\trawHandler = h.Handler\n\tcase reqHandlerWithError:\n\t\trawHandler = h.Handler\n\tcase reqHandlerWithEfaceError:\n\t\trawHandler = h.Handler\n\tcase reqHandlerWithEface:\n\t\trawHandler = h.Handler\n\tcase reqHandlerNoCtx:\n\t\trawHandler = h.Handler\n\tcase reqHandlerWithErrorNoCtx:\n\t\trawHandler = h.Handler\n\tcase reqHandlerWithEfaceErrorNoCtx:\n\t\trawHandler = h.Handler\n\tcase reqHandlerWithEfaceNoCtx:\n\t\trawHandler = h.Handler\n\t}\n\n\t\/\/ finally, process the handler\n\tswitch h := rawHandler.(type) {\n\tcase HTML:\n\t\treturn r.getHTMLServer(h)\n\tcase JSON:\n\t\treturn r.getJSONServer(h)\n\tcase func(*Context):\n\t\treturn h\n\tcase RequestHandler:\n\t\treturn h\n\tcase func(*Context) error:\n\t\treturn r.getErrorHandler(h)\n\tcase func(*fasthttp.RequestCtx):\n\t\treturn r.getGrameHandler(h)\n\tcase 
func(*fasthttp.RequestCtx) error:\n\t\treturn r.getGrameErrorHandler(h)\n\tcase func() interface{}:\n\t\treturn r.getEfaceEncoder(h)\n\tcase func() (interface{}, error):\n\t\treturn r.getEfaceErrEncoder(h)\n\tcase func(*Context) interface{}:\n\t\treturn r.getEfaceCtxEncoder(h)\n\tcase func(*Context) (interface{}, error):\n\t\treturn r.getEfaceCtxErrEncoder(h)\n\tcase string:\n\t\treturn r.getStringServer(h)\n\tcase []byte:\n\t\treturn r.getBytesServer(h)\n\tcase int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:\n\t\treturn r.getFmtDHandler(h)\n\tcase float32, float64:\n\t\treturn r.getFmtFHandler(h)\n\tcase func():\n\t\treturn r.getGrameDumbHandler(h)\n\tcase func() error:\n\t\treturn r.getGrameDumbErrorHandler(h)\n\tcase func() string:\n\t\treturn r.getEFuncStrHandler(h)\n\tcase func() map[string]interface{}:\n\t\treturn r.getHandlerEncoder(h)\n\tcase func(*Context) map[string]interface{}:\n\t\treturn r.getCtxHandlerEncoder(h)\n\tcase func() (map[string]interface{}, error):\n\t\treturn r.getHandlerEncoderErr(h)\n\tcase func(*Context) (map[string]interface{}, error):\n\t\treturn r.getCtxHandlerEncoderErr(h)\n\tdefault:\n\t\trv := reflect.ValueOf(h)\n\t\tif rv.Kind() == reflect.Func {\n\t\t\thandler, err := r.getCachedReflectHandler(h)\n\t\t\tif err != nil {\n\t\t\t\tr.app.internalLog.WithError(err).Fatal(\"Unsupported reflect handler signature\")\n\t\t\t}\n\n\t\t\treturn handler\n\t\t}\n\t\tr.app.internalLog.Warnf(\"Unknown handler type: %T, serving fmt.Sprintf(%%v)\", h)\n\t\treturn r.getFmtVHandler(h)\n\t}\n}\n\ntype reflectDecodedBodyRecv struct {\n\tidx int\n\tt reflect.Type\n}\n\nfunc (r *Router) getCachedReflectHandler(h interface{}) (func(*Context), error) {\n\tfuncT := reflect.TypeOf(h)\n\tif funcT.IsVariadic() {\n\t\treturn nil, errors.New(\"could not process variadic reflect handler\")\n\t}\n\n\tresults := funcT.NumOut()\n\tif results > 2 {\n\t\treturn nil, errors.New(\"reflect handler output should be one of (any), (any, error), (error) or ()\")\n\t}\n\n\tparams := funcT.NumIn()\n\tdecodedBodyRecv := []reflectDecodedBodyRecv{}\n\tctxRecv := -1\n\n\tcheckForErrorAt := -1\n\tencodeDataAt := -1\n\n\tfor i := 0; i < params; i++ {\n\t\tp := funcT.In(i)\n\t\tif strings.Contains(p.String(), \"*gramework.Context\") {\n\t\t\tctxRecv = i\n\t\t\tcontinue\n\t\t}\n\t\tdecodedBodyRecv = append(decodedBodyRecv, reflectDecodedBodyRecv{\n\t\t\tidx: i,\n\t\t\tt: p,\n\t\t})\n\t}\n\n\tfor i := 0; i < results; i++ {\n\t\tr := funcT.Out(i)\n\t\tprintln(r.String())\n\n\t\tif r.String() == \"error\" {\n\t\t\tif i == 0 && results > 1 {\n\t\t\t\treturn nil, errors.New(\"reflect handler output should be one of (any), (any, error), (error) or ()\")\n\t\t\t}\n\n\t\t\tcheckForErrorAt = i\n\t\t\tcontinue\n\t\t}\n\n\t\tif encodeDataAt >= 0 {\n\t\t\treturn nil, errors.New(\"reflect handler output should be one of (any), (any, error), (error) or ()\")\n\t\t}\n\n\t\tencodeDataAt = i\n\t}\n\n\tfuncV := reflect.ValueOf(h)\n\n\thandler := r.getEfaceCtxErrEncoder(func(ctx *Context) (v interface{}, err error) {\n\t\tcallParams := make([]reflect.Value, params)\n\t\tif len(decodedBodyRecv) > 0 {\n\t\t\tunsupportedBodyType := true\n\t\t\tfor i := range decodedBodyRecv {\n\t\t\t\tdecoded := reflect.New(decodedBodyRecv[i].t).Interface()\n\t\t\t\tif jsonErr := ctx.UnJSON(decoded); jsonErr == nil {\n\t\t\t\t\tunsupportedBodyType = false\n\t\t\t\t\tdecodedV := reflect.ValueOf(decoded)\n\n\t\t\t\t\tcallParams[decodedBodyRecv[i].idx] = decodedV.Elem()\n\t\t\t\t} else 
{\n\t\t\t\t\tcallParams[decodedBodyRecv[i].idx] = reflect.Zero(decodedBodyRecv[i].t)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif unsupportedBodyType {\n\t\t\t\tctx.SetStatusCode(500)\n\t\t\t\tctx.Logger.Error(\"unsupported body type\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif ctxRecv >= 0 {\n\t\t\tcallParams[ctxRecv] = reflect.ValueOf(ctx)\n\t\t}\n\n\t\tres := funcV.Call(callParams)\n\t\tif checkForErrorAt >= 0 && !res[checkForErrorAt].IsNil() {\n\t\t\tresErr, ok := res[checkForErrorAt].Interface().(error)\n\t\t\tif ok {\n\t\t\t\terr = resErr\n\t\t\t} else {\n\t\t\t\terr = errUnknown\n\t\t\t}\n\t\t}\n\n\t\tif encodeDataAt >= 0 {\n\t\t\tv = res[encodeDataAt].Interface()\n\t\t}\n\t\treturn\n\t})\n\n\treturn handler, nil\n}\n\nvar errUnknown = errors.New(\"Unknown Server Error\")\n<commit_msg>fix determineHandler bug for reflect handlers<commit_after>\/\/ Copyright 2017-present Kirill Danshin and Gramework contributors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\npackage gramework\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"github.com\/valyala\/fasthttp\"\n)\n\ntype reqHandlerDefault interface {\n\tHandler(*Context)\n}\n\ntype reqHandlerWithError interface {\n\tHandler(*Context) error\n}\n\ntype reqHandlerWithEfaceError interface {\n\tHandler(*Context) (interface{}, error)\n}\n\ntype reqHandlerWithEface interface {\n\tHandler(*Context) interface{}\n}\n\ntype reqHandlerNoCtx interface {\n\tHandler()\n}\n\ntype reqHandlerWithErrorNoCtx interface {\n\tHandler() error\n}\n\ntype reqHandlerWithEfaceErrorNoCtx interface {\n\tHandler() (interface{}, error)\n}\n\ntype reqHandlerWithEfaceNoCtx interface {\n\tHandler() interface{}\n}\n\nfunc (r *Router) determineHandler(handler interface{}) func(*Context) {\n\t\/\/ copy handler, we don't want to mutate our arguments\n\trawHandler := handler\n\n\t\/\/ prepare handler in case if it one of our supported interfaces\n\tswitch h := handler.(type) {\n\tcase reqHandlerDefault:\n\t\trawHandler = h.Handler\n\tcase reqHandlerWithError:\n\t\trawHandler = h.Handler\n\tcase reqHandlerWithEfaceError:\n\t\trawHandler = h.Handler\n\tcase reqHandlerWithEface:\n\t\trawHandler = h.Handler\n\tcase reqHandlerNoCtx:\n\t\trawHandler = h.Handler\n\tcase reqHandlerWithErrorNoCtx:\n\t\trawHandler = h.Handler\n\tcase reqHandlerWithEfaceErrorNoCtx:\n\t\trawHandler = h.Handler\n\tcase reqHandlerWithEfaceNoCtx:\n\t\trawHandler = h.Handler\n\t}\n\n\t\/\/ finally, process the handler\n\tswitch h := rawHandler.(type) {\n\tcase HTML:\n\t\treturn r.getHTMLServer(h)\n\tcase JSON:\n\t\treturn r.getJSONServer(h)\n\tcase func(*Context):\n\t\treturn h\n\tcase RequestHandler:\n\t\treturn h\n\tcase func(*Context) error:\n\t\treturn r.getErrorHandler(h)\n\tcase func(*fasthttp.RequestCtx):\n\t\treturn r.getGrameHandler(h)\n\tcase func(*fasthttp.RequestCtx) error:\n\t\treturn r.getGrameErrorHandler(h)\n\tcase func() interface{}:\n\t\treturn r.getEfaceEncoder(h)\n\tcase func() (interface{}, error):\n\t\treturn r.getEfaceErrEncoder(h)\n\tcase func(*Context) interface{}:\n\t\treturn r.getEfaceCtxEncoder(h)\n\tcase func(*Context) (interface{}, error):\n\t\treturn r.getEfaceCtxErrEncoder(h)\n\tcase string:\n\t\treturn r.getStringServer(h)\n\tcase []byte:\n\t\treturn r.getBytesServer(h)\n\tcase int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:\n\t\treturn 
r.getFmtDHandler(h)\n\tcase float32, float64:\n\t\treturn r.getFmtFHandler(h)\n\tcase func():\n\t\treturn r.getGrameDumbHandler(h)\n\tcase func() error:\n\t\treturn r.getGrameDumbErrorHandler(h)\n\tcase func() string:\n\t\treturn r.getEFuncStrHandler(h)\n\tcase func() map[string]interface{}:\n\t\treturn r.getHandlerEncoder(h)\n\tcase func(*Context) map[string]interface{}:\n\t\treturn r.getCtxHandlerEncoder(h)\n\tcase func() (map[string]interface{}, error):\n\t\treturn r.getHandlerEncoderErr(h)\n\tcase func(*Context) (map[string]interface{}, error):\n\t\treturn r.getCtxHandlerEncoderErr(h)\n\tdefault:\n\t\trv := reflect.ValueOf(h)\n\t\tif rv.Kind() == reflect.Func {\n\t\t\thandler, err := r.getCachedReflectHandler(h)\n\t\t\tif err != nil {\n\t\t\t\tr.app.internalLog.WithError(err).Fatal(\"Unsupported reflect handler signature\")\n\t\t\t}\n\n\t\t\treturn handler\n\t\t}\n\t\tr.app.internalLog.Warnf(\"Unknown handler type: %T, serving fmt.Sprintf(%%v)\", h)\n\t\treturn r.getFmtVHandler(h)\n\t}\n}\n\ntype reflectDecodedBodyRecv struct {\n\tidx int\n\tt reflect.Type\n}\n\nfunc (r *Router) getCachedReflectHandler(h interface{}) (func(*Context), error) {\n\tfuncT := reflect.TypeOf(h)\n\tif funcT.IsVariadic() {\n\t\treturn nil, errors.New(\"could not process variadic reflect handler\")\n\t}\n\n\tresults := funcT.NumOut()\n\tif results > 2 {\n\t\treturn nil, errors.New(\"reflect handler output should be one of (any), (any, error), (error) or ()\")\n\t}\n\n\tparams := funcT.NumIn()\n\tdecodedBodyRecv := []reflectDecodedBodyRecv{}\n\tctxRecv := -1\n\n\tcheckForErrorAt := -1\n\tencodeDataAt := -1\n\n\tfor i := 0; i < params; i++ {\n\t\tp := funcT.In(i)\n\t\tif strings.Contains(p.String(), \"*gramework.Context\") {\n\t\t\tctxRecv = i\n\t\t\tcontinue\n\t\t}\n\t\tdecodedBodyRecv = append(decodedBodyRecv, reflectDecodedBodyRecv{\n\t\t\tidx: i,\n\t\t\tt: p,\n\t\t})\n\t}\n\n\tfor i := 0; i < results; i++ {\n\t\tr := funcT.Out(i)\n\t\tprintln(r.String())\n\n\t\tif r.String() == \"error\" {\n\t\t\tif i == 0 && results > 1 {\n\t\t\t\treturn nil, errors.New(\"reflect handler output should be one of (any), (any, error), (error) or ()\")\n\t\t\t}\n\n\t\t\tcheckForErrorAt = i\n\t\t\tcontinue\n\t\t}\n\n\t\tif encodeDataAt >= 0 {\n\t\t\treturn nil, errors.New(\"reflect handler output should be one of (any), (any, error), (error) or ()\")\n\t\t}\n\n\t\tencodeDataAt = i\n\t}\n\n\tfuncV := reflect.ValueOf(h)\n\n\thandler := func(ctx *Context) {\n\t\tcallParams := make([]reflect.Value, params)\n\t\tif len(decodedBodyRecv) > 0 {\n\t\t\tunsupportedBodyType := true\n\t\t\tfor i := range decodedBodyRecv {\n\t\t\t\tdecoded := reflect.New(decodedBodyRecv[i].t).Interface()\n\t\t\t\tif jsonErr := ctx.UnJSON(decoded); jsonErr == nil {\n\t\t\t\t\tunsupportedBodyType = false\n\t\t\t\t\tdecodedV := reflect.ValueOf(decoded)\n\n\t\t\t\t\tcallParams[decodedBodyRecv[i].idx] = decodedV.Elem()\n\t\t\t\t} else {\n\t\t\t\t\tcallParams[decodedBodyRecv[i].idx] = reflect.Zero(decodedBodyRecv[i].t)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif unsupportedBodyType {\n\t\t\t\tctx.SetStatusCode(500)\n\t\t\t\tctx.Logger.Error(\"unsupported body type\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif ctxRecv >= 0 {\n\t\t\tcallParams[ctxRecv] = reflect.ValueOf(ctx)\n\t\t}\n\n\t\tres := funcV.Call(callParams)\n\t\tshouldProcessErr := false\n\t\tshouldProcessReturn := false\n\t\tvar err error\n\t\tif checkForErrorAt >= 0 && !res[checkForErrorAt].IsNil() {\n\t\t\tresErr, ok := res[checkForErrorAt].Interface().(error)\n\t\t\tif ok {\n\t\t\t\terr = resErr\n\t\t\t} else 
{\n\t\t\t\terr = errUnknown\n\t\t\t}\n\t\t\tshouldProcessErr = true\n\t\t}\n\n\t\tvar v interface{}\n\t\tif encodeDataAt >= 0 {\n\t\t\tv = res[encodeDataAt].Interface()\n\t\t\tshouldProcessReturn = true\n\t\t}\n\t\tif shouldProcessErr {\n\t\t\tif err != nil {\n\t\t\t\tctx.jsonErrorLog(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif shouldProcessReturn {\n\t\t\tif v == nil { \/\/ err == nil here\n\t\t\t\tctx.SetStatusCode(fasthttp.StatusNoContent)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err = ctx.JSON(v); err != nil {\n\t\t\t\tctx.jsonErrorLog(err)\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\treturn handler, nil\n}\n\nvar errUnknown = errors.New(\"Unknown Server Error\")\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage tikv\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pingcap\/tidb\/kv\"\n\t\"github.com\/pingcap\/tidb\/metrics\"\n\t\"github.com\/pingcap\/tidb\/mysql\"\n\t\"github.com\/pingcap\/tidb\/terror\"\n\t\"github.com\/pkg\/errors\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\t\/\/ NoJitter makes the backoff sequence strict exponential.\n\tNoJitter = 1 + iota\n\t\/\/ FullJitter applies random factors to strict exponential.\n\tFullJitter\n\t\/\/ EqualJitter is also randomized, but prevents very short sleeps.\n\tEqualJitter\n\t\/\/ DecorrJitter increases the maximum jitter based on the last random value.\n\tDecorrJitter\n)\n\n\/\/ NewBackoffFn creates a backoff func which implements exponential backoff with\n\/\/ optional jitters.\n\/\/ See http:\/\/www.awsarchitectureblog.com\/2015\/03\/backoff.html\nfunc NewBackoffFn(base, cap, jitter int) func(ctx context.Context) int {\n\tif base < 2 {\n\t\t\/\/ Top prevent panic in 'rand.Intn'.\n\t\tbase = 2\n\t}\n\tattempts := 0\n\tlastSleep := base\n\treturn func(ctx context.Context) int {\n\t\tvar sleep int\n\t\tswitch jitter {\n\t\tcase NoJitter:\n\t\t\tsleep = expo(base, cap, attempts)\n\t\tcase FullJitter:\n\t\t\tv := expo(base, cap, attempts)\n\t\t\tsleep = rand.Intn(v)\n\t\tcase EqualJitter:\n\t\t\tv := expo(base, cap, attempts)\n\t\t\tsleep = v\/2 + rand.Intn(v\/2)\n\t\tcase DecorrJitter:\n\t\t\tsleep = int(math.Min(float64(cap), float64(base+rand.Intn(lastSleep*3-base))))\n\t\t}\n\t\tlog.Debugf(\"backoff base %d, sleep %d\", base, sleep)\n\t\tselect {\n\t\tcase <-time.After(time.Duration(sleep) * time.Millisecond):\n\t\tcase <-ctx.Done():\n\t\t}\n\n\t\tattempts++\n\t\tlastSleep = sleep\n\t\treturn lastSleep\n\t}\n}\n\nfunc expo(base, cap, n int) int {\n\treturn int(math.Min(float64(cap), float64(base)*math.Pow(2.0, float64(n))))\n}\n\ntype backoffType int\n\n\/\/ Back off types.\nconst (\n\tboTiKVRPC backoffType = iota\n\tBoTxnLock\n\tboTxnLockFast\n\tboPDRPC\n\tBoRegionMiss\n\tBoUpdateLeader\n\tboServerBusy\n)\n\nfunc (t backoffType) createFn(vars *kv.Variables) func(context.Context) int {\n\tif vars.Hook != nil {\n\t\tvars.Hook(t.String(), vars)\n\t}\n\tswitch t {\n\tcase boTiKVRPC:\n\t\treturn 
NewBackoffFn(100, 2000, EqualJitter)\n\tcase BoTxnLock:\n\t\treturn NewBackoffFn(200, 3000, EqualJitter)\n\tcase boTxnLockFast:\n\t\treturn NewBackoffFn(vars.BackoffLockFast, 3000, EqualJitter)\n\tcase boPDRPC:\n\t\treturn NewBackoffFn(500, 3000, EqualJitter)\n\tcase BoRegionMiss:\n\t\treturn NewBackoffFn(100, 500, NoJitter)\n\tcase BoUpdateLeader:\n\t\treturn NewBackoffFn(1, 10, NoJitter)\n\tcase boServerBusy:\n\t\treturn NewBackoffFn(2000, 10000, EqualJitter)\n\t}\n\treturn nil\n}\n\nfunc (t backoffType) String() string {\n\tswitch t {\n\tcase boTiKVRPC:\n\t\treturn \"tikvRPC\"\n\tcase BoTxnLock:\n\t\treturn \"txnLock\"\n\tcase boTxnLockFast:\n\t\treturn \"txnLockFast\"\n\tcase boPDRPC:\n\t\treturn \"pdRPC\"\n\tcase BoRegionMiss:\n\t\treturn \"regionMiss\"\n\tcase BoUpdateLeader:\n\t\treturn \"updateLeader\"\n\tcase boServerBusy:\n\t\treturn \"serverBusy\"\n\t}\n\treturn \"\"\n}\n\nfunc (t backoffType) TError() error {\n\tswitch t {\n\tcase boTiKVRPC:\n\t\treturn ErrTiKVServerTimeout\n\tcase BoTxnLock, boTxnLockFast:\n\t\treturn ErrResolveLockTimeout\n\tcase boPDRPC:\n\t\treturn ErrPDServerTimeout.GenWithStackByArgs(txnRetryableMark)\n\tcase BoRegionMiss, BoUpdateLeader:\n\t\treturn ErrRegionUnavailable\n\tcase boServerBusy:\n\t\treturn ErrTiKVServerBusy\n\t}\n\treturn terror.ClassTiKV.New(mysql.ErrUnknown, mysql.MySQLErrName[mysql.ErrUnknown])\n}\n\n\/\/ Maximum total sleep time(in ms) for kv\/cop commands.\nconst (\n\tcopBuildTaskMaxBackoff = 5000\n\ttsoMaxBackoff = 15000\n\tscannerNextMaxBackoff = 20000\n\tbatchGetMaxBackoff = 20000\n\tcopNextMaxBackoff = 20000\n\tgetMaxBackoff = 20000\n\tprewriteMaxBackoff = 20000\n\tcleanupMaxBackoff = 20000\n\tGcOneRegionMaxBackoff = 20000\n\tGcResolveLockMaxBackoff = 100000\n\tdeleteRangeOneRegionMaxBackoff = 100000\n\trawkvMaxBackoff = 20000\n\tsplitRegionBackoff = 20000\n)\n\n\/\/ CommitMaxBackoff is max sleep time of the 'commit' command\nvar CommitMaxBackoff = 41000\n\n\/\/ Backoffer is a utility for retrying queries.\ntype Backoffer struct {\n\tctx context.Context\n\n\tfn map[backoffType]func(context.Context) int\n\tmaxSleep int\n\ttotalSleep int\n\terrors []error\n\ttypes []backoffType\n\tvars *kv.Variables\n}\n\n\/\/ NewBackoffer creates a Backoffer with maximum sleep time(in ms).\nfunc NewBackoffer(ctx context.Context, maxSleep int) *Backoffer {\n\treturn &Backoffer{\n\t\tctx: ctx,\n\t\tmaxSleep: maxSleep,\n\t\tvars: kv.DefaultVars,\n\t}\n}\n\n\/\/ WithVars sets the kv.Variables to the Backoffer and return it.\nfunc (b *Backoffer) WithVars(vars *kv.Variables) *Backoffer {\n\tb.vars = vars\n\treturn b\n}\n\n\/\/ Backoff sleeps a while base on the backoffType and records the error message.\n\/\/ It returns a retryable error if total sleep time exceeds maxSleep.\nfunc (b *Backoffer) Backoff(typ backoffType, err error) error {\n\tif strings.Contains(err.Error(), mismatchClusterID) {\n\t\tlog.Fatalf(\"critical error %v\", err)\n\t}\n\tselect {\n\tcase <-b.ctx.Done():\n\t\treturn errors.Trace(err)\n\tdefault:\n\t}\n\n\tmetrics.TiKVBackoffCounter.WithLabelValues(typ.String()).Inc()\n\t\/\/ Lazy initialize.\n\tif b.fn == nil {\n\t\tb.fn = make(map[backoffType]func(context.Context) int)\n\t}\n\tf, ok := b.fn[typ]\n\tif !ok {\n\t\tf = typ.createFn(b.vars)\n\t\tb.fn[typ] = f\n\t}\n\n\tb.totalSleep += f(b.ctx)\n\tb.types = append(b.types, typ)\n\n\tlog.Debugf(\"%v, retry later(totalsleep %dms, maxsleep %dms)\", err, b.totalSleep, b.maxSleep)\n\n\tb.errors = append(b.errors, errors.Errorf(\"%s at %s\", err.Error(), 
time.Now().Format(time.RFC3339Nano)))\n\tif b.maxSleep > 0 && b.totalSleep >= b.maxSleep {\n\t\terrMsg := fmt.Sprintf(\"backoffer.maxSleep %dms is exceeded, errors:\", b.maxSleep)\n\t\tfor i, err := range b.errors {\n\t\t\t\/\/ Print only last 3 errors for non-DEBUG log levels.\n\t\t\tif log.GetLevel() == log.DebugLevel || i >= len(b.errors)-3 {\n\t\t\t\terrMsg += \"\\n\" + err.Error()\n\t\t\t}\n\t\t}\n\t\tlog.Warn(errMsg)\n\t\t\/\/ Use the first backoff type to generate a MySQL error.\n\t\treturn b.types[0].TError()\n\t}\n\treturn nil\n}\n\nfunc (b *Backoffer) String() string {\n\tif b.totalSleep == 0 {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\" backoff(%dms %v)\", b.totalSleep, b.types)\n}\n\n\/\/ Clone creates a new Backoffer which keeps current Backoffer's sleep time and errors, and shares\n\/\/ current Backoffer's context.\nfunc (b *Backoffer) Clone() *Backoffer {\n\treturn &Backoffer{\n\t\tctx: b.ctx,\n\t\tmaxSleep: b.maxSleep,\n\t\ttotalSleep: b.totalSleep,\n\t\terrors: b.errors,\n\t\tvars: b.vars,\n\t}\n}\n\n\/\/ Fork creates a new Backoffer which keeps current Backoffer's sleep time and errors, and holds\n\/\/ a child context of current Backoffer's context.\nfunc (b *Backoffer) Fork() (*Backoffer, context.CancelFunc) {\n\tctx, cancel := context.WithCancel(b.ctx)\n\treturn &Backoffer{\n\t\tctx: ctx,\n\t\tmaxSleep: b.maxSleep,\n\t\ttotalSleep: b.totalSleep,\n\t\terrors: b.errors,\n\t\tvars: b.vars,\n\t}, cancel\n}\n<commit_msg>change the backoff base time of BoRegionMiss to 2m (#7775)<commit_after>\/\/ Copyright 2016 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage tikv\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pingcap\/tidb\/kv\"\n\t\"github.com\/pingcap\/tidb\/metrics\"\n\t\"github.com\/pingcap\/tidb\/mysql\"\n\t\"github.com\/pingcap\/tidb\/terror\"\n\t\"github.com\/pkg\/errors\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\t\/\/ NoJitter makes the backoff sequence strict exponential.\n\tNoJitter = 1 + iota\n\t\/\/ FullJitter applies random factors to strict exponential.\n\tFullJitter\n\t\/\/ EqualJitter is also randomized, but prevents very short sleeps.\n\tEqualJitter\n\t\/\/ DecorrJitter increases the maximum jitter based on the last random value.\n\tDecorrJitter\n)\n\n\/\/ NewBackoffFn creates a backoff func which implements exponential backoff with\n\/\/ optional jitters.\n\/\/ See http:\/\/www.awsarchitectureblog.com\/2015\/03\/backoff.html\nfunc NewBackoffFn(base, cap, jitter int) func(ctx context.Context) int {\n\tif base < 2 {\n\t\t\/\/ Top prevent panic in 'rand.Intn'.\n\t\tbase = 2\n\t}\n\tattempts := 0\n\tlastSleep := base\n\treturn func(ctx context.Context) int {\n\t\tvar sleep int\n\t\tswitch jitter {\n\t\tcase NoJitter:\n\t\t\tsleep = expo(base, cap, attempts)\n\t\tcase FullJitter:\n\t\t\tv := expo(base, cap, attempts)\n\t\t\tsleep = rand.Intn(v)\n\t\tcase EqualJitter:\n\t\t\tv := expo(base, cap, attempts)\n\t\t\tsleep = v\/2 + rand.Intn(v\/2)\n\t\tcase 
DecorrJitter:\n\t\t\tsleep = int(math.Min(float64(cap), float64(base+rand.Intn(lastSleep*3-base))))\n\t\t}\n\t\tlog.Debugf(\"backoff base %d, sleep %d\", base, sleep)\n\t\tselect {\n\t\tcase <-time.After(time.Duration(sleep) * time.Millisecond):\n\t\tcase <-ctx.Done():\n\t\t}\n\n\t\tattempts++\n\t\tlastSleep = sleep\n\t\treturn lastSleep\n\t}\n}\n\nfunc expo(base, cap, n int) int {\n\treturn int(math.Min(float64(cap), float64(base)*math.Pow(2.0, float64(n))))\n}\n\ntype backoffType int\n\n\/\/ Back off types.\nconst (\n\tboTiKVRPC backoffType = iota\n\tBoTxnLock\n\tboTxnLockFast\n\tboPDRPC\n\tBoRegionMiss\n\tBoUpdateLeader\n\tboServerBusy\n)\n\nfunc (t backoffType) createFn(vars *kv.Variables) func(context.Context) int {\n\tif vars.Hook != nil {\n\t\tvars.Hook(t.String(), vars)\n\t}\n\tswitch t {\n\tcase boTiKVRPC:\n\t\treturn NewBackoffFn(100, 2000, EqualJitter)\n\tcase BoTxnLock:\n\t\treturn NewBackoffFn(200, 3000, EqualJitter)\n\tcase boTxnLockFast:\n\t\treturn NewBackoffFn(vars.BackoffLockFast, 3000, EqualJitter)\n\tcase boPDRPC:\n\t\treturn NewBackoffFn(500, 3000, EqualJitter)\n\tcase BoRegionMiss:\n\t\t\/\/ change base time to 2ms, because it may recover soon.\n\t\treturn NewBackoffFn(2, 500, NoJitter)\n\tcase BoUpdateLeader:\n\t\treturn NewBackoffFn(1, 10, NoJitter)\n\tcase boServerBusy:\n\t\treturn NewBackoffFn(2000, 10000, EqualJitter)\n\t}\n\treturn nil\n}\n\nfunc (t backoffType) String() string {\n\tswitch t {\n\tcase boTiKVRPC:\n\t\treturn \"tikvRPC\"\n\tcase BoTxnLock:\n\t\treturn \"txnLock\"\n\tcase boTxnLockFast:\n\t\treturn \"txnLockFast\"\n\tcase boPDRPC:\n\t\treturn \"pdRPC\"\n\tcase BoRegionMiss:\n\t\treturn \"regionMiss\"\n\tcase BoUpdateLeader:\n\t\treturn \"updateLeader\"\n\tcase boServerBusy:\n\t\treturn \"serverBusy\"\n\t}\n\treturn \"\"\n}\n\nfunc (t backoffType) TError() error {\n\tswitch t {\n\tcase boTiKVRPC:\n\t\treturn ErrTiKVServerTimeout\n\tcase BoTxnLock, boTxnLockFast:\n\t\treturn ErrResolveLockTimeout\n\tcase boPDRPC:\n\t\treturn ErrPDServerTimeout.GenWithStackByArgs(txnRetryableMark)\n\tcase BoRegionMiss, BoUpdateLeader:\n\t\treturn ErrRegionUnavailable\n\tcase boServerBusy:\n\t\treturn ErrTiKVServerBusy\n\t}\n\treturn terror.ClassTiKV.New(mysql.ErrUnknown, mysql.MySQLErrName[mysql.ErrUnknown])\n}\n\n\/\/ Maximum total sleep time (in ms) for kv\/cop commands.\nconst (\n\tcopBuildTaskMaxBackoff = 5000\n\ttsoMaxBackoff = 15000\n\tscannerNextMaxBackoff = 20000\n\tbatchGetMaxBackoff = 20000\n\tcopNextMaxBackoff = 20000\n\tgetMaxBackoff = 20000\n\tprewriteMaxBackoff = 20000\n\tcleanupMaxBackoff = 20000\n\tGcOneRegionMaxBackoff = 20000\n\tGcResolveLockMaxBackoff = 100000\n\tdeleteRangeOneRegionMaxBackoff = 100000\n\trawkvMaxBackoff = 20000\n\tsplitRegionBackoff = 20000\n)\n\n\/\/ CommitMaxBackoff is the max sleep time of the 'commit' command\nvar CommitMaxBackoff = 41000\n\n\/\/ Backoffer is a utility for retrying queries.\ntype Backoffer struct {\n\tctx context.Context\n\n\tfn map[backoffType]func(context.Context) int\n\tmaxSleep int\n\ttotalSleep int\n\terrors []error\n\ttypes []backoffType\n\tvars *kv.Variables\n}\n\n\/\/ NewBackoffer creates a Backoffer with maximum sleep time (in ms).\nfunc NewBackoffer(ctx context.Context, maxSleep int) *Backoffer {\n\treturn &Backoffer{\n\t\tctx: ctx,\n\t\tmaxSleep: maxSleep,\n\t\tvars: kv.DefaultVars,\n\t}\n}\n\n\/\/ WithVars sets the kv.Variables to the Backoffer and returns it.\nfunc (b *Backoffer) WithVars(vars *kv.Variables) *Backoffer {\n\tb.vars = vars\n\treturn b\n}\n\n\/\/ Backoff sleeps a while based on the 
backoffType and records the error message.\n\/\/ It returns a retryable error if total sleep time exceeds maxSleep.\nfunc (b *Backoffer) Backoff(typ backoffType, err error) error {\n\tif strings.Contains(err.Error(), mismatchClusterID) {\n\t\tlog.Fatalf(\"critical error %v\", err)\n\t}\n\tselect {\n\tcase <-b.ctx.Done():\n\t\treturn errors.Trace(err)\n\tdefault:\n\t}\n\n\tmetrics.TiKVBackoffCounter.WithLabelValues(typ.String()).Inc()\n\t\/\/ Lazy initialize.\n\tif b.fn == nil {\n\t\tb.fn = make(map[backoffType]func(context.Context) int)\n\t}\n\tf, ok := b.fn[typ]\n\tif !ok {\n\t\tf = typ.createFn(b.vars)\n\t\tb.fn[typ] = f\n\t}\n\n\tb.totalSleep += f(b.ctx)\n\tb.types = append(b.types, typ)\n\n\tlog.Debugf(\"%v, retry later(totalsleep %dms, maxsleep %dms)\", err, b.totalSleep, b.maxSleep)\n\n\tb.errors = append(b.errors, errors.Errorf(\"%s at %s\", err.Error(), time.Now().Format(time.RFC3339Nano)))\n\tif b.maxSleep > 0 && b.totalSleep >= b.maxSleep {\n\t\terrMsg := fmt.Sprintf(\"backoffer.maxSleep %dms is exceeded, errors:\", b.maxSleep)\n\t\tfor i, err := range b.errors {\n\t\t\t\/\/ Print only last 3 errors for non-DEBUG log levels.\n\t\t\tif log.GetLevel() == log.DebugLevel || i >= len(b.errors)-3 {\n\t\t\t\terrMsg += \"\\n\" + err.Error()\n\t\t\t}\n\t\t}\n\t\tlog.Warn(errMsg)\n\t\t\/\/ Use the first backoff type to generate a MySQL error.\n\t\treturn b.types[0].TError()\n\t}\n\treturn nil\n}\n\nfunc (b *Backoffer) String() string {\n\tif b.totalSleep == 0 {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\" backoff(%dms %v)\", b.totalSleep, b.types)\n}\n\n\/\/ Clone creates a new Backoffer which keeps current Backoffer's sleep time and errors, and shares\n\/\/ current Backoffer's context.\nfunc (b *Backoffer) Clone() *Backoffer {\n\treturn &Backoffer{\n\t\tctx: b.ctx,\n\t\tmaxSleep: b.maxSleep,\n\t\ttotalSleep: b.totalSleep,\n\t\terrors: b.errors,\n\t\tvars: b.vars,\n\t}\n}\n\n\/\/ Fork creates a new Backoffer which keeps current Backoffer's sleep time and errors, and holds\n\/\/ a child context of current Backoffer's context.\nfunc (b *Backoffer) Fork() (*Backoffer, context.CancelFunc) {\n\tctx, cancel := context.WithCancel(b.ctx)\n\treturn &Backoffer{\n\t\tctx: ctx,\n\t\tmaxSleep: b.maxSleep,\n\t\ttotalSleep: b.totalSleep,\n\t\terrors: b.errors,\n\t\tvars: b.vars,\n\t}, cancel\n}\n<|endoftext|>"} {"text":"<commit_before>package billing\n\n\/\/ Assent represents an assent by a person on behalf of an account to an agreement\ntype Assent struct {\n\tAgreementID string `json:\"-\"`\n\tAccountID int `json:\"account_id\"`\n\tPersonID int `json:\"person_id\"`\n\t\/\/ Name is the full name of the person\n\tName string `json:\"name\"`\n\tEmail string `json:\"email\"`\n}\n<commit_msg>fill out agreement id field<commit_after>package billing\n\n\/\/ Assent represents an assent by a person on behalf of an account to an agreement\ntype Assent struct {\n\tAgreementID string `json:\"agreement_id,omitempty\"`\n\tAccountID int `json:\"account_id\"`\n\tPersonID int `json:\"person_id\"`\n\t\/\/ Name is the full name of the person\n\tName string `json:\"name\"`\n\tEmail string `json:\"email\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package analysis\n\ntype CachingTokenFilter struct {\n\t*TokenFilter\n}\n\nfunc NewCachingTokenFilter(input TokenStream) *CachingTokenFilter {\n\tpanic(\"not implemented yet\")\n}\n\nfunc (f *CachingTokenFilter) Reset() {\n\tpanic(\"not implemented yet\")\n}\n<commit_msg>implement CachingTokenFilter ctor<commit_after>package analysis\n\nimport 
(\n\t\"github.com\/balzaczyy\/golucene\/core\/util\"\n)\n\ntype CachingTokenFilter struct {\n\t*TokenFilter\n\tcache []*util.AttributeState\n\titerator func() (*util.AttributeState, bool)\n\tfinalState *util.AttributeState\n}\n\nfunc NewCachingTokenFilter(input TokenStream) *CachingTokenFilter {\n\treturn &CachingTokenFilter{\n\t\tTokenFilter: NewTokenFilter(input),\n\t}\n}\n\nfunc (f *CachingTokenFilter) Reset() {\n\tpanic(\"not implemented yet\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ This file should be consistent with pkg\/api\/annotation_key_constants.go.\n\npackage v1\n\nconst (\n\t\/\/ ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy\n\t\/\/ webhook backend fails.\n\tImagePolicyFailedOpenKey string = \"alpha.image-policy.k8s.io\/failed-open\"\n\n\t\/\/ PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation\n\tPodPresetOptOutAnnotationKey string = \"podpreset.admission.kubernetes.io\/exclude\"\n\n\t\/\/ MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods\n\tMirrorPodAnnotationKey string = \"kubernetes.io\/config.mirror\"\n\n\t\/\/ TolerationsAnnotationKey represents the key of tolerations data (json serialized)\n\t\/\/ in the Annotations of a Pod.\n\tTolerationsAnnotationKey string = \"scheduler.alpha.kubernetes.io\/tolerations\"\n\n\t\/\/ TaintsAnnotationKey represents the key of taints data (json serialized)\n\t\/\/ in the Annotations of a Node.\n\tTaintsAnnotationKey string = \"scheduler.alpha.kubernetes.io\/taints\"\n\n\t\/\/ SeccompPodAnnotationKey represents the key of a seccomp profile applied\n\t\/\/ to all containers of a pod.\n\tSeccompPodAnnotationKey string = \"seccomp.security.alpha.kubernetes.io\/pod\"\n\n\t\/\/ SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied\n\t\/\/ to one container of a pod.\n\tSeccompContainerAnnotationKeyPrefix string = \"container.seccomp.security.alpha.kubernetes.io\/\"\n\n\t\/\/ CreatedByAnnotation represents the key used to store the spec(json)\n\t\/\/ used to create the resource.\n\tCreatedByAnnotation = \"kubernetes.io\/created-by\"\n\n\t\/\/ PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized)\n\t\/\/ in the Annotations of a Node.\n\tPreferAvoidPodsAnnotationKey string = \"scheduler.alpha.kubernetes.io\/preferAvoidPods\"\n\n\t\/\/ SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure\n\t\/\/ container of a pod. The annotation value is a comma separated list of sysctl_name=value\n\t\/\/ key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by\n\t\/\/ the kubelet. 
Pods with other sysctls will fail to launch.\n\tSysctlsPodAnnotationKey string = \"security.alpha.kubernetes.io\/sysctls\"\n\n\t\/\/ UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure\n\t\/\/ container of a pod. The annotation value is a comma separated list of sysctl_name=value\n\t\/\/ key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly\n\t\/\/ namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use\n\t\/\/ is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet\n\t\/\/ will fail to launch.\n\tUnsafeSysctlsPodAnnotationKey string = \"security.alpha.kubernetes.io\/unsafe-sysctls\"\n\n\t\/\/ ObjectTTLAnnotationKey represents a suggestion for kubelet for how long it can cache\n\t\/\/ an object (e.g. secret, config map) before fetching it again from apiserver.\n\t\/\/ This annotation can be attached to node.\n\tObjectTTLAnnotationKey string = \"node.alpha.kubernetes.io\/ttl\"\n\n\t\/\/ annotation key prefix used to identify non-convertible json paths.\n\tNonConvertibleAnnotationPrefix = \"non-convertible.kubernetes.io\"\n\n\tkubectlPrefix = \"kubectl.kubernetes.io\/\"\n\n\t\/\/ LastAppliedConfigAnnotation is the annotation used to store the previous\n\t\/\/ configuration of a resource for use in a three way diff by UpdateApplyAnnotation.\n\tLastAppliedConfigAnnotation = kubectlPrefix + \"last-applied-configuration\"\n\n\t\/\/ AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers\n\t\/\/\n\t\/\/ It should be a comma-separated list of CIDRs, e.g. `0.0.0.0\/0` to\n\t\/\/ allow full access (the default) or `18.0.0.0\/8,56.0.0.0\/8` to allow\n\t\/\/ access only from the CIDRs currently allocated to MIT & the USPS.\n\t\/\/\n\t\/\/ Not all cloud providers support this annotation, though AWS & GCE do.\n\tAnnotationLoadBalancerSourceRangesKey = \"service.beta.kubernetes.io\/load-balancer-source-ranges\"\n)\n<commit_msg>mark created-by annotation as deprecated<commit_after>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ This file should be consistent with pkg\/api\/annotation_key_constants.go.\n\npackage v1\n\nconst (\n\t\/\/ ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy\n\t\/\/ webhook backend fails.\n\tImagePolicyFailedOpenKey string = \"alpha.image-policy.k8s.io\/failed-open\"\n\n\t\/\/ PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation\n\tPodPresetOptOutAnnotationKey string = \"podpreset.admission.kubernetes.io\/exclude\"\n\n\t\/\/ MirrorPodAnnotationKey represents the annotation key set by kubelets when creating mirror pods\n\tMirrorPodAnnotationKey string = \"kubernetes.io\/config.mirror\"\n\n\t\/\/ TolerationsAnnotationKey represents the key of tolerations data (json serialized)\n\t\/\/ in the Annotations of a 
Pod.\n\tTolerationsAnnotationKey string = \"scheduler.alpha.kubernetes.io\/tolerations\"\n\n\t\/\/ TaintsAnnotationKey represents the key of taints data (json serialized)\n\t\/\/ in the Annotations of a Node.\n\tTaintsAnnotationKey string = \"scheduler.alpha.kubernetes.io\/taints\"\n\n\t\/\/ SeccompPodAnnotationKey represents the key of a seccomp profile applied\n\t\/\/ to all containers of a pod.\n\tSeccompPodAnnotationKey string = \"seccomp.security.alpha.kubernetes.io\/pod\"\n\n\t\/\/ SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied\n\t\/\/ to one container of a pod.\n\tSeccompContainerAnnotationKeyPrefix string = \"container.seccomp.security.alpha.kubernetes.io\/\"\n\n\t\/\/ CreatedByAnnotation represents the key used to store the spec(json)\n\t\/\/ used to create the resource.\n\t\/\/ This field is deprecated in favor of ControllerRef (see #44407).\n\t\/\/ TODO(#50720): Remove this field in v1.9.\n\tCreatedByAnnotation = \"kubernetes.io\/created-by\"\n\n\t\/\/ PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized)\n\t\/\/ in the Annotations of a Node.\n\tPreferAvoidPodsAnnotationKey string = \"scheduler.alpha.kubernetes.io\/preferAvoidPods\"\n\n\t\/\/ SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure\n\t\/\/ container of a pod. The annotation value is a comma separated list of sysctl_name=value\n\t\/\/ key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by\n\t\/\/ the kubelet. Pods with other sysctls will fail to launch.\n\tSysctlsPodAnnotationKey string = \"security.alpha.kubernetes.io\/sysctls\"\n\n\t\/\/ UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure\n\t\/\/ container of a pod. The annotation value is a comma separated list of sysctl_name=value\n\t\/\/ key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly\n\t\/\/ namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use\n\t\/\/ is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet\n\t\/\/ will fail to launch.\n\tUnsafeSysctlsPodAnnotationKey string = \"security.alpha.kubernetes.io\/unsafe-sysctls\"\n\n\t\/\/ ObjectTTLAnnotationKey represents a suggestion for kubelet for how long it can cache\n\t\/\/ an object (e.g. secret, config map) before fetching it again from apiserver.\n\t\/\/ This annotation can be attached to node.\n\tObjectTTLAnnotationKey string = \"node.alpha.kubernetes.io\/ttl\"\n\n\t\/\/ annotation key prefix used to identify non-convertible json paths.\n\tNonConvertibleAnnotationPrefix = \"non-convertible.kubernetes.io\"\n\n\tkubectlPrefix = \"kubectl.kubernetes.io\/\"\n\n\t\/\/ LastAppliedConfigAnnotation is the annotation used to store the previous\n\t\/\/ configuration of a resource for use in a three way diff by UpdateApplyAnnotation.\n\tLastAppliedConfigAnnotation = kubectlPrefix + \"last-applied-configuration\"\n\n\t\/\/ AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers\n\t\/\/\n\t\/\/ It should be a comma-separated list of CIDRs, e.g. 
`0.0.0.0\/0` to\n\t\/\/ allow full access (the default) or `18.0.0.0\/8,56.0.0.0\/8` to allow\n\t\/\/ access only from the CIDRs currently allocated to MIT & the USPS.\n\t\/\/\n\t\/\/ Not all cloud providers support this annotation, though AWS & GCE do.\n\tAnnotationLoadBalancerSourceRangesKey = \"service.beta.kubernetes.io\/load-balancer-source-ranges\"\n)\n<|endoftext|>"} {"text":"<commit_before>package storage\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n\t\"path\/filepath\"\n\t\"unicode\/utf8\"\n)\n\n\/\/ ErrDuplicatePath occurs when a tar archive has more than one entry for the\n\/\/ same file path\nvar ErrDuplicatePath = errors.New(\"duplicates of file paths not supported\")\n\n\/\/ Packer describes the methods to pack Entries to a storage destination\ntype Packer interface {\n\t\/\/ AddEntry packs the Entry and returns its position\n\tAddEntry(e Entry) (int, error)\n}\n\n\/\/ Unpacker describes the methods to read Entries from a source\ntype Unpacker interface {\n\t\/\/ Next returns the next Entry being unpacked, or error, until io.EOF\n\tNext() (*Entry, error)\n}\n\n\/* TODO(vbatts) figure out a good model for this\ntype PackUnpacker interface {\n\tPacker\n\tUnpacker\n}\n*\/\n\ntype jsonUnpacker struct {\n\tr io.Reader\n\tb *bufio.Reader\n\tisEOF bool\n\tseen seenNames\n}\n\nfunc (jup *jsonUnpacker) Next() (*Entry, error) {\n\tvar e Entry\n\tif jup.isEOF {\n\t\t\/\/ since ReadBytes() will return read bytes AND an EOF, we handle it this\n\t\t\/\/ round-a-bout way so we can Unmarshal the tail with relevant errors, but\n\t\t\/\/ still get an io.EOF when the stream is ended.\n\t\treturn nil, io.EOF\n\t}\n\tline, err := jup.b.ReadBytes('\\n')\n\tif err != nil && err != io.EOF {\n\t\treturn nil, err\n\t} else if err == io.EOF {\n\t\tjup.isEOF = true\n\t}\n\n\terr = json.Unmarshal(line, &e)\n\tif err != nil && jup.isEOF {\n\t\t\/\/ if the remainder actually _wasn't_ a remaining json structure, then just EOF\n\t\treturn nil, io.EOF\n\t}\n\n\t\/\/ check for dup name\n\tif e.Type == FileType {\n\t\tcName := filepath.Clean(e.GetName())\n\t\tif _, ok := jup.seen[cName]; ok {\n\t\t\treturn nil, ErrDuplicatePath\n\t\t}\n\t\tjup.seen[cName] = struct{}{}\n\t}\n\n\treturn &e, err\n}\n\n\/\/ NewJSONUnpacker provides an Unpacker that reads Entries (SegmentType and\n\/\/ FileType) as a json document.\n\/\/\n\/\/ Each Entry read is expected to be delimited by a new line.\nfunc NewJSONUnpacker(r io.Reader) Unpacker {\n\treturn &jsonUnpacker{\n\t\tr: r,\n\t\tb: bufio.NewReader(r),\n\t\tseen: seenNames{},\n\t}\n}\n\ntype jsonPacker struct {\n\tw io.Writer\n\te *json.Encoder\n\tpos int\n\tseen seenNames\n}\n\ntype seenNames map[string]struct{}\n\nfunc (jp *jsonPacker) AddEntry(e Entry) (int, error) {\n\t\/\/ if Name is not valid utf8, switch it to raw first.\n\tif e.Name != \"\" {\n\t\tif !utf8.ValidString(e.Name) {\n\t\t\te.NameRaw = []byte(e.Name)\n\t\t\te.Name = \"\"\n\t\t}\n\t}\n\n\t\/\/ check early for dup name\n\tif e.Type == FileType {\n\t\tcName := filepath.Clean(e.GetName())\n\t\tif _, ok := jp.seen[cName]; ok {\n\t\t\treturn -1, ErrDuplicatePath\n\t\t}\n\t\tjp.seen[cName] = struct{}{}\n\t}\n\n\te.Position = jp.pos\n\terr := jp.e.Encode(e)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\t\/\/ made it this far, increment now\n\tjp.pos++\n\treturn e.Position, nil\n}\n\n\/\/ NewJSONPacker provides a Packer that writes each Entry (SegmentType and\n\/\/ FileType) as a json document.\n\/\/\n\/\/ The Entries are delimited by new line.\nfunc NewJSONPacker(w io.Writer) Packer 
{\n\treturn &jsonPacker{\n\t\tw: w,\n\t\te: json.NewEncoder(w),\n\t\tseen: seenNames{},\n\t}\n}\n\n\/*\nTODO(vbatts) perhaps have a more compact packer\/unpacker, maybe using msgapck\n(https:\/\/github.com\/ugorji\/go)\n\n\nEven though, since our jsonUnpacker and jsonPacker just take\nio.Reader\/io.Writer, then we can get away with passing them a\ngzip.Reader\/gzip.Writer\n*\/\n<commit_msg>Optimize JSON decoding<commit_after>package storage\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n\t\"path\/filepath\"\n\t\"unicode\/utf8\"\n)\n\n\/\/ ErrDuplicatePath occurs when a tar archive has more than one entry for the\n\/\/ same file path\nvar ErrDuplicatePath = errors.New(\"duplicates of file paths not supported\")\n\n\/\/ Packer describes the methods to pack Entries to a storage destination\ntype Packer interface {\n\t\/\/ AddEntry packs the Entry and returns its position\n\tAddEntry(e Entry) (int, error)\n}\n\n\/\/ Unpacker describes the methods to read Entries from a source\ntype Unpacker interface {\n\t\/\/ Next returns the next Entry being unpacked, or error, until io.EOF\n\tNext() (*Entry, error)\n}\n\n\/* TODO(vbatts) figure out a good model for this\ntype PackUnpacker interface {\n\tPacker\n\tUnpacker\n}\n*\/\n\ntype jsonUnpacker struct {\n\tseen seenNames\n\tdec *json.Decoder\n}\n\nfunc (jup *jsonUnpacker) Next() (*Entry, error) {\n\tvar e Entry\n\terr := jup.dec.Decode(&e)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ check for dup name\n\tif e.Type == FileType {\n\t\tcName := filepath.Clean(e.GetName())\n\t\tif _, ok := jup.seen[cName]; ok {\n\t\t\treturn nil, ErrDuplicatePath\n\t\t}\n\t\tjup.seen[cName] = struct{}{}\n\t}\n\n\treturn &e, err\n}\n\n\/\/ NewJSONUnpacker provides an Unpacker that reads Entries (SegmentType and\n\/\/ FileType) as a json document.\n\/\/\n\/\/ Each Entry read are expected to be delimited by new line.\nfunc NewJSONUnpacker(r io.Reader) Unpacker {\n\treturn &jsonUnpacker{\n\t\tdec: json.NewDecoder(r),\n\t\tseen: seenNames{},\n\t}\n}\n\ntype jsonPacker struct {\n\tw io.Writer\n\te *json.Encoder\n\tpos int\n\tseen seenNames\n}\n\ntype seenNames map[string]struct{}\n\nfunc (jp *jsonPacker) AddEntry(e Entry) (int, error) {\n\t\/\/ if Name is not valid utf8, switch it to raw first.\n\tif e.Name != \"\" {\n\t\tif !utf8.ValidString(e.Name) {\n\t\t\te.NameRaw = []byte(e.Name)\n\t\t\te.Name = \"\"\n\t\t}\n\t}\n\n\t\/\/ check early for dup name\n\tif e.Type == FileType {\n\t\tcName := filepath.Clean(e.GetName())\n\t\tif _, ok := jp.seen[cName]; ok {\n\t\t\treturn -1, ErrDuplicatePath\n\t\t}\n\t\tjp.seen[cName] = struct{}{}\n\t}\n\n\te.Position = jp.pos\n\terr := jp.e.Encode(e)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\t\/\/ made it this far, increment now\n\tjp.pos++\n\treturn e.Position, nil\n}\n\n\/\/ NewJSONPacker provides a Packer that writes each Entry (SegmentType and\n\/\/ FileType) as a json document.\n\/\/\n\/\/ The Entries are delimited by new line.\nfunc NewJSONPacker(w io.Writer) Packer {\n\treturn &jsonPacker{\n\t\tw: w,\n\t\te: json.NewEncoder(w),\n\t\tseen: seenNames{},\n\t}\n}\n\n\/*\nTODO(vbatts) perhaps have a more compact packer\/unpacker, maybe using msgapck\n(https:\/\/github.com\/ugorji\/go)\n\n\nEven though, since our jsonUnpacker and jsonPacker just take\nio.Reader\/io.Writer, then we can get away with passing them a\ngzip.Reader\/gzip.Writer\n*\/\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package main provides a simple UBER hypermedia drive todo list server\npackage main\n\nimport 
(\n\t\"container\/list\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\n\t\"github.com\/gorilla\/handlers\"\n\t\"github.com\/gorilla\/mux\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype ContextHandler interface {\n\tServeHTTPWithContext(context.Context, http.ResponseWriter, *http.Request)\n}\n\ntype ContextHandlerFunc func(context.Context, http.ResponseWriter, *http.Request)\n\nfunc (h ContextHandlerFunc) ServeHTTPWithContext(ctx context.Context, w http.ResponseWriter, req *http.Request) {\n\th(ctx, w, req)\n}\n\ntype ContextAdapter struct {\n\tctx context.Context\n\thandler ContextHandler\n}\n\nfunc (ca ContextAdapter) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tca.handler.ServeHTTPWithContext(ca.ctx, w, req)\n}\n\ntype udata struct {\n\tID string `json:\"id,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tRel []string `json:\"rel,omitempty\"`\n\tLabel string `json:\"label,omitempty\"`\n\tURL string `json:\"url,omitempty\"`\n\tTemplate bool `json:\"template,omitempty\"`\n\tAction string `json:\"action,omitempty\"`\n\tTransclude bool `json:\"transclude,omitempty\"`\n\tModel string `json:\"model,omitempty\"`\n\tSending string `json:\"sending,omitempty\"`\n\tAccepting []string `json:\"accepting,omitempty\"`\n\tValue string `json:\"value,omitempty\"`\n\tData []udata `json:\"data,omitempty\"`\n}\n\nfunc (ud *udoc) appendItem(taskid, value string) {\n\ttask := udata{ID: taskid,\n\t\tRel: []string{\"item\"},\n\t\tName: \"tasks\",\n\t\tData: []udata{\n\t\t\tudata{Rel: []string{\"complete\"}, URL: \"\/tasks\/complete\/\", Model: fmt.Sprintf(\"id=%s\", taskid), Action: \"append\"},\n\t\t\tudata{Name: \"text\", Value: value}}}\n\n\tud.Uber.Data[1].Data = append(ud.Uber.Data[1].Data, task)\n}\n\ntype ubody struct {\n\tVersion string `json:\"version\"`\n\tData []udata `json:\"data,omitempty\"`\n\tError []udata `json:\"error,omitempty\"`\n}\n\ntype udoc struct {\n\tUber ubody `json:\"uber\"`\n}\n\nvar (\n\ttaskctx = context.Background()\n)\n\nfunc init() {\n\ttaskctx = context.WithValue(taskctx, \"tasks\", list.New())\n\ttaskctx = context.WithValue(taskctx, \"logger\", log.New(os.Stdout, \"taskd: \", log.LstdFlags))\n\thttp.Handle(\"\/\", handlers.CompressHandler(handlers.LoggingHandler(os.Stdout, router())))\n}\n\nfunc main() {\n\thttp.ListenAndServe(\":3006\", nil)\n}\n\nfunc router() *mux.Router {\n\tr := mux.NewRouter()\n\tr.Handle(\"\/tasks\", http.Handler(ContextAdapter{ctx: taskctx, handler: ContextHandlerFunc(tasklist)})).Methods(\"GET\")\n\tr.Handle(\"\/tasks\", http.Handler(ContextAdapter{ctx: taskctx, handler: ContextHandlerFunc(taskadd)})).Methods(\"POST\")\n\tr.Handle(\"\/tasks\/complete\", http.Handler(ContextAdapter{ctx: taskctx, handler: ContextHandlerFunc(taskcomplete)})).Methods(\"POST\")\n\tr.Handle(\"\/tasks\/search\", http.Handler(ContextAdapter{ctx: taskctx, handler: ContextHandlerFunc(tasksearch)})).Methods(\"GET\")\n\treturn r\n}\n\nfunc taskadd(ctx context.Context, w http.ResponseWriter, req *http.Request) {\n\tbody, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write(mkError(\"ServerError\", \"reason\", \"Cannot read HTTP request body\"))\n\t\treturn\n\t}\n\n\tre := regexp.MustCompile(\"text=(([[:word:]]|[[:space:]])*)\")\n\tsm := re.FindStringSubmatch(string(body))\n\tif sm == nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write(mkError(\"ClientError\", \"reason\", \"Unrecognized add task 
body\"))\n\t\treturn\n\t}\n\n\ttasks := ctx.Value(\"tasks\").(*list.List)\n\ttasks.PushBack(sm[1])\n\n\tw.WriteHeader(http.StatusNoContent)\n}\n\nfunc taskcomplete(ctx context.Context, w http.ResponseWriter, req *http.Request) {\n\tbody, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write(mkError(\"ServerError\", \"reason\", \"Cannot read HTTP request body\"))\n\t\treturn\n\t}\n\n\tre := regexp.MustCompile(\"id=[[:alpha:]]+([[:digit:]]+)\")\n\tsm := re.FindStringSubmatch(string(body))\n\tif sm == nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write(mkError(\"ClientError\", \"reason\", \"Unrecognized complete text body\"))\n\t\treturn\n\t}\n\n\tcompleted := false\n\ttaskid, err := strconv.Atoi(sm[1])\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write(mkError(\"ServerError\", \"reason\", \"Cannot read HTTP request body\"))\n\t\treturn\n\t}\n\n\ttasks := ctx.Value(\"tasks\").(*list.List)\n\n\tif tasks.Len() < taskid {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tw.Write(mkError(\"ClientError\", \"reason\", \"No such task\"))\n\t\treturn\n\t}\n\n\tfor t, i := tasks.Front(), 1; t != nil; t = t.Next() {\n\t\tif i == taskid {\n\t\t\tcompleted = true\n\t\t\ttasks.Remove(t)\n\t\t}\n\t\ti++\n\t}\n\n\tif !completed {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tw.Write(mkError(\"ClientError\", \"reason\", \"No such task\"))\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusNoContent)\n}\n\nfunc tasklist(ctx context.Context, w http.ResponseWriter, req *http.Request) {\n\ttasks := ctx.Value(\"tasks\").(*list.List)\n\n\tresp := mkEmptylist()\n\tif resp == nil {\n\t\tpanic(\"can't generate base UBER document\")\n\t}\n\n\tfor t, i := tasks.Front(), 0; t != nil; t = t.Next() {\n\t\tresp.appendItem(fmt.Sprintf(\"task%d\", i+1), t.Value.(string))\n\t\ti++\n\t}\n\n\tbs, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write(mkError(\"ServerError\", \"reason\", \"Cannot read HTTP request body\"))\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(bs)\n}\n\nfunc tasksearch(ctx context.Context, w http.ResponseWriter, req *http.Request) {\n\ttasks := ctx.Value(\"tasks\").(*list.List)\n\n\tqt := req.URL.Query().Get(\"text\")\n\tif len(qt) <= 0 {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write(mkError(\"ClientError\", \"reason\", \"Missing text parameter\"))\n\t\treturn\n\t}\n\n\tresp := mkEmptylist()\n\tif resp == nil {\n\t\tpanic(\"can't generate base UBER document\")\n\t}\n\n\tfor t, i := tasks.Front(), 0; t != nil; t = t.Next() {\n\t\tif qt == t.Value.(string) {\n\t\t\tresp.appendItem(fmt.Sprintf(\"task%d\", i+1), t.Value.(string))\n\t\t\ti++\n\t\t}\n\t}\n\n\tbs, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write(mkError(\"ServerError\", \"reason\", \"Cannot read HTTP request body\"))\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(bs)\n}\n\nfunc mkEmptylist() *udoc {\n\tlinks := udata{\n\t\tID: \"links\",\n\t\tData: []udata{\n\t\t\tudata{ID: \"alps\",\n\t\t\t\tRel: []string{\"profile\"},\n\t\t\t\tURL: \"\/tasks-alps.xml\",\n\t\t\t\tAction: \"read\",\n\t\t\t\tData: []udata{}},\n\t\t\tudata{ID: \"list\",\n\t\t\t\tName: \"links\",\n\t\t\t\tRel: []string{\"collection\"},\n\t\t\t\tURL: \"\/tasks\/\",\n\t\t\t\tAction: \"read\",\n\t\t\t\tData: []udata{}},\n\t\t\tudata{ID: \"search\",\n\t\t\t\tName: \"links\",\n\t\t\t\tRel: []string{\"search\"},\n\t\t\t\tURL: 
\"\/tasks\/search\",\n\t\t\t\tAction: \"read\",\n\t\t\t\tModel: \"?text={text}\",\n\t\t\t\tData: []udata{}},\n\t\t\tudata{ID: \"add\",\n\t\t\t\tName: \"links\",\n\t\t\t\tRel: []string{\"add\"},\n\t\t\t\tURL: \"\/tasks\/\",\n\t\t\t\tAction: \"append\",\n\t\t\t\tModel: \"text={text}\",\n\t\t\t\tData: []udata{}}}}\n\n\treturn &udoc{ubody{Version: \"1.0\", Data: []udata{links, udata{ID: \"tasks\", Data: []udata{}}}, Error: []udata{}}}\n}\n\nfunc mkError(name, rel, value string) []byte {\n\tbs, err := json.Marshal(udoc{ubody{Version: \"1.0\", Error: []udata{udata{Name: name, Rel: []string{rel}, Value: value}}}})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn bs\n}\n<commit_msg>Move appendItem<commit_after>\/\/ Package main provides a simple UBER hypermedia drive todo list server\npackage main\n\nimport (\n\t\"container\/list\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\n\t\"github.com\/gorilla\/handlers\"\n\t\"github.com\/gorilla\/mux\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype ContextHandler interface {\n\tServeHTTPWithContext(context.Context, http.ResponseWriter, *http.Request)\n}\n\ntype ContextHandlerFunc func(context.Context, http.ResponseWriter, *http.Request)\n\nfunc (h ContextHandlerFunc) ServeHTTPWithContext(ctx context.Context, w http.ResponseWriter, req *http.Request) {\n\th(ctx, w, req)\n}\n\ntype ContextAdapter struct {\n\tctx context.Context\n\thandler ContextHandler\n}\n\nfunc (ca ContextAdapter) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tca.handler.ServeHTTPWithContext(ca.ctx, w, req)\n}\n\ntype udata struct {\n\tID string `json:\"id,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tRel []string `json:\"rel,omitempty\"`\n\tLabel string `json:\"label,omitempty\"`\n\tURL string `json:\"url,omitempty\"`\n\tTemplate bool `json:\"template,omitempty\"`\n\tAction string `json:\"action,omitempty\"`\n\tTransclude bool `json:\"transclude,omitempty\"`\n\tModel string `json:\"model,omitempty\"`\n\tSending string `json:\"sending,omitempty\"`\n\tAccepting []string `json:\"accepting,omitempty\"`\n\tValue string `json:\"value,omitempty\"`\n\tData []udata `json:\"data,omitempty\"`\n}\n\ntype ubody struct {\n\tVersion string `json:\"version\"`\n\tData []udata `json:\"data,omitempty\"`\n\tError []udata `json:\"error,omitempty\"`\n}\n\ntype udoc struct {\n\tUber ubody `json:\"uber\"`\n}\n\nfunc (ud *udoc) appendItem(taskid, value string) {\n\ttask := udata{ID: taskid,\n\t\tRel: []string{\"item\"},\n\t\tName: \"tasks\",\n\t\tData: []udata{\n\t\t\tudata{Rel: []string{\"complete\"}, URL: \"\/tasks\/complete\/\", Model: fmt.Sprintf(\"id=%s\", taskid), Action: \"append\"},\n\t\t\tudata{Name: \"text\", Value: value}}}\n\n\tud.Uber.Data[1].Data = append(ud.Uber.Data[1].Data, task)\n}\n\nvar (\n\ttaskctx = context.Background()\n)\n\nfunc init() {\n\ttaskctx = context.WithValue(taskctx, \"tasks\", list.New())\n\ttaskctx = context.WithValue(taskctx, \"logger\", log.New(os.Stdout, \"taskd: \", log.LstdFlags))\n\thttp.Handle(\"\/\", handlers.CompressHandler(handlers.LoggingHandler(os.Stdout, router())))\n}\n\nfunc main() {\n\thttp.ListenAndServe(\":3006\", nil)\n}\n\nfunc router() *mux.Router {\n\tr := mux.NewRouter()\n\tr.Handle(\"\/tasks\", http.Handler(ContextAdapter{ctx: taskctx, handler: ContextHandlerFunc(tasklist)})).Methods(\"GET\")\n\tr.Handle(\"\/tasks\", http.Handler(ContextAdapter{ctx: taskctx, handler: ContextHandlerFunc(taskadd)})).Methods(\"POST\")\n\tr.Handle(\"\/tasks\/complete\", 
http.Handler(ContextAdapter{ctx: taskctx, handler: ContextHandlerFunc(taskcomplete)})).Methods(\"POST\")\n\tr.Handle(\"\/tasks\/search\", http.Handler(ContextAdapter{ctx: taskctx, handler: ContextHandlerFunc(tasksearch)})).Methods(\"GET\")\n\treturn r\n}\n\nfunc taskadd(ctx context.Context, w http.ResponseWriter, req *http.Request) {\n\tbody, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write(mkError(\"ServerError\", \"reason\", \"Cannot read HTTP request body\"))\n\t\treturn\n\t}\n\n\tre := regexp.MustCompile(\"text=(([[:word:]]|[[:space:]])*)\")\n\tsm := re.FindStringSubmatch(string(body))\n\tif sm == nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write(mkError(\"ClientError\", \"reason\", \"Unrecognized add task body\"))\n\t\treturn\n\t}\n\n\ttasks := ctx.Value(\"tasks\").(*list.List)\n\ttasks.PushBack(sm[1])\n\n\tw.WriteHeader(http.StatusNoContent)\n}\n\nfunc taskcomplete(ctx context.Context, w http.ResponseWriter, req *http.Request) {\n\tbody, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write(mkError(\"ServerError\", \"reason\", \"Cannot read HTTP request body\"))\n\t\treturn\n\t}\n\n\tre := regexp.MustCompile(\"id=[[:alpha:]]+([[:digit:]]+)\")\n\tsm := re.FindStringSubmatch(string(body))\n\tif sm == nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write(mkError(\"ClientError\", \"reason\", \"Unrecognized complete text body\"))\n\t\treturn\n\t}\n\n\tcompleted := false\n\ttaskid, err := strconv.Atoi(sm[1])\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write(mkError(\"ServerError\", \"reason\", \"Cannot read HTTP request body\"))\n\t\treturn\n\t}\n\n\ttasks := ctx.Value(\"tasks\").(*list.List)\n\n\tif tasks.Len() < taskid {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tw.Write(mkError(\"ClientError\", \"reason\", \"No such task\"))\n\t\treturn\n\t}\n\n\tfor t, i := tasks.Front(), 1; t != nil; t = t.Next() {\n\t\tif i == taskid {\n\t\t\tcompleted = true\n\t\t\ttasks.Remove(t)\n\t\t}\n\t\ti++\n\t}\n\n\tif !completed {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tw.Write(mkError(\"ClientError\", \"reason\", \"No such task\"))\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusNoContent)\n}\n\nfunc tasklist(ctx context.Context, w http.ResponseWriter, req *http.Request) {\n\ttasks := ctx.Value(\"tasks\").(*list.List)\n\n\tresp := mkEmptylist()\n\tif resp == nil {\n\t\tpanic(\"can't generate base UBER document\")\n\t}\n\n\tfor t, i := tasks.Front(), 0; t != nil; t = t.Next() {\n\t\tresp.appendItem(fmt.Sprintf(\"task%d\", i+1), t.Value.(string))\n\t\ti++\n\t}\n\n\tbs, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write(mkError(\"ServerError\", \"reason\", \"Cannot read HTTP request body\"))\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(bs)\n}\n\nfunc tasksearch(ctx context.Context, w http.ResponseWriter, req *http.Request) {\n\ttasks := ctx.Value(\"tasks\").(*list.List)\n\n\tqt := req.URL.Query().Get(\"text\")\n\tif len(qt) <= 0 {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write(mkError(\"ClientError\", \"reason\", \"Missing text parameter\"))\n\t\treturn\n\t}\n\n\tresp := mkEmptylist()\n\tif resp == nil {\n\t\tpanic(\"can't generate base UBER document\")\n\t}\n\n\tfor t, i := tasks.Front(), 0; t != nil; t = t.Next() {\n\t\tif qt == t.Value.(string) {\n\t\t\tresp.appendItem(fmt.Sprintf(\"task%d\", i+1), 
t.Value.(string))\n\t\t\ti++\n\t\t}\n\t}\n\n\tbs, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write(mkError(\"ServerError\", \"reason\", \"Cannot read HTTP request body\"))\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(bs)\n}\n\nfunc mkEmptylist() *udoc {\n\tlinks := udata{\n\t\tID: \"links\",\n\t\tData: []udata{\n\t\t\tudata{ID: \"alps\",\n\t\t\t\tRel: []string{\"profile\"},\n\t\t\t\tURL: \"\/tasks-alps.xml\",\n\t\t\t\tAction: \"read\",\n\t\t\t\tData: []udata{}},\n\t\t\tudata{ID: \"list\",\n\t\t\t\tName: \"links\",\n\t\t\t\tRel: []string{\"collection\"},\n\t\t\t\tURL: \"\/tasks\/\",\n\t\t\t\tAction: \"read\",\n\t\t\t\tData: []udata{}},\n\t\t\tudata{ID: \"search\",\n\t\t\t\tName: \"links\",\n\t\t\t\tRel: []string{\"search\"},\n\t\t\t\tURL: \"\/tasks\/search\",\n\t\t\t\tAction: \"read\",\n\t\t\t\tModel: \"?text={text}\",\n\t\t\t\tData: []udata{}},\n\t\t\tudata{ID: \"add\",\n\t\t\t\tName: \"links\",\n\t\t\t\tRel: []string{\"add\"},\n\t\t\t\tURL: \"\/tasks\/\",\n\t\t\t\tAction: \"append\",\n\t\t\t\tModel: \"text={text}\",\n\t\t\t\tData: []udata{}}}}\n\n\treturn &udoc{ubody{Version: \"1.0\", Data: []udata{links, udata{ID: \"tasks\", Data: []udata{}}}, Error: []udata{}}}\n}\n\nfunc mkError(name, rel, value string) []byte {\n\tbs, err := json.Marshal(udoc{ubody{Version: \"1.0\", Error: []udata{udata{Name: name, Rel: []string{rel}, Value: value}}}})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn bs\n}\n<|endoftext|>"} {"text":"<commit_before>package lsp\n\nimport (\n\t\"context\"\n\t\"path\"\n\n\t\"github.com\/sourcegraph\/go-lsp\"\n\n\t\"github.com\/thought-machine\/please\/src\/core\"\n\t\"github.com\/thought-machine\/please\/src\/parse\/asp\"\n)\n\n\/\/ diagSource\nconst diagSource = \"plz tool langserver\"\n\nfunc (h *Handler) diagnose(d *doc) {\n\tlast := []lsp.Diagnostic{}\n\tfor ast := range d.Diagnostics {\n\t\tif diags := h.diagnostics(d, ast); !diagnosticsEqual(diags, last) {\n\t\t\th.Conn.Notify(context.Background(), \"textDocument\/publishDiagnostics\", &lsp.PublishDiagnosticsParams{\n\t\t\t\tURI: lsp.DocumentURI(\"file:\/\/\" + path.Join(h.root, d.Filename)),\n\t\t\t\tDiagnostics: diags,\n\t\t\t})\n\t\t\tlast = diags\n\t\t}\n\t}\n}\n\nfunc (h *Handler) diagnostics(d *doc, ast []*asp.Statement) []lsp.Diagnostic {\n\tdiags := []lsp.Diagnostic{}\n\tpkgLabel := core.BuildLabel{\n\t\tPackageName: path.Dir(d.Filename),\n\t\tName: \"all\",\n\t}\n\tasp.WalkAST(ast, func(expr *asp.Expression) bool {\n\t\tif expr.Val != nil && expr.Val.String != \"\" {\n\t\t\tif s := stringLiteral(expr.Val.String); core.LooksLikeABuildLabel(s) {\n\t\t\t\tif l, err := core.TryParseBuildLabel(s, pkgLabel.PackageName); err == nil {\n\t\t\t\t\tif t := h.state.Graph.Target(l); t != nil {\n\t\t\t\t\t\tif !pkgLabel.CanSee(h.state, t) {\n\t\t\t\t\t\t\tdiags = append(diags, lsp.Diagnostic{\n\t\t\t\t\t\t\t\tRange: lsp.Range{\n\t\t\t\t\t\t\t\t\t\/\/ -1 because asp.Positions are 1-indexed but lsp Positions are 0-indexed.\n\t\t\t\t\t\t\t\t\t\/\/ Further fiddling on Column to fix quotes.\n\t\t\t\t\t\t\t\t\tStart: lsp.Position{Line: expr.Pos.Line - 1, Character: expr.Pos.Column},\n\t\t\t\t\t\t\t\t\tEnd: lsp.Position{Line: expr.EndPos.Line - 1, Character: expr.EndPos.Column - 1},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tSeverity: lsp.Error,\n\t\t\t\t\t\t\t\tSource: diagSource,\n\t\t\t\t\t\t\t\tMessage: \"Target \" + t.Label.String() + \" is not visible to this package\",\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t}\n\t\t\t\t\t} else if h.state.Graph.PackageByLabel(l) != nil 
{\n\t\t\t\t\t\t\/\/ Package exists but target doesn't, issue a diagnostic for that.\n\t\t\t\t\t\tdiags = append(diags, lsp.Diagnostic{\n\t\t\t\t\t\t\tRange: lsp.Range{\n\t\t\t\t\t\t\t\tStart: lsp.Position{Line: expr.Pos.Line - 1, Character: expr.Pos.Column},\n\t\t\t\t\t\t\t\tEnd: lsp.Position{Line: expr.EndPos.Line - 1, Character: expr.EndPos.Column - 1},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSeverity: lsp.Error,\n\t\t\t\t\t\t\tSource: diagSource,\n\t\t\t\t\t\t\tMessage: \"Target \" + s + \" does not exist\",\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t})\n\treturn diags\n}\n\nfunc diagnosticsEqual(a, b []lsp.Diagnostic) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i, d := range a {\n\t\tif d != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n<commit_msg>Don't report diagnostics for pseudo-labels<commit_after>package lsp\n\nimport (\n\t\"context\"\n\t\"path\"\n\n\t\"github.com\/sourcegraph\/go-lsp\"\n\n\t\"github.com\/thought-machine\/please\/src\/core\"\n\t\"github.com\/thought-machine\/please\/src\/parse\/asp\"\n)\n\n\/\/ diagSource\nconst diagSource = \"plz tool langserver\"\n\nfunc (h *Handler) diagnose(d *doc) {\n\tlast := []lsp.Diagnostic{}\n\tfor ast := range d.Diagnostics {\n\t\tif diags := h.diagnostics(d, ast); !diagnosticsEqual(diags, last) {\n\t\t\th.Conn.Notify(context.Background(), \"textDocument\/publishDiagnostics\", &lsp.PublishDiagnosticsParams{\n\t\t\t\tURI: lsp.DocumentURI(\"file:\/\/\" + path.Join(h.root, d.Filename)),\n\t\t\t\tDiagnostics: diags,\n\t\t\t})\n\t\t\tlast = diags\n\t\t}\n\t}\n}\n\nfunc (h *Handler) diagnostics(d *doc, ast []*asp.Statement) []lsp.Diagnostic {\n\tdiags := []lsp.Diagnostic{}\n\tpkgLabel := core.BuildLabel{\n\t\tPackageName: path.Dir(d.Filename),\n\t\tName: \"all\",\n\t}\n\tasp.WalkAST(ast, func(expr *asp.Expression) bool {\n\t\tif expr.Val != nil && expr.Val.String != \"\" {\n\t\t\tif s := stringLiteral(expr.Val.String); core.LooksLikeABuildLabel(s) {\n\t\t\t\tif l, err := core.TryParseBuildLabel(s, pkgLabel.PackageName); err == nil {\n\t\t\t\t\tif l.IsAllTargets() || l.IsAllSubpackages() {\n\t\t\t\t\t\t\/\/ Can't emit any useful info for these.\n\t\t\t\t\t\t\/\/ TODO(peterebden): If we know what argument we were in we could emit info\n\t\t\t\t\t\t\/\/ describing whether this is appropriate or not.\n\t\t\t\t\t\treturn false\n\t\t\t\t\t} else if t := h.state.Graph.Target(l); t != nil {\n\t\t\t\t\t\tif !pkgLabel.CanSee(h.state, t) {\n\t\t\t\t\t\t\tdiags = append(diags, lsp.Diagnostic{\n\t\t\t\t\t\t\t\tRange: lsp.Range{\n\t\t\t\t\t\t\t\t\t\/\/ -1 because asp.Positions are 1-indexed but lsp Positions are 0-indexed.\n\t\t\t\t\t\t\t\t\t\/\/ Further fiddling on Column to fix quotes.\n\t\t\t\t\t\t\t\t\tStart: lsp.Position{Line: expr.Pos.Line - 1, Character: expr.Pos.Column},\n\t\t\t\t\t\t\t\t\tEnd: lsp.Position{Line: expr.EndPos.Line - 1, Character: expr.EndPos.Column - 1},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tSeverity: lsp.Error,\n\t\t\t\t\t\t\t\tSource: diagSource,\n\t\t\t\t\t\t\t\tMessage: \"Target \" + t.Label.String() + \" is not visible to this package\",\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t}\n\t\t\t\t\t} else if h.state.Graph.PackageByLabel(l) != nil {\n\t\t\t\t\t\t\/\/ Package exists but target doesn't, issue a diagnostic for that.\n\t\t\t\t\t\tdiags = append(diags, lsp.Diagnostic{\n\t\t\t\t\t\t\tRange: lsp.Range{\n\t\t\t\t\t\t\t\tStart: lsp.Position{Line: expr.Pos.Line - 1, Character: expr.Pos.Column},\n\t\t\t\t\t\t\t\tEnd: lsp.Position{Line: expr.EndPos.Line - 1, 
Character: expr.EndPos.Column - 1},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSeverity: lsp.Error,\n\t\t\t\t\t\t\tSource: diagSource,\n\t\t\t\t\t\t\tMessage: \"Target \" + s + \" does not exist\",\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t})\n\treturn diags\n}\n\nfunc diagnosticsEqual(a, b []lsp.Diagnostic) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i, d := range a {\n\t\tif d != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\tvlc \"github.com\/adrg\/libvlc-go\/v3\"\n\t\"github.com\/gotk3\/gotk3\/cairo\"\n\t\"github.com\/gotk3\/gotk3\/glib\"\n\t\"github.com\/gotk3\/gotk3\/gtk\"\n)\n\nconst appId = \"com.github.libvlc-go.gtk3-media-player-example\"\n\nfunc assertErr(err error) {\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n}\n\nfunc assertConv(ok bool) {\n\tif !ok {\n\t\tlog.Panic(\"invalid widget conversion\")\n\t}\n}\n\nfunc playerReleaseMedia(player *vlc.Player) {\n\tplayer.Stop()\n\tif media, _ := player.Media(); media != nil {\n\t\tmedia.Release()\n\t}\n}\n\nfunc main() {\n\t\/\/ Initialize libVLC module.\n\terr := vlc.Init(\"--quiet\", \"--no-xlib\")\n\tassertErr(err)\n\n\t\/\/ Create a new player.\n\tplayer, err := vlc.NewPlayer()\n\tassertErr(err)\n\n\t\/\/ Create new GTK application.\n\tapp, err := gtk.ApplicationNew(appId, glib.APPLICATION_FLAGS_NONE)\n\tassertErr(err)\n\n\tapp.Connect(\"activate\", func() {\n\t\t\/\/ Load application layout.\n\t\tbuilder, err := gtk.BuilderNewFromFile(\"layout.glade\")\n\t\tassertErr(err)\n\n\t\t\/\/ Get application window.\n\t\tobj, err := builder.GetObject(\"appWindow\")\n\t\tassertErr(err)\n\t\tappWin, ok := obj.(*gtk.ApplicationWindow)\n\t\tassertConv(ok)\n\n\t\t\/\/ Get play button.\n\t\tobj, err = builder.GetObject(\"playButton\")\n\t\tassertErr(err)\n\t\tplayButton, ok := obj.(*gtk.Button)\n\t\tassertConv(ok)\n\n\t\t\/\/ Add builder signal handlers.\n\t\tsignals := map[string]interface{}{\n\t\t\t\"onRealizePlayerArea\": func(playerArea *gtk.DrawingArea) {\n\t\t\t\t\/\/ Set window for the player.\n\t\t\t\tplayerWindow, err := playerArea.GetWindow()\n\t\t\t\tassertErr(err)\n\t\t\t\tsetPlayerWindow(player, playerWindow)\n\t\t\t\tassertErr(err)\n\t\t\t},\n\t\t\t\"onDrawPlayerArea\": func(playerArea *gtk.DrawingArea, cr *cairo.Context) {\n\t\t\t\tcr.SetSourceRGB(0, 0, 0)\n\t\t\t\tcr.Paint()\n\t\t\t},\n\t\t\t\"onActivateOpenFile\": func() {\n\t\t\t\tfileDialog, err := gtk.FileChooserDialogNewWith2Buttons(\n\t\t\t\t\t\"Choose file...\",\n\t\t\t\t\tappWin, gtk.FILE_CHOOSER_ACTION_OPEN,\n\t\t\t\t\t\"Cancel\", gtk.RESPONSE_DELETE_EVENT,\n\t\t\t\t\t\"Open\", gtk.RESPONSE_ACCEPT)\n\t\t\t\tassertErr(err)\n\t\t\t\tdefer fileDialog.Destroy()\n\n\t\t\t\tfileFilter, err := gtk.FileFilterNew()\n\t\t\t\tassertErr(err)\n\t\t\t\tfileFilter.SetName(\"Media files\")\n\t\t\t\tfileFilter.AddPattern(\"*.mp4\")\n\t\t\t\tfileFilter.AddPattern(\"*.mp3\")\n\t\t\t\tfileDialog.AddFilter(fileFilter)\n\n\t\t\t\tif result := fileDialog.Run(); result == gtk.RESPONSE_ACCEPT {\n\t\t\t\t\t\/\/ Release current media, if any.\n\t\t\t\t\tplayerReleaseMedia(player)\n\n\t\t\t\t\t\/\/ Get selected filename.\n\t\t\t\t\tfilename := fileDialog.GetFilename()\n\n\t\t\t\t\t\/\/ Load media and start playback.\n\t\t\t\t\tif _, err := player.LoadMediaFromPath(filename); err != nil {\n\t\t\t\t\t\tlog.Printf(\"Cannot load selected media: %s\\n\", 
err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tplayer.Play()\n\t\t\t\t\tplayButton.SetLabel(\"gtk-media-pause\")\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"onActivateQuit\": func() {\n\t\t\t\tapp.Quit()\n\t\t\t},\n\t\t\t\"onClickPlayButton\": func(playButton *gtk.Button) {\n\t\t\t\tif media, _ := player.Media(); media == nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif player.IsPlaying() {\n\t\t\t\t\tplayer.SetPause(true)\n\t\t\t\t\tplayButton.SetLabel(\"gtk-media-play\")\n\t\t\t\t} else {\n\t\t\t\t\tplayer.Play()\n\t\t\t\t\tplayButton.SetLabel(\"gtk-media-pause\")\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"onClickStopButton\": func(stopButton *gtk.Button) {\n\t\t\t\tplayer.Stop()\n\t\t\t\tplayButton.SetLabel(\"gtk-media-play\")\n\t\t\t},\n\t\t}\n\t\tbuilder.ConnectSignals(signals)\n\n\t\tappWin.ShowAll()\n\t\tapp.AddWindow(appWin)\n\t})\n\n\t\/\/ Cleanup on exit.\n\tapp.Connect(\"shutdown\", func() {\n\t\tplayerReleaseMedia(player)\n\t\tplayer.Release()\n\t\tvlc.Release()\n\t})\n\n\t\/\/ Launch the application.\n\tos.Exit(app.Run(os.Args))\n}\n<commit_msg>Minor GTK 3 player example improvement<commit_after>package main\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\tvlc \"github.com\/adrg\/libvlc-go\/v3\"\n\t\"github.com\/gotk3\/gotk3\/cairo\"\n\t\"github.com\/gotk3\/gotk3\/glib\"\n\t\"github.com\/gotk3\/gotk3\/gtk\"\n)\n\nconst appId = \"com.github.libvlc-go.gtk3-media-player-example\"\n\nfunc builderGetObject(builder *gtk.Builder, name string) glib.IObject {\n\tobj, _ := builder.GetObject(name)\n\treturn obj\n}\n\nfunc assertErr(err error) {\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n}\n\nfunc assertConv(ok bool) {\n\tif !ok {\n\t\tlog.Panic(\"invalid widget conversion\")\n\t}\n}\n\nfunc playerReleaseMedia(player *vlc.Player) {\n\tplayer.Stop()\n\tif media, _ := player.Media(); media != nil {\n\t\tmedia.Release()\n\t}\n}\n\nfunc main() {\n\t\/\/ Initialize libVLC module.\n\terr := vlc.Init(\"--quiet\", \"--no-xlib\")\n\tassertErr(err)\n\n\t\/\/ Create a new player.\n\tplayer, err := vlc.NewPlayer()\n\tassertErr(err)\n\n\t\/\/ Create new GTK application.\n\tapp, err := gtk.ApplicationNew(appId, glib.APPLICATION_FLAGS_NONE)\n\tassertErr(err)\n\n\tapp.Connect(\"activate\", func() {\n\t\t\/\/ Load application layout.\n\t\tbuilder, err := gtk.BuilderNewFromFile(\"layout.glade\")\n\t\tassertErr(err)\n\n\t\t\/\/ Get application window.\n\t\tappWin, ok := builderGetObject(builder, \"appWindow\").(*gtk.ApplicationWindow)\n\t\tassertConv(ok)\n\n\t\t\/\/ Get play button.\n\t\tplayButton, ok := builderGetObject(builder, \"playButton\").(*gtk.Button)\n\t\tassertConv(ok)\n\n\t\t\/\/ Add builder signal handlers.\n\t\tsignals := map[string]interface{}{\n\t\t\t\"onRealizePlayerArea\": func(playerArea *gtk.DrawingArea) {\n\t\t\t\t\/\/ Set window for the player.\n\t\t\t\tplayerWindow, err := playerArea.GetWindow()\n\t\t\t\tassertErr(err)\n\t\t\t\tsetPlayerWindow(player, playerWindow)\n\t\t\t\tassertErr(err)\n\t\t\t},\n\t\t\t\"onDrawPlayerArea\": func(playerArea *gtk.DrawingArea, cr *cairo.Context) {\n\t\t\t\tcr.SetSourceRGB(0, 0, 0)\n\t\t\t\tcr.Paint()\n\t\t\t},\n\t\t\t\"onActivateOpenFile\": func() {\n\t\t\t\tfileDialog, err := gtk.FileChooserDialogNewWith2Buttons(\n\t\t\t\t\t\"Choose file...\",\n\t\t\t\t\tappWin, gtk.FILE_CHOOSER_ACTION_OPEN,\n\t\t\t\t\t\"Cancel\", gtk.RESPONSE_DELETE_EVENT,\n\t\t\t\t\t\"Open\", gtk.RESPONSE_ACCEPT)\n\t\t\t\tassertErr(err)\n\t\t\t\tdefer fileDialog.Destroy()\n\n\t\t\t\tfileFilter, err := gtk.FileFilterNew()\n\t\t\t\tassertErr(err)\n\t\t\t\tfileFilter.SetName(\"Media 
files\")\n\t\t\t\tfileFilter.AddPattern(\"*.mp4\")\n\t\t\t\tfileFilter.AddPattern(\"*.mp3\")\n\t\t\t\tfileDialog.AddFilter(fileFilter)\n\n\t\t\t\tif result := fileDialog.Run(); result == gtk.RESPONSE_ACCEPT {\n\t\t\t\t\t\/\/ Release current media, if any.\n\t\t\t\t\tplayerReleaseMedia(player)\n\n\t\t\t\t\t\/\/ Get selected filename.\n\t\t\t\t\tfilename := fileDialog.GetFilename()\n\n\t\t\t\t\t\/\/ Load media and start playback.\n\t\t\t\t\tif _, err := player.LoadMediaFromPath(filename); err != nil {\n\t\t\t\t\t\tlog.Printf(\"Cannot load selected media: %s\\n\", err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tplayer.Play()\n\t\t\t\t\tplayButton.SetLabel(\"gtk-media-pause\")\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"onActivateQuit\": func() {\n\t\t\t\tapp.Quit()\n\t\t\t},\n\t\t\t\"onClickPlayButton\": func(playButton *gtk.Button) {\n\t\t\t\tif media, _ := player.Media(); media == nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif player.IsPlaying() {\n\t\t\t\t\tplayer.SetPause(true)\n\t\t\t\t\tplayButton.SetLabel(\"gtk-media-play\")\n\t\t\t\t} else {\n\t\t\t\t\tplayer.Play()\n\t\t\t\t\tplayButton.SetLabel(\"gtk-media-pause\")\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"onClickStopButton\": func(stopButton *gtk.Button) {\n\t\t\t\tplayer.Stop()\n\t\t\t\tplayButton.SetLabel(\"gtk-media-play\")\n\t\t\t},\n\t\t}\n\t\tbuilder.ConnectSignals(signals)\n\n\t\tappWin.ShowAll()\n\t\tapp.AddWindow(appWin)\n\t})\n\n\t\/\/ Cleanup on exit.\n\tapp.Connect(\"shutdown\", func() {\n\t\tplayerReleaseMedia(player)\n\t\tplayer.Release()\n\t\tvlc.Release()\n\t})\n\n\t\/\/ Launch the application.\n\tos.Exit(app.Run(os.Args))\n}\n<|endoftext|>"} {"text":"<commit_before>package lib\n\nimport (\n\t\"fmt\"\n\t\"github.com\/namsral\/flag\"\n\t\"os\"\n\t\"strings\"\n)\n\n\/\/ ReservedSpace -\nconst ReservedSpace = 450000000 \/\/ 450Mb\n\n\/\/ Config -\ntype Config struct {\n\tFolders []string `json:\"folders\"`\n\tDryRun bool `json:\"dryRun\"`\n\tNotifyCalc int `json:\"notifyCalc\"`\n\tNotifyMove int `json:\"notifyMove\"`\n\tReservedAmount int64 `json:\"reservedAmount\"`\n\tReservedUnit string `json:\"reservedUnit\"`\n\tRsyncFlags []string `json:\"rsyncFlags\"`\n\tVersion string `json:\"version\"`\n}\n\n\/\/ NotifyCalc\/NotifyMove possible values\n\/\/ 0 - no notification\n\/\/ 1 - simple notification\n\/\/ 2 - detailed notification\n\n\/\/ Settings -\ntype Settings struct {\n\tConfig\n\n\tConf string\n\tPort string\n\tLog string\n\tAPIFolders []string\n\t\/\/ ReservedSpace int64\n}\n\n\/\/ NewSettings -\nfunc NewSettings(version string) (*Settings, error) {\n\tvar config, port, log, folders, rsyncFlags, apiFolders string\n\tvar dryRun bool\n\tvar notifyCalc, notifyMove int\n\n\t\/\/ \/boot\/config\/plugins\/unbalance\/\n\tflag.StringVar(&config, \"config\", \"\/boot\/config\/plugins\/unbalance\/unbalance.conf\", \"config location\")\n\tflag.StringVar(&port, \"port\", \"6237\", \"port to run the server\")\n\tflag.StringVar(&log, \"log\", \"\/boot\/logs\/unbalance.log\", \"pathname where log file will be written to\")\n\tflag.StringVar(&folders, \"folders\", \"\", \"folders that will be scanned for media\")\n\tflag.BoolVar(&dryRun, \"dryRun\", true, \"perform a dry-run rather than actual work\")\n\tflag.IntVar(¬ifyCalc, \"notifyCalc\", 0, \"notify via email after calculation operation has completed (unraid notifications must be set up first): 0 - No notifications; 1 - Simple notifications; 2 - Detailed notifications\")\n\tflag.IntVar(¬ifyMove, \"notifyMove\", 0, \"notify via email after move operation has completed (unraid notifications must be 
set up first): 0 - No notifications; 1 - Simple notifications; 2 - Detailed notifications\")\n\tflag.StringVar(&rsyncFlags, \"rsyncFlags\", \"\", \"custom rsync flags\")\n\tflag.StringVar(&apiFolders, \"apiFolders\", \"\/var\/local\/emhttp\", \"folders to look for api endpoints\")\n\n\tif found, _ := Exists(\"\/boot\/config\/plugins\/unbalance\/unbalance.conf\"); found {\n\t\tflag.Set(\"config\", \"\/boot\/config\/plugins\/unbalance\/unbalance.conf\")\n\t}\n\n\tflag.Parse()\n\n\t\/\/ fmt.Printf(\"folders: %s\\nconfig: %s\\n\", folders, config)\n\n\ts := &Settings{}\n\n\tif folders == \"\" {\n\t\ts.Folders = make([]string, 0)\n\t} else {\n\t\ts.Folders = strings.Split(folders, \"|\")\n\t}\n\n\tif rsyncFlags == \"\" {\n\t\ts.RsyncFlags = []string{\"-avRX\", \"--partial\"}\n\t} else {\n\t\ts.RsyncFlags = strings.Split(rsyncFlags, \"|\")\n\t}\n\n\ts.DryRun = dryRun\n\ts.NotifyCalc = notifyCalc\n\ts.NotifyMove = notifyMove\n\ts.ReservedAmount = ReservedSpace \/ 1000 \/ 1000\n\ts.ReservedUnit = \"Mb\"\n\ts.Version = version\n\n\ts.Conf = config\n\ts.Port = port\n\ts.Log = log\n\ts.APIFolders = strings.Split(apiFolders, \"|\")\n\n\treturn s, nil\n}\n\n\/\/ AddFolder -\nfunc (s *Settings) AddFolder(folder string) {\n\ts.Folders = append(s.Folders, folder)\n}\n\n\/\/ DeleteFolder -\nfunc (s *Settings) DeleteFolder(folder string) {\n\n\tindex := -1\n\tfor p, v := range s.Folders {\n\t\tif v == folder {\n\t\t\tindex = p\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif index == -1 {\n\t\treturn\n\t}\n\n\ts.Folders = append(s.Folders[:index], s.Folders[index+1:]...)\n}\n\n\/\/ ToggleDryRun -\nfunc (s *Settings) ToggleDryRun() {\n\ts.DryRun = !s.DryRun\n}\n\n\/\/ Save -\nfunc (s *Settings) Save() (err error) {\n\ttmpFile := s.Conf + \".tmp\"\n\n\tfolders := strings.Join(s.Folders, \"|\")\n\tif err = WriteLine(tmpFile, fmt.Sprintf(\"folders=%s\", folders)); err != nil {\n\t\treturn err\n\t}\n\n\tif err = WriteLine(tmpFile, fmt.Sprintf(\"dryRun=%t\", s.DryRun)); err != nil {\n\t\treturn err\n\t}\n\n\tif err = WriteLine(tmpFile, fmt.Sprintf(\"notifyCalc=%d\", s.NotifyCalc)); err != nil {\n\t\treturn err\n\t}\n\n\tif err = WriteLine(tmpFile, fmt.Sprintf(\"notifyMove=%d\", s.NotifyCalc)); err != nil {\n\t\treturn err\n\t}\n\n\trsyncFlags := strings.Join(s.RsyncFlags, \"|\")\n\tif err = WriteLine(tmpFile, fmt.Sprintf(\"rsyncFlags=%s\", rsyncFlags)); err != nil {\n\t\treturn err\n\t}\n\n\tos.Rename(tmpFile, s.Conf)\n\n\treturn\n}\n<commit_msg>Closes #26<commit_after>package lib\n\nimport (\n\t\"fmt\"\n\t\"github.com\/namsral\/flag\"\n\t\"os\"\n\t\"strings\"\n)\n\n\/\/ ReservedSpace -\nconst ReservedSpace = 450000000 \/\/ 450Mb\n\n\/\/ Config -\ntype Config struct {\n\tFolders []string `json:\"folders\"`\n\tDryRun bool `json:\"dryRun\"`\n\tNotifyCalc int `json:\"notifyCalc\"`\n\tNotifyMove int `json:\"notifyMove\"`\n\tReservedAmount int64 `json:\"reservedAmount\"`\n\tReservedUnit string `json:\"reservedUnit\"`\n\tRsyncFlags []string `json:\"rsyncFlags\"`\n\tVersion string `json:\"version\"`\n}\n\n\/\/ NotifyCalc\/NotifyMove possible values\n\/\/ 0 - no notification\n\/\/ 1 - simple notification\n\/\/ 2 - detailed notification\n\n\/\/ Settings -\ntype Settings struct {\n\tConfig\n\n\tConf string\n\tPort string\n\tLog string\n\tAPIFolders []string\n\t\/\/ ReservedSpace int64\n}\n\n\/\/ NewSettings -\nfunc NewSettings(version string) (*Settings, error) {\n\tvar config, port, log, folders, rsyncFlags, apiFolders string\n\tvar dryRun bool\n\tvar notifyCalc, notifyMove int\n\n\t\/\/ 
\/boot\/config\/plugins\/unbalance\/\n\tflag.StringVar(&config, \"config\", \"\/boot\/config\/plugins\/unbalance\/unbalance.conf\", \"config location\")\n\tflag.StringVar(&port, \"port\", \"6237\", \"port to run the server\")\n\tflag.StringVar(&log, \"log\", \"\/boot\/logs\/unbalance.log\", \"pathname where log file will be written to\")\n\tflag.StringVar(&folders, \"folders\", \"\", \"folders that will be scanned for media\")\n\tflag.BoolVar(&dryRun, \"dryRun\", true, \"perform a dry-run rather than actual work\")\n\tflag.IntVar(¬ifyCalc, \"notifyCalc\", 0, \"notify via email after calculation operation has completed (unraid notifications must be set up first): 0 - No notifications; 1 - Simple notifications; 2 - Detailed notifications\")\n\tflag.IntVar(¬ifyMove, \"notifyMove\", 0, \"notify via email after move operation has completed (unraid notifications must be set up first): 0 - No notifications; 1 - Simple notifications; 2 - Detailed notifications\")\n\tflag.StringVar(&rsyncFlags, \"rsyncFlags\", \"\", \"custom rsync flags\")\n\tflag.StringVar(&apiFolders, \"apiFolders\", \"\/var\/local\/emhttp\", \"folders to look for api endpoints\")\n\n\tif found, _ := Exists(\"\/boot\/config\/plugins\/unbalance\/unbalance.conf\"); found {\n\t\tflag.Set(\"config\", \"\/boot\/config\/plugins\/unbalance\/unbalance.conf\")\n\t}\n\n\tflag.Parse()\n\n\t\/\/ fmt.Printf(\"folders: %s\\nconfig: %s\\n\", folders, config)\n\n\ts := &Settings{}\n\n\tif folders == \"\" {\n\t\ts.Folders = make([]string, 0)\n\t} else {\n\t\ts.Folders = strings.Split(folders, \"|\")\n\t}\n\n\tif rsyncFlags == \"\" {\n\t\ts.RsyncFlags = []string{\"-avRX\", \"--partial\"}\n\t} else {\n\t\ts.RsyncFlags = strings.Split(rsyncFlags, \"|\")\n\t}\n\n\ts.DryRun = dryRun\n\ts.NotifyCalc = notifyCalc\n\ts.NotifyMove = notifyMove\n\ts.ReservedAmount = ReservedSpace \/ 1000 \/ 1000\n\ts.ReservedUnit = \"Mb\"\n\ts.Version = version\n\n\ts.Conf = config\n\ts.Port = port\n\ts.Log = log\n\ts.APIFolders = strings.Split(apiFolders, \"|\")\n\n\treturn s, nil\n}\n\n\/\/ AddFolder -\nfunc (s *Settings) AddFolder(folder string) {\n\ts.Folders = append(s.Folders, folder)\n}\n\n\/\/ DeleteFolder -\nfunc (s *Settings) DeleteFolder(folder string) {\n\n\tindex := -1\n\tfor p, v := range s.Folders {\n\t\tif v == folder {\n\t\t\tindex = p\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif index == -1 {\n\t\treturn\n\t}\n\n\ts.Folders = append(s.Folders[:index], s.Folders[index+1:]...)\n}\n\n\/\/ ToggleDryRun -\nfunc (s *Settings) ToggleDryRun() {\n\ts.DryRun = !s.DryRun\n}\n\n\/\/ Save -\nfunc (s *Settings) Save() (err error) {\n\ttmpFile := s.Conf + \".tmp\"\n\n\tfolders := strings.Join(s.Folders, \"|\")\n\tif err = WriteLine(tmpFile, fmt.Sprintf(\"folders=%s\", folders)); err != nil {\n\t\treturn err\n\t}\n\n\tif err = WriteLine(tmpFile, fmt.Sprintf(\"dryRun=%t\", s.DryRun)); err != nil {\n\t\treturn err\n\t}\n\n\tif err = WriteLine(tmpFile, fmt.Sprintf(\"notifyCalc=%d\", s.NotifyCalc)); err != nil {\n\t\treturn err\n\t}\n\n\tif err = WriteLine(tmpFile, fmt.Sprintf(\"notifyMove=%d\", s.NotifyMove)); err != nil {\n\t\treturn err\n\t}\n\n\trsyncFlags := strings.Join(s.RsyncFlags, \"|\")\n\tif err = WriteLine(tmpFile, fmt.Sprintf(\"rsyncFlags=%s\", rsyncFlags)); err != nil {\n\t\treturn err\n\t}\n\n\tos.Rename(tmpFile, s.Conf)\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The Upspin Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage test\n\nimport (\n\t\"crypto\/rand\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"upspin.io\/errors\"\n\t\"upspin.io\/path\"\n\t\"upspin.io\/test\/testenv\"\n\t\"upspin.io\/upspin\"\n)\n\nfunc testSnapshot(t *testing.T, r *testenv.Runner) {\n\tconst (\n\t\tbase = ownerName + \"\/snapshot-test\"\n\t\tdir = base + \"\/dir\"\n\t\tfile = dir + \"\/file\"\n\t\taccessFile = base + \"\/Access\"\n\t\taccess = \"*:all\" \/\/ intentionally permissive.\n\t\tsnapshotDir = snapshotUser + \"\/\"\n\n\t\t\/\/ must be in sync with dir\/server\/snapshot.go\n\t\tsnapshotControlFile = snapshotDir + \"\/TakeSnapshot\"\n\t)\n\n\tdata := randomString(t, 16)\n\n\tr.As(ownerName)\n\tr.MakeDirectory(base)\n\tr.Put(accessFile, access)\n\tr.MakeDirectory(dir)\n\tr.Put(file, data)\n\tif r.Failed() {\n\t\tt.Fatal(r.Diag())\n\t}\n\n\t\/\/ Take the snapshot.\n\tr.As(snapshotUser)\n\tr.MakeDirectory(snapshotDir)\n\tr.Put(snapshotControlFile, \"\")\n\tif r.Failed() {\n\t\tt.Fatal(r.Diag())\n\t}\n\n\t\/\/ If the control file actually exists in snapshotDir, then this\n\t\/\/ DirServer does not support snapshotting.\n\tr.Get(snapshotControlFile)\n\tif !r.Failed() {\n\t\tif r.Data == \"\" {\n\t\t\tt.Log(\"Snapshotting not supported.\")\n\t\t\treturn\n\t\t}\n\t\tt.Fatalf(\"Non-empty snapshot control file: %q.\", r.Data)\n\t}\n\n\t\/\/ Verify snapshot was taken today and has the correct data in it.\n\tr.As(ownerName)\n\tsnapPattern := snapshotDir + time.Now().UTC().Format(\"2006\/01\/02\") + \"\/\"\n\n\t\/\/ Watch the snapshot root for the new snapshot.\n\tdone := r.DirWatch(upspin.PathName(snapPattern), -1)\n\tif r.Failed() {\n\t\tt.Fatal(r.Diag())\n\t}\n\t\/\/ There could be many entries, since snapshots are for the root. Keep\n\t\/\/ looking until we find what we want.\n\tvar found upspin.PathName\n\tfor i := 0; i < 2; i++ {\n\t\t\/\/ We use GetNEvents because we don't have a fixed name to use\n\t\t\/\/ with r.GotEvent(name). We need two entries, the top directory\n\t\t\/\/ with the date and the sub directory with the time.\n\t\tif !r.GetNEvents(2) {\n\t\t\tt.Fatal(r.Diag())\n\t\t}\n\t\tentry := r.Events[1].Entry\n\n\t\t\/\/ Check entry contents and name.\n\t\tfile := path.Join(entry.Name, \"snapshot-test\", \"dir\", \"file\")\n\t\tr.Get(file)\n\t\tif r.Failed() {\n\t\t\t\/\/ TODO: remove once the failure is fixed.\n\t\t\tt.Logf(\"Failed to Get %q. 
Attempting to Glob %q\", file, entry.Name)\n\t\t\tdebugFailure(t, r, entry.Name, file)\n\n\t\t\t\/\/ t.Fatal(r.Diag())\n\t\t}\n\t\tif r.Data == data {\n\t\t\tfound = file\n\t\t\tbreak\n\t\t}\n\t}\n\tclose(done)\n\tif found == \"\" {\n\t\tt.Fatalf(\"Unable to find a snapshot in %s\", snapPattern)\n\t}\n\n\t\/\/ Ensure no one else can read this snapshotted file, even with a\n\t\/\/ permissive Access file.\n\tr.As(readerName)\n\tr.DirLookup(found)\n\tif !r.Match(errors.E(errors.Private)) {\n\t\tt.Fatal(r.Diag())\n\t}\n\n\t\/\/ WhichAccess for a snapshotted name returns nothing, even if the\n\t\/\/ Access file exists in the path, which is the case here.\n\tr.As(ownerName)\n\tr.DirWhichAccess(found)\n\tif !r.GotNilEntry() {\n\t\tt.Fatal(r.Diag())\n\t}\n\n\t\/\/ No one can delete snapshots.\n\tr.Delete(found)\n\tif !r.Match(errors.E(errors.Permission)) {\n\t\tt.Fatal(r.Diag())\n\t}\n\n\t\/\/ No one can overwrite a snapshot.\n\tr.Put(found, \"yo\")\n\tif !r.Match(errors.E(errors.Permission)) {\n\t\tt.Fatal(r.Diag())\n\t}\n}\n\n\/\/ TODO: remove.\nfunc debugFailure(t *testing.T, r *testenv.Runner, dir, file upspin.PathName) {\n\tc := r.ClientFor(ownerName)\n\n\tentries, err := c.Glob(string(dir) + \"\/*\")\n\tif err != nil {\n\t\tt.Fatalf(\"Error Globbing: %v\", err)\n\t}\n\tfor i, e := range entries {\n\t\tt.Logf(\"%d: %s\", i, e.Name)\n\t}\n\n\te, err := c.Lookup(file, true)\n\tif err != nil {\n\t\tt.Fatalf(\"Error in lookup of %q: %v\", file, err)\n\t}\n\tt.Logf(\"Entry: %+v\", e)\n}\n\nfunc randomString(t *testing.T, size int) string {\n\tbuf := make([]byte, size)\n\t_, err := rand.Read(buf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn fmt.Sprintf(\"%x\", buf)\n}\n<commit_msg>test: add more instrumentation to flaky snapshot test<commit_after>\/\/ Copyright 2017 The Upspin Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage test\n\nimport (\n\t\"crypto\/rand\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"upspin.io\/errors\"\n\t\"upspin.io\/path\"\n\t\"upspin.io\/test\/testenv\"\n\t\"upspin.io\/upspin\"\n)\n\nfunc testSnapshot(t *testing.T, r *testenv.Runner) {\n\tconst (\n\t\tbase = ownerName + \"\/snapshot-test\"\n\t\tdir = base + \"\/dir\"\n\t\tfile = dir + \"\/file\"\n\t\taccessFile = base + \"\/Access\"\n\t\taccess = \"*:all\" \/\/ intentionally permissive.\n\t\tsnapshotDir = snapshotUser + \"\/\"\n\n\t\t\/\/ must be in sync with dir\/server\/snapshot.go\n\t\tsnapshotControlFile = snapshotDir + \"\/TakeSnapshot\"\n\t)\n\n\tdata := randomString(t, 16)\n\n\tr.As(ownerName)\n\tr.MakeDirectory(base)\n\tr.Put(accessFile, access)\n\tr.MakeDirectory(dir)\n\tr.Put(file, data)\n\tif r.Failed() {\n\t\tt.Fatal(r.Diag())\n\t}\n\n\t\/\/ Take the snapshot.\n\tr.As(snapshotUser)\n\tr.MakeDirectory(snapshotDir)\n\tr.Put(snapshotControlFile, \"\")\n\tif r.Failed() {\n\t\tt.Fatal(r.Diag())\n\t}\n\n\t\/\/ If the control file actually exists in snapshotDir, then this\n\t\/\/ DirServer does not support snapshotting.\n\tr.Get(snapshotControlFile)\n\tif !r.Failed() {\n\t\tif r.Data == \"\" {\n\t\t\tt.Log(\"Snapshotting not supported.\")\n\t\t\treturn\n\t\t}\n\t\tt.Fatalf(\"Non-empty snapshot control file: %q.\", r.Data)\n\t}\n\tt.Log(\"Snapshot command issued\")\n\n\t\/\/ Verify snapshot was taken today and has the correct data in it.\n\tr.As(ownerName)\n\tsnapPattern := snapshotDir + time.Now().UTC().Format(\"2006\/01\/02\") + \"\/\"\n\n\t\/\/ Watch the snapshot root for the new snapshot.\n\tdone := r.DirWatch(upspin.PathName(snapPattern), -1)\n\tif r.Failed() {\n\t\tt.Fatal(r.Diag())\n\t}\n\t\/\/ There could be many entries, since snapshots are for the root. Keep\n\t\/\/ looking until we find what we want.\n\tvar found upspin.PathName\n\tfor i := 0; i < 2; i++ {\n\t\t\/\/ We use GetNEvents because we don't have a fixed name to use\n\t\t\/\/ with r.GotEvent(name). We need two entries, the top directory\n\t\t\/\/ with the date and the sub directory with the time.\n\t\tif !r.GetNEvents(2) {\n\t\t\tt.Fatal(r.Diag())\n\t\t}\n\t\tentry := r.Events[1].Entry\n\n\t\t\/\/ Check entry contents and name.\n\t\tfile := path.Join(entry.Name, \"snapshot-test\", \"dir\", \"file\")\n\t\tr.Get(file)\n\t\tif r.Failed() {\n\t\t\t\/\/ TODO: remove once the failure is fixed.\n\t\t\tt.Logf(\"Failed to Get %q. 
Attempting to Glob %q\", file, entry.Name)\n\t\t\tdebugFailure(t, r, entry.Name, file)\n\n\t\t\t\/\/ t.Fatal(r.Diag())\n\t\t}\n\t\tif r.Data == data {\n\t\t\tfound = file\n\t\t\tbreak\n\t\t}\n\t}\n\tclose(done)\n\tif found == \"\" {\n\t\tt.Fatalf(\"Unable to find a snapshot in %s\", snapPattern)\n\t}\n\n\t\/\/ Ensure no one else can read this snapshotted file, even with a\n\t\/\/ permissive Access file.\n\tr.As(readerName)\n\tr.DirLookup(found)\n\tif !r.Match(errors.E(errors.Private)) {\n\t\tt.Fatal(r.Diag())\n\t}\n\n\t\/\/ WhichAccess for a snapshotted name returns nothing, even if the\n\t\/\/ Access file exists in the path, which is the case here.\n\tr.As(ownerName)\n\tr.DirWhichAccess(found)\n\tif !r.GotNilEntry() {\n\t\tt.Fatal(r.Diag())\n\t}\n\n\t\/\/ No one can delete snapshots.\n\tr.Delete(found)\n\tif !r.Match(errors.E(errors.Permission)) {\n\t\tt.Fatal(r.Diag())\n\t}\n\n\t\/\/ No one can overwrite a snapshot.\n\tr.Put(found, \"yo\")\n\tif !r.Match(errors.E(errors.Permission)) {\n\t\tt.Fatal(r.Diag())\n\t}\n}\n\n\/\/ TODO: remove.\nfunc debugFailure(t *testing.T, r *testenv.Runner, dir, file upspin.PathName) {\n\tc := r.ClientFor(ownerName)\n\n\tentries, err := c.Glob(string(dir) + \"\/*\")\n\tif err != nil {\n\t\tt.Fatalf(\"Error Globbing: %v\", err)\n\t}\n\tfor i, e := range entries {\n\t\tt.Logf(\"%d: %s\", i, e.Name)\n\t}\n\n\te, err := c.Lookup(file, true)\n\tif err != nil {\n\t\tt.Errorf(\"Error in lookup of %q: %v\", file, err)\n\t}\n\tt.Logf(\"Entry: %+v\", e)\n}\n\nfunc randomString(t *testing.T, size int) string {\n\tbuf := make([]byte, size)\n\t_, err := rand.Read(buf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn fmt.Sprintf(\"%x\", buf)\n}\n<|endoftext|>"} {"text":"<commit_before>package sgsimulator\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/mux\"\n)\n\ntype SGSimulator struct{}\n\ntype BulkDocsResponse struct {\n\tName string\n\tHobbies []string\n}\n\nfunc HomeHandler(w http.ResponseWriter, req *http.Request) {\n\tlog.Printf(\"HomeHandler called with req: %+v\", req)\n\tw.Write([]byte(\"Sync Gateway Simulator\\n\"))\n}\n\nfunc DoNothingHandler(w http.ResponseWriter, req *http.Request) {\n\tlog.Printf(\"DoNothingHandler called with req: %+v\", req)\n\tw.Write([]byte(\"Sync Gateway Simulator DB\\n\"))\n}\n\nfunc BulkDocsHandler(w http.ResponseWriter, req *http.Request) {\n\tlog.Printf(\"BulkDocsHandler called with req: %+v\", req)\n\n\tbulkDocsResponseSlice := []map[string]string{}\n\tbulkDocResponse := map[string]string{\n\t\t\"id\": \"1\",\n\t\t\"rev\": \"1-34243\",\n\t}\n\tbulkDocsResponseSlice = append(bulkDocsResponseSlice, bulkDocResponse)\n\tjs, err := json.Marshal(bulkDocsResponseSlice)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(js)\n\n\tw.Write([]byte(\"Sync Gateway Simulator DB\\n\"))\n}\n\nfunc NewSGSimulator() *SGSimulator {\n\treturn &SGSimulator{}\n}\n\nfunc (sg *SGSimulator) Run() {\n\n\t\/\/ TODO: parameterize via CLI\n\tdbName := \"db\"\n\tport := 8000\n\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"\/\", DoNothingHandler)\n\tdbRouter := r.PathPrefix(fmt.Sprintf(\"\/%v\", dbName)).Subrouter()\n\tdbRouter.Path(\"\/_user\/\").HandlerFunc(DoNothingHandler)\n\tdbRouter.Path(\"\/_bulk_docs\").HandlerFunc(BulkDocsHandler)\n\n\thttp.Handle(\"\/\", r)\n\n\tsrv := &http.Server{\n\t\tHandler: r,\n\t\tAddr: fmt.Sprintf(\"127.0.0.1:%d\", port),\n\t\tWriteTimeout: 15 * 
time.Second,\n\t\tReadTimeout: 15 * time.Second,\n\t}\n\n\tlog.Printf(\"Listening on %v\", srv.Addr)\n\n\tlog.Fatal(srv.ListenAndServe())\n\n}\n<commit_msg>Handles POSTing single docs<commit_after>package sgsimulator\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/mux\"\n)\n\ntype SGSimulator struct{}\n\ntype BulkDocsResponse struct {\n\tName string\n\tHobbies []string\n}\n\nfunc HomeHandler(w http.ResponseWriter, req *http.Request) {\n\tlog.Printf(\"HomeHandler called with req: %+v\", req)\n\tw.Write([]byte(\"Sync Gateway Simulator\\n\"))\n}\n\nfunc DoNothingHandler(w http.ResponseWriter, req *http.Request) {\n\tlog.Printf(\"DoNothingHandler called with req: %+v\", req)\n\tw.Write([]byte(\"Sync Gateway Simulator DB\\n\"))\n}\n\nfunc BulkDocsHandler(w http.ResponseWriter, req *http.Request) {\n\tlog.Printf(\"BulkDocsHandler called with req: %+v\", req)\n\n\tbulkDocsResponseSlice := []map[string]string{}\n\tbulkDocResponse := map[string]string{\n\t\t\"id\": \"1\",\n\t\t\"rev\": \"1-34243\",\n\t}\n\tbulkDocsResponseSlice = append(bulkDocsResponseSlice, bulkDocResponse)\n\tjs, err := json.Marshal(bulkDocsResponseSlice)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(js)\n\n\tw.Write([]byte(\"Sync Gateway Simulator DB\\n\"))\n}\n\nfunc NewSGSimulator() *SGSimulator {\n\treturn &SGSimulator{}\n}\n\nfunc (sg *SGSimulator) Run() {\n\n\t\/\/ TODO: parameterize via CLI\n\tdbName := \"db\"\n\tport := 8000\n\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"\/\", DoNothingHandler)\n\tdbRouter := r.PathPrefix(fmt.Sprintf(\"\/%v\", dbName)).Subrouter()\n\tdbRouter.Path(\"\/\").HandlerFunc(DoNothingHandler)\n\tdbRouter.Path(\"\/_user\/\").HandlerFunc(DoNothingHandler)\n\tdbRouter.Path(\"\/_bulk_docs\").HandlerFunc(BulkDocsHandler)\n\n\thttp.Handle(\"\/\", r)\n\n\tsrv := &http.Server{\n\t\tHandler: r,\n\t\tAddr: fmt.Sprintf(\"127.0.0.1:%d\", port),\n\t\tWriteTimeout: 15 * time.Second,\n\t\tReadTimeout: 15 * time.Second,\n\t}\n\n\tlog.Printf(\"Listening on %v\", srv.Addr)\n\n\tlog.Fatal(srv.ListenAndServe())\n\n}\n<|endoftext|>"} {"text":"<commit_before>package eagain\n\nimport (\n\t\"io\"\n\n\t\"golang.org\/x\/sys\/unix\"\n\n\t\"github.com\/lxc\/lxd\/shared\"\n)\n\n\/\/ Reader represents an io.Reader that handles EAGAIN\ntype Reader struct {\n\tReader io.Reader\n}\n\n\/\/ Read behaves like io.Reader.Read but will retry on EAGAIN\nfunc (er Reader) Read(p []byte) (int, error) {\nagain:\n\tn, err := er.Reader.Read(p)\n\tif err == nil {\n\t\treturn n, nil\n\t}\n\n\t\/\/ keep retrying on EAGAIN\n\terrno, ok := shared.GetErrno(err)\n\tif ok && (errno == unix.EAGAIN || errno == unix.EINTR) {\n\t\tgoto again\n\t}\n\n\treturn n, err\n}\n\n\/\/ Writer represents an io.Writer that handles EAGAIN\ntype Writer struct {\n\tWriter io.Writer\n}\n\n\/\/ Write behaves like io.Writer.Write but will retry on EAGAIN\nfunc (ew Writer) Write(p []byte) (int, error) {\nagain:\n\tn, err := ew.Writer.Write(p)\n\tif err == nil {\n\t\treturn n, nil\n\t}\n\n\t\/\/ keep retrying on EAGAIN\n\terrno, ok := shared.GetErrno(err)\n\tif ok && (errno == unix.EAGAIN || errno == unix.EINTR) {\n\t\tgoto again\n\t}\n\n\treturn n, err\n}\n<commit_msg>shared\/eagain: Restrict to Linux<commit_after>\/\/ +build linux\n\npackage eagain\n\nimport (\n\t\"io\"\n\n\t\"golang.org\/x\/sys\/unix\"\n\n\t\"github.com\/lxc\/lxd\/shared\"\n)\n\n\/\/ Reader represents an io.Reader 
that handles EAGAIN\ntype Reader struct {\n\tReader io.Reader\n}\n\n\/\/ Read behaves like io.Reader.Read but will retry on EAGAIN\nfunc (er Reader) Read(p []byte) (int, error) {\nagain:\n\tn, err := er.Reader.Read(p)\n\tif err == nil {\n\t\treturn n, nil\n\t}\n\n\t\/\/ keep retrying on EAGAIN\n\terrno, ok := shared.GetErrno(err)\n\tif ok && (errno == unix.EAGAIN || errno == unix.EINTR) {\n\t\tgoto again\n\t}\n\n\treturn n, err\n}\n\n\/\/ Writer represents an io.Writer that handles EAGAIN\ntype Writer struct {\n\tWriter io.Writer\n}\n\n\/\/ Write behaves like io.Writer.Write but will retry on EAGAIN\nfunc (ew Writer) Write(p []byte) (int, error) {\nagain:\n\tn, err := ew.Writer.Write(p)\n\tif err == nil {\n\t\treturn n, nil\n\t}\n\n\t\/\/ keep retrying on EAGAIN\n\terrno, ok := shared.GetErrno(err)\n\tif ok && (errno == unix.EAGAIN || errno == unix.EINTR) {\n\t\tgoto again\n\t}\n\n\treturn n, err\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\tdep \"github.com\/hashicorp\/consul-template\/dependency\"\n)\n\n\/\/ now is function that represents the current time in UTC. This is here\n\/\/ primarily for the tests to override times.\nvar now = func() time.Time { return time.Now().UTC() }\n\n\/\/ datacentersFunc returns or accumulates datacenter dependencies.\nfunc datacentersFunc(brain *Brain,\n\tused, missing map[string]dep.Dependency) func(...string) ([]string, error) {\n\treturn func(s ...string) ([]string, error) {\n\t\tresult := make([]string, 0)\n\n\t\td, err := dep.ParseDatacenters(s...)\n\t\tif err != nil {\n\t\t\treturn result, err\n\t\t}\n\n\t\taddDependency(used, d)\n\n\t\tif value, ok := brain.Recall(d); ok {\n\t\t\treturn value.([]string), nil\n\t\t}\n\n\t\taddDependency(missing, d)\n\n\t\treturn result, nil\n\t}\n}\n\n\/\/ fileFunc returns or accumulates file dependencies.\nfunc fileFunc(brain *Brain,\n\tused, missing map[string]dep.Dependency) func(string) (string, error) {\n\treturn func(s string) (string, error) {\n\t\tif len(s) == 0 {\n\t\t\treturn \"\", nil\n\t\t}\n\n\t\td, err := dep.ParseFile(s)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\taddDependency(used, d)\n\n\t\tif value, ok := brain.Recall(d); ok {\n\t\t\tif value == nil {\n\t\t\t\treturn \"\", nil\n\t\t\t} else {\n\t\t\t\treturn value.(string), nil\n\t\t\t}\n\t\t}\n\n\t\taddDependency(missing, d)\n\n\t\treturn \"\", nil\n\t}\n}\n\n\/\/ keyFunc returns or accumulates key dependencies.\nfunc keyFunc(brain *Brain,\n\tused, missing map[string]dep.Dependency) func(string) (string, error) {\n\treturn func(s string) (string, error) {\n\t\tif len(s) == 0 {\n\t\t\treturn \"\", nil\n\t\t}\n\n\t\td, err := dep.ParseStoreKey(s)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\taddDependency(used, d)\n\n\t\tif value, ok := brain.Recall(d); ok {\n\t\t\tif value == nil {\n\t\t\t\treturn \"\", nil\n\t\t\t} else {\n\t\t\t\treturn value.(string), nil\n\t\t\t}\n\t\t}\n\n\t\taddDependency(missing, d)\n\n\t\treturn \"\", nil\n\t}\n}\n\n\/\/ lsFunc returns or accumulates keyPrefix dependencies.\nfunc lsFunc(brain *Brain,\n\tused, missing map[string]dep.Dependency) func(string) ([]*dep.KeyPair, error) {\n\treturn func(s string) ([]*dep.KeyPair, error) {\n\t\tresult := make([]*dep.KeyPair, 0)\n\n\t\tif len(s) == 0 {\n\t\t\treturn result, nil\n\t\t}\n\n\t\td, err := dep.ParseStoreKeyPrefix(s)\n\t\tif err != nil {\n\t\t\treturn result, err\n\t\t}\n\n\t\taddDependency(used, d)\n\n\t\t\/\/ Only return non-empty 
top-level keys\n\t\tif value, ok := brain.Recall(d); ok {\n\t\t\tfor _, pair := range value.([]*dep.KeyPair) {\n\t\t\t\tif pair.Key != \"\" && !strings.Contains(pair.Key, \"\/\") {\n\t\t\t\t\tresult = append(result, pair)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn result, nil\n\t\t}\n\n\t\taddDependency(missing, d)\n\n\t\treturn result, nil\n\t}\n}\n\n\/\/ nodesFunc returns or accumulates catalog node dependencies.\nfunc nodesFunc(brain *Brain,\n\tused, missing map[string]dep.Dependency) func(...string) ([]*dep.Node, error) {\n\treturn func(s ...string) ([]*dep.Node, error) {\n\t\tresult := make([]*dep.Node, 0)\n\n\t\td, err := dep.ParseCatalogNodes(s...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\taddDependency(used, d)\n\n\t\tif value, ok := brain.Recall(d); ok {\n\t\t\treturn value.([]*dep.Node), nil\n\t\t}\n\n\t\taddDependency(missing, d)\n\n\t\treturn result, nil\n\t}\n}\n\n\/\/ serviceFunc returns or accumulates health service dependencies.\nfunc serviceFunc(brain *Brain,\n\tused, missing map[string]dep.Dependency) func(...string) ([]*dep.HealthService, error) {\n\treturn func(s ...string) ([]*dep.HealthService, error) {\n\t\tresult := make([]*dep.HealthService, 0)\n\n\t\tif len(s) == 0 || s[0] == \"\" {\n\t\t\treturn result, nil\n\t\t}\n\n\t\td, err := dep.ParseHealthServices(s...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\taddDependency(used, d)\n\n\t\tif value, ok := brain.Recall(d); ok {\n\t\t\treturn value.([]*dep.HealthService), nil\n\t\t}\n\n\t\taddDependency(missing, d)\n\n\t\treturn result, nil\n\t}\n}\n\n\/\/ servicesFunc returns or accumulates catalog services dependencies.\nfunc servicesFunc(brain *Brain,\n\tused, missing map[string]dep.Dependency) func(...string) ([]*dep.CatalogService, error) {\n\treturn func(s ...string) ([]*dep.CatalogService, error) {\n\t\tresult := make([]*dep.CatalogService, 0)\n\n\t\td, err := dep.ParseCatalogServices(s...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\taddDependency(used, d)\n\n\t\tif value, ok := brain.Recall(d); ok {\n\t\t\treturn value.([]*dep.CatalogService), nil\n\t\t}\n\n\t\taddDependency(missing, d)\n\n\t\treturn result, nil\n\t}\n}\n\n\/\/ treeFunc returns or accumulates keyPrefix dependencies.\nfunc treeFunc(brain *Brain,\n\tused, missing map[string]dep.Dependency) func(string) ([]*dep.KeyPair, error) {\n\treturn func(s string) ([]*dep.KeyPair, error) {\n\t\tresult := make([]*dep.KeyPair, 0)\n\n\t\tif len(s) == 0 {\n\t\t\treturn result, nil\n\t\t}\n\n\t\td, err := dep.ParseStoreKeyPrefix(s)\n\t\tif err != nil {\n\t\t\treturn result, err\n\t\t}\n\n\t\taddDependency(used, d)\n\n\t\t\/\/ Only return non-empty top-level keys\n\t\tif value, ok := brain.Recall(d); ok {\n\t\t\tfor _, pair := range value.([]*dep.KeyPair) {\n\t\t\t\tparts := strings.Split(pair.Key, \"\/\")\n\t\t\t\tif parts[len(parts)-1] != \"\" {\n\t\t\t\t\tresult = append(result, pair)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn result, nil\n\t\t}\n\n\t\taddDependency(missing, d)\n\n\t\treturn result, nil\n\t}\n\n}\n\n\/\/ byKey accepts a slice of KV pairs and returns a map of the top-level\n\/\/ key to all its subkeys. 
For example:\n\/\/\n\/\/\t\telasticsearch\/a \/\/=> \"1\"\n\/\/\t\telasticsearch\/b \/\/=> \"2\"\n\/\/\t\tredis\/a\/b \/\/=> \"3\"\n\/\/\n\/\/ Passing the result from Consul through byTag would yield:\n\/\/\n\/\/ \t\tmap[string]map[string]string{\n\/\/\t \t\"elasticsearch\": &dep.KeyPair{\"a\": \"1\"}, &dep.KeyPair{\"b\": \"2\"},\n\/\/\t\t\t\"redis\": &dep.KeyPair{\"a\/b\": \"3\"}\n\/\/\t\t}\n\/\/\n\/\/ Note that the top-most key is stripped from the Key value. Keys that have no\n\/\/ prefix after stripping are removed from the list.\nfunc byKey(pairs []*dep.KeyPair) (map[string][]*dep.KeyPair, error) {\n\tm := make(map[string][]*dep.KeyPair)\n\tfor _, pair := range pairs {\n\t\tparts := strings.Split(pair.Key, \"\/\")\n\t\ttop := parts[0]\n\t\tkey := strings.Join(parts[1:], \"\/\")\n\n\t\tif key == \"\" {\n\t\t\t\/\/ Do not add a key if it has no prefix after stripping.\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, ok := m[top]; !ok {\n\t\t\tm[top] = make([]*dep.KeyPair, 0, 1)\n\t\t}\n\t\tpair.Key = key\n\t\tm[top] = append(m[top], pair)\n\t}\n\treturn m, nil\n}\n\n\/\/ byTag is a template func that takes the provided services and\n\/\/ produces a map based on Service tags.\n\/\/\n\/\/ The map key is a string representing the service tag. The map value is a\n\/\/ slice of Services which have the tag assigned.\nfunc byTag(in []*dep.HealthService) (map[string][]*dep.HealthService, error) {\n\tm := make(map[string][]*dep.HealthService)\n\tfor _, s := range in {\n\t\tfor _, t := range s.Tags {\n\t\t\tm[t] = append(m[t], s)\n\t\t}\n\t}\n\treturn m, nil\n}\n\n\/\/ env returns the value of the environment variable set\nfunc env(s string) (string, error) {\n\treturn os.Getenv(s), nil\n}\n\n\/\/ loop accepts varying parameters and differs its behavior. If given one\n\/\/ parameter, loop will return a goroutine that begins at 0 and loops until the\n\/\/ given int, increasing the index by 1 each iteration. 
If given two parameters,\n\/\/ loop will return a goroutine that begins at the first parameter and loops\n\/\/ up to but not including the second parameter.\n\/\/\n\/\/ \/\/ Prints 0 1 2 3 4\n\/\/ \t\tfor _, i := range loop(5) {\n\/\/ \t\t\tprint(i)\n\/\/ \t\t}\n\/\/\n\/\/ \/\/ Prints 5 6 7\n\/\/ \t\tfor _, i := range loop(5, 8) {\n\/\/ \t\t\tprint(i)\n\/\/ \t\t}\n\/\/\nfunc loop(ints ...int) (<-chan int, error) {\n\tvar start, stop int\n\tswitch len(ints) {\n\tcase 1:\n\t\tstart, stop = 0, ints[0]\n\tcase 2:\n\t\tstart, stop = ints[0], ints[1]\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"loop: wrong number of arguments, expected 1 or 2\"+\n\t\t\t\", but got %d\", len(ints))\n\t}\n\n\tch := make(chan int)\n\n\tgo func() {\n\t\tfor i := start; i < stop; i++ {\n\t\t\tch <- i\n\t\t}\n\t\tclose(ch)\n\t}()\n\n\treturn ch, nil\n}\n\n\/\/ parseJSON returns a structure for valid JSON\nfunc parseJSON(s string) (interface{}, error) {\n\tif s == \"\" {\n\t\treturn make([]interface{}, 0), nil\n\t}\n\n\tvar data interface{}\n\tif err := json.Unmarshal([]byte(s), &data); err != nil {\n\t\treturn nil, err\n\t}\n\treturn data, nil\n}\n\n\/\/ replaceAll replaces all occurrences of a value in a string with the given\n\/\/ replacement value.\nfunc replaceAll(f, t, s string) (string, error) {\n\treturn strings.Replace(s, f, t, -1), nil\n}\n\n\/\/ regexReplaceAll replaces all occurrences of a regular expression with\n\/\/ the given replacement value.\nfunc regexReplaceAll(re, pl, s string) (string, error) {\n\tcompiled, err := regexp.Compile(re)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn compiled.ReplaceAllString(s, pl), nil\n}\n\n\/\/ timestamp returns the current UNIX timestamp in UTC. If an argument is\n\/\/ specified, it will be used to format the timestamp.\nfunc timestamp(s ...string) (string, error) {\n\tswitch len(s) {\n\tcase 0:\n\t\treturn now().Format(time.RFC3339), nil\n\tcase 1:\n\t\treturn now().Format(s[0]), nil\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"timestamp: wrong number of arguments, expected 0 or 1\"+\n\t\t\t\", but got %d\", len(s))\n\t}\n}\n\n\/\/ toLower converts the given string (usually by a pipe) to lowercase.\nfunc toLower(s string) (string, error) {\n\treturn strings.ToLower(s), nil\n}\n\n\/\/ toTitle converts the given string (usually by a pipe) to titlecase.\nfunc toTitle(s string) (string, error) {\n\treturn strings.Title(s), nil\n}\n\n\/\/ toUpper converts the given string (usually by a pipe) to uppercase.\nfunc toUpper(s string) (string, error) {\n\treturn strings.ToUpper(s), nil\n}\n\n\/\/ addDependency adds the given Dependency to the map.\nfunc addDependency(m map[string]dep.Dependency, d dep.Dependency) {\n\tif _, ok := m[d.HashCode()]; !ok {\n\t\tm[d.HashCode()] = d\n\t}\n}\n<commit_msg>checkpoint<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\tdep \"github.com\/hashicorp\/consul-template\/dependency\"\n\t\"github.com\/mitchellh\/copystructure\"\n)\n\n\/\/ now is function that represents the current time in UTC. 
This is here\n\/\/ primarily for the tests to override times.\nvar now = func() time.Time { return time.Now().UTC() }\n\n\/\/ datacentersFunc returns or accumulates datacenter dependencies.\nfunc datacentersFunc(brain *Brain,\n\tused, missing map[string]dep.Dependency) func(...string) ([]string, error) {\n\treturn func(s ...string) ([]string, error) {\n\t\tresult := make([]string, 0)\n\n\t\td, err := dep.ParseDatacenters(s...)\n\t\tif err != nil {\n\t\t\treturn result, err\n\t\t}\n\n\t\taddDependency(used, d)\n\n\t\tif value, ok := brain.Recall(d); ok {\n\t\t\treturn value.([]string), nil\n\t\t}\n\n\t\taddDependency(missing, d)\n\n\t\treturn result, nil\n\t}\n}\n\n\/\/ fileFunc returns or accumulates file dependencies.\nfunc fileFunc(brain *Brain,\n\tused, missing map[string]dep.Dependency) func(string) (string, error) {\n\treturn func(s string) (string, error) {\n\t\tif len(s) == 0 {\n\t\t\treturn \"\", nil\n\t\t}\n\n\t\td, err := dep.ParseFile(s)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\taddDependency(used, d)\n\n\t\tif value, ok := brain.Recall(d); ok {\n\t\t\tif value == nil {\n\t\t\t\treturn \"\", nil\n\t\t\t} else {\n\t\t\t\treturn value.(string), nil\n\t\t\t}\n\t\t}\n\n\t\taddDependency(missing, d)\n\n\t\treturn \"\", nil\n\t}\n}\n\n\/\/ keyFunc returns or accumulates key dependencies.\nfunc keyFunc(brain *Brain,\n\tused, missing map[string]dep.Dependency) func(string) (string, error) {\n\treturn func(s string) (string, error) {\n\t\tif len(s) == 0 {\n\t\t\treturn \"\", nil\n\t\t}\n\n\t\td, err := dep.ParseStoreKey(s)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\taddDependency(used, d)\n\n\t\tif value, ok := brain.Recall(d); ok {\n\t\t\tif value == nil {\n\t\t\t\treturn \"\", nil\n\t\t\t} else {\n\t\t\t\treturn value.(string), nil\n\t\t\t}\n\t\t}\n\n\t\taddDependency(missing, d)\n\n\t\treturn \"\", nil\n\t}\n}\n\n\/\/ lsFunc returns or accumulates keyPrefix dependencies.\nfunc lsFunc(brain *Brain,\n\tused, missing map[string]dep.Dependency) func(string) ([]*dep.KeyPair, error) {\n\treturn func(s string) ([]*dep.KeyPair, error) {\n\t\tresult := make([]*dep.KeyPair, 0)\n\n\t\tif len(s) == 0 {\n\t\t\treturn result, nil\n\t\t}\n\n\t\td, err := dep.ParseStoreKeyPrefix(s)\n\t\tif err != nil {\n\t\t\treturn result, err\n\t\t}\n\n\t\taddDependency(used, d)\n\n\t\t\/\/ Only return non-empty top-level keys\n\t\tif value, ok := brain.Recall(d); ok {\n\t\t\tfor _, pair := range value.([]*dep.KeyPair) {\n\t\t\t\tif pair.Key != \"\" && !strings.Contains(pair.Key, \"\/\") {\n\t\t\t\t\tresult = append(result, pair)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn result, nil\n\t\t}\n\n\t\taddDependency(missing, d)\n\n\t\treturn result, nil\n\t}\n}\n\n\/\/ nodesFunc returns or accumulates catalog node dependencies.\nfunc nodesFunc(brain *Brain,\n\tused, missing map[string]dep.Dependency) func(...string) ([]*dep.Node, error) {\n\treturn func(s ...string) ([]*dep.Node, error) {\n\t\tresult := make([]*dep.Node, 0)\n\n\t\td, err := dep.ParseCatalogNodes(s...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\taddDependency(used, d)\n\n\t\tif value, ok := brain.Recall(d); ok {\n\t\t\treturn value.([]*dep.Node), nil\n\t\t}\n\n\t\taddDependency(missing, d)\n\n\t\treturn result, nil\n\t}\n}\n\n\/\/ serviceFunc returns or accumulates health service dependencies.\nfunc serviceFunc(brain *Brain,\n\tused, missing map[string]dep.Dependency) func(...string) ([]*dep.HealthService, error) {\n\treturn func(s ...string) ([]*dep.HealthService, error) {\n\t\tresult := make([]*dep.HealthService, 
0)\n\n\t\tif len(s) == 0 || s[0] == \"\" {\n\t\t\treturn result, nil\n\t\t}\n\n\t\td, err := dep.ParseHealthServices(s...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\taddDependency(used, d)\n\n\t\tif value, ok := brain.Recall(d); ok {\n\t\t\treturn value.([]*dep.HealthService), nil\n\t\t}\n\n\t\taddDependency(missing, d)\n\n\t\treturn result, nil\n\t}\n}\n\n\/\/ servicesFunc returns or accumulates catalog services dependencies.\nfunc servicesFunc(brain *Brain,\n\tused, missing map[string]dep.Dependency) func(...string) ([]*dep.CatalogService, error) {\n\treturn func(s ...string) ([]*dep.CatalogService, error) {\n\t\tresult := make([]*dep.CatalogService, 0)\n\n\t\td, err := dep.ParseCatalogServices(s...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\taddDependency(used, d)\n\n\t\tif value, ok := brain.Recall(d); ok {\n\t\t\treturn value.([]*dep.CatalogService), nil\n\t\t}\n\n\t\taddDependency(missing, d)\n\n\t\treturn result, nil\n\t}\n}\n\n\/\/ treeFunc returns or accumulates keyPrefix dependencies.\nfunc treeFunc(brain *Brain,\n\tused, missing map[string]dep.Dependency) func(string) ([]*dep.KeyPair, error) {\n\treturn func(s string) ([]*dep.KeyPair, error) {\n\t\tresult := make([]*dep.KeyPair, 0)\n\n\t\tif len(s) == 0 {\n\t\t\treturn result, nil\n\t\t}\n\n\t\td, err := dep.ParseStoreKeyPrefix(s)\n\t\tif err != nil {\n\t\t\treturn result, err\n\t\t}\n\n\t\taddDependency(used, d)\n\n\t\t\/\/ Only return non-empty top-level keys\n\t\tif value, ok := brain.Recall(d); ok {\n\t\t\tfor _, pair := range value.([]*dep.KeyPair) {\n\t\t\t\tparts := strings.Split(pair.Key, \"\/\")\n\t\t\t\tif parts[len(parts)-1] != \"\" {\n\t\t\t\t\tresult = append(result, pair)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn result, nil\n\t\t}\n\n\t\taddDependency(missing, d)\n\n\t\treturn result, nil\n\t}\n\n}\n\n\/\/ byKey accepts a slice of KV pairs and returns a map of the top-level\n\/\/ key to all its subkeys. For example:\n\/\/\n\/\/\t\telasticsearch\/a \/\/=> \"1\"\n\/\/\t\telasticsearch\/b \/\/=> \"2\"\n\/\/\t\tredis\/a\/b \/\/=> \"3\"\n\/\/\n\/\/ Passing the result from Consul through byTag would yield:\n\/\/\n\/\/ \t\tmap[string]map[string]string{\n\/\/\t \t\"elasticsearch\": &dep.KeyPair{\"a\": \"1\"}, &dep.KeyPair{\"b\": \"2\"},\n\/\/\t\t\t\"redis\": &dep.KeyPair{\"a\/b\": \"3\"}\n\/\/\t\t}\n\/\/\n\/\/ Note that the top-most key is stripped from the Key value. 
Keys that have no\n\/\/ prefix after stripping are removed from the list.\nfunc byKey(pairs []*dep.KeyPair) (map[string][]*dep.KeyPair, error) {\n\tm := make(map[string][]*dep.KeyPair)\n\tfor _, pair := range pairs {\n\t\tparts := strings.Split(pair.Key, \"\/\")\n\t\ttop := parts[0]\n\t\tkey := strings.Join(parts[1:], \"\/\")\n\n\t\tif key == \"\" {\n\t\t\t\/\/ Do not add a key if it has no prefix after stripping.\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, ok := m[top]; !ok {\n\t\t\tm[top] = make([]*dep.KeyPair, 0, 1)\n\t\t}\n\n\t\tdup, err := copystructure.Copy(pair)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tnewPair := dup.(*dep.KeyPair)\n\n\t\tnewPair.Key = key\n\t\tm[top] = append(m[top], newPair)\n\t}\n\n\treturn m, nil\n}\n\n\/\/\n\/\/\t\telasticsearch\/a => \"1\"\n\/\/\t\telasticsearch\/b => \"2\"\n\/\/\t\telasticsearch\/c => \"2\"\n\/\/\t\tredis\/a\/b => \"3\"\n\/\/\t\tredis\/a\/c => \"3\"\n\/\/\n\/\/\n\/\/ {\n\/\/ \t\"elasticsearch\": {\n\/\/ \t\t\"a\": 1,\n\/\/ \t\t\"b\": 2,\n\/\/ \t\t\"c\": 3,\n\/\/ \t},\n\/\/ \t\"redis\": {\n\/\/ \t\t\"a\": {\n\/\/ \t\t\t\"b\": 3\n\/\/ \t\t\t\"c\": 3\n\/\/ \t\t}\n\/\/ \t}\n\/\/ }\n\/\/\n\nfunc by(pairs []*dep.KeyPair) (map[string]interface{}, error) {\n\tm := make(map[string]interface{})\n\n\tfor _, pair := range pairs {\n\t\tparts := strings.Split(pair.Key, \"\/\")\n\n\t\t\/\/ Create the deep map\n\t\tvar ref interface{}\n\t\t\/\/ ref := m\n\t\tfor i := 0; i < len(parts)-1; i++ {\n\t\t\ttyped := ref.(map[string]interface{})\n\t\t\tpart := parts[i]\n\t\t\tif _, ok := typed[part]; !ok {\n\t\t\t\ttyped[part] = make(map[string]interface{})\n\t\t\t}\n\t\t\tref = typed[part]\n\t\t}\n\n\t\tpart := parts[len(parts)-1]\n\t\tref.(map[string]interface{})[part] = &dep.KeyPair{\n\t\t\tPath: pair.Path,\n\t\t\tKey: part,\n\t\t\tValue: pair.Value,\n\t\t\tCreateIndex: pair.CreateIndex,\n\t\t\tModifyIndex: pair.ModifyIndex,\n\t\t\tLockIndex: pair.LockIndex,\n\t\t\tFlags: pair.Flags,\n\t\t\tSession: pair.Session,\n\t\t}\n\t}\n\n\tprintln(fmt.Sprintf(\"%#v\", m))\n\n\treturn m, nil\n}\n\n\/\/ byTag is a template func that takes the provided services and\n\/\/ produces a map based on Service tags.\n\/\/\n\/\/ The map key is a string representing the service tag. The map value is a\n\/\/ slice of Services which have the tag assigned.\nfunc byTag(in []*dep.HealthService) (map[string][]*dep.HealthService, error) {\n\tm := make(map[string][]*dep.HealthService)\n\tfor _, s := range in {\n\t\tfor _, t := range s.Tags {\n\t\t\tm[t] = append(m[t], s)\n\t\t}\n\t}\n\treturn m, nil\n}\n\n\/\/ env returns the value of the environment variable set\nfunc env(s string) (string, error) {\n\treturn os.Getenv(s), nil\n}\n\n\/\/ loop accepts varying parameters and differs its behavior. If given one\n\/\/ parameter, loop will return a goroutine that begins at 0 and loops until the\n\/\/ given int, increasing the index by 1 each iteration. 
If given two parameters,\n\/\/ loop will return a goroutine that begins at the first parameter and loops\n\/\/ up to but not including the second parameter.\n\/\/\n\/\/ \/\/ Prints 0 1 2 3 4\n\/\/ \t\tfor _, i := range loop(5) {\n\/\/ \t\t\tprint(i)\n\/\/ \t\t}\n\/\/\n\/\/ \/\/ Prints 5 6 7\n\/\/ \t\tfor _, i := range loop(5, 8) {\n\/\/ \t\t\tprint(i)\n\/\/ \t\t}\n\/\/\nfunc loop(ints ...int) (<-chan int, error) {\n\tvar start, stop int\n\tswitch len(ints) {\n\tcase 1:\n\t\tstart, stop = 0, ints[0]\n\tcase 2:\n\t\tstart, stop = ints[0], ints[1]\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"loop: wrong number of arguments, expected 1 or 2\"+\n\t\t\t\", but got %d\", len(ints))\n\t}\n\n\tch := make(chan int)\n\n\tgo func() {\n\t\tfor i := start; i < stop; i++ {\n\t\t\tch <- i\n\t\t}\n\t\tclose(ch)\n\t}()\n\n\treturn ch, nil\n}\n\n\/\/ parseJSON returns a structure for valid JSON\nfunc parseJSON(s string) (interface{}, error) {\n\tif s == \"\" {\n\t\treturn make([]interface{}, 0), nil\n\t}\n\n\tvar data interface{}\n\tif err := json.Unmarshal([]byte(s), &data); err != nil {\n\t\treturn nil, err\n\t}\n\treturn data, nil\n}\n\n\/\/ replaceAll replaces all occurrences of a value in a string with the given\n\/\/ replacement value.\nfunc replaceAll(f, t, s string) (string, error) {\n\treturn strings.Replace(s, f, t, -1), nil\n}\n\n\/\/ regexReplaceAll replaces all occurrences of a regular expression with\n\/\/ the given replacement value.\nfunc regexReplaceAll(re, pl, s string) (string, error) {\n\tcompiled, err := regexp.Compile(re)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn compiled.ReplaceAllString(s, pl), nil\n}\n\n\/\/ timestamp returns the current UNIX timestamp in UTC. If an argument is\n\/\/ specified, it will be used to format the timestamp.\nfunc timestamp(s ...string) (string, error) {\n\tswitch len(s) {\n\tcase 0:\n\t\treturn now().Format(time.RFC3339), nil\n\tcase 1:\n\t\treturn now().Format(s[0]), nil\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"timestamp: wrong number of arguments, expected 0 or 1\"+\n\t\t\t\", but got %d\", len(s))\n\t}\n}\n\n\/\/ toLower converts the given string (usually by a pipe) to lowercase.\nfunc toLower(s string) (string, error) {\n\treturn strings.ToLower(s), nil\n}\n\n\/\/ toTitle converts the given string (usually by a pipe) to titlecase.\nfunc toTitle(s string) (string, error) {\n\treturn strings.Title(s), nil\n}\n\n\/\/ toUpper converts the given string (usually by a pipe) to uppercase.\nfunc toUpper(s string) (string, error) {\n\treturn strings.ToUpper(s), nil\n}\n\n\/\/ addDependency adds the given Dependency to the map.\nfunc addDependency(m map[string]dep.Dependency, d dep.Dependency) {\n\tif _, ok := m[d.HashCode()]; !ok {\n\t\tm[d.HashCode()] = d\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\npackage acceptance\n\nimport 
(\n\t\"testing\"\n\n\t\"github.com\/GoogleCloudPlatform\/buildpacks\/internal\/acceptance\"\n)\n\nfunc init() {\n\tacceptance.DefineFlags()\n}\n\nfunc TestAcceptancePython(t *testing.T) {\n\tbuilder, cleanup := acceptance.CreateBuilder(t)\n\tt.Cleanup(cleanup)\n\n\ttestCases := []acceptance.Test{\n\t\t{\n\t\t\tName: \"entrypoint from procfile web\",\n\t\t\tApp: \"python\/simple\",\n\t\t\tMustUse: []string{pythonRuntime, pythonPIP, entrypoint},\n\t\t},\n\t\t{\n\t\t\tName: \"entrypoint from procfile custom\",\n\t\t\tApp: \"python\/simple\",\n\t\t\tPath: \"\/custom\",\n\t\t\tEntrypoint: \"custom\", \/\/ Must match the non-web process in Procfile.\n\t\t\tMustUse: []string{pythonRuntime, pythonPIP, entrypoint},\n\t\t},\n\t\t{\n\t\t\tName: \"entrypoint from env\",\n\t\t\tApp: \"python\/simple\",\n\t\t\tPath: \"\/custom\",\n\t\t\tEnv: []string{\"GOOGLE_ENTRYPOINT=gunicorn -b :8080 custom:app\"},\n\t\t\tMustUse: []string{pythonRuntime, pythonPIP, entrypoint},\n\t\t},\n\t\t{\n\t\t\tName: \"entrypoint with env var\",\n\t\t\tApp: \"python\/simple\",\n\t\t\tPath: \"\/env?want=bar\",\n\t\t\tEnv: []string{\"GOOGLE_ENTRYPOINT=FOO=bar gunicorn -b :8080 main:app\"},\n\t\t\tMustUse: []string{pythonRuntime, pythonPIP, entrypoint},\n\t\t},\n\t\t{\n\t\t\tName: \"runtime version from env\",\n\t\t\tApp: \"python\/version\",\n\t\t\tPath: \"\/version?want=3.8.0\",\n\t\t\tEnv: []string{\"GOOGLE_RUNTIME_VERSION=3.8.0\"},\n\t\t\tMustUse: []string{pythonRuntime, pythonPIP, entrypoint},\n\t\t},\n\t\t{\n\t\t\tName: \"runtime version from .python-version\",\n\t\t\tApp: \"python\/version\",\n\t\t\tPath: \"\/version?want=3.8.1\",\n\t\t\tMustUse: []string{pythonRuntime, pythonPIP, entrypoint},\n\t\t},\n\t\t{\n\t\t\tName: \"selected via GOOGLE_RUNTIME\",\n\t\t\tApp: \"override\",\n\t\t\tEnv: []string{\"GOOGLE_RUNTIME=python\", \"GOOGLE_ENTRYPOINT=gunicorn -b :8080 main:app\"},\n\t\t\tMustUse: []string{pythonRuntime},\n\t\t\tMustNotUse: []string{goRuntime, javaRuntime, nodeRuntime},\n\t\t},\n\t\t{\n\t\t\tName: \"python with client-side scripts correctly builds as a python app\",\n\t\t\tApp: \"python\/scripts\",\n\t\t\tEnv: []string{\"GOOGLE_ENTRYPOINT=gunicorn -b :8080 main:app\"},\n\t\t\tMustUse: []string{pythonRuntime, pythonPIP, entrypoint},\n\t\t},\n\t}\n\t\/\/ Tests for all published versions of Python.\n\t\/\/ Unlike with the other languages, we control the versions published to GCS.\n\tfor _, v := range []string{\n\t\t\"3.7.0\",\n\t\t\"3.7.1\",\n\t\t\"3.7.2\",\n\t\t\"3.7.3\",\n\t\t\"3.7.4\",\n\t\t\"3.7.5\",\n\t\t\"3.7.6\",\n\t\t\"3.7.7\",\n\t\t\"3.7.8\",\n\t\t\"3.7.9\",\n\t\t\"3.7.10\",\n\t\t\"3.8.0\",\n\t\t\"3.8.1\",\n\t\t\"3.8.2\",\n\t\t\"3.8.3\",\n\t\t\"3.8.4\",\n\t\t\"3.8.5\",\n\t\t\"3.8.6\",\n\t\t\"3.8.7\",\n\t\t\"3.8.8\",\n\t\t\"3.8.9\",\n\t\t\"3.8.10\",\n\t\t\"3.9.0\",\n\t\t\"3.9.1\",\n\t\t\"3.9.2\",\n\t\t\/\/ 3.9.3 is not currently available.\n\t\t\"3.9.4\",\n\t\t\"3.9.5\",\n\t} {\n\t\ttestCases = append(testCases, acceptance.Test{\n\t\t\tName: \"runtime version \" + v,\n\t\t\tApp: \"python\/version\",\n\t\t\tPath: \"\/version?want=\" + v,\n\t\t\tEnv: []string{\"GOOGLE_RUNTIME_VERSION=\" + v},\n\t\t\tMustUse: []string{pythonRuntime, pythonPIP, entrypoint},\n\t\t})\n\t}\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\t\tt.Run(tc.Name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tacceptance.TestApp(t, builder, tc)\n\t\t})\n\t}\n}\n\nfunc TestFailuresPython(t *testing.T) {\n\tbuilder, cleanup := acceptance.CreateBuilder(t)\n\tt.Cleanup(cleanup)\n\n\ttestCases := []acceptance.FailureTest{\n\t\t{\n\t\t\tName: 
\"bad runtime version\",\n\t\t\tApp: \"python\/simple\",\n\t\t\tEnv: []string{\"GOOGLE_RUNTIME_VERSION=BAD_NEWS_BEARS\", \"GOOGLE_ENTRYPOINT=gunicorn -b :8080 main:app\"},\n\t\t\tMustMatch: \"Runtime version BAD_NEWS_BEARS does not exist\",\n\t\t},\n\t\t{\n\t\t\tName: \"python-version empty\",\n\t\t\tApp: \"python\/empty_version\",\n\t\t\tMustMatch: \".python-version exists but does not specify a version\",\n\t\t},\n\t\t{\n\t\t\tName: \"missing entrypoint\",\n\t\t\tApp: \"python\/missing_entrypoint\",\n\t\t\tMustMatch: `for Python, an entrypoint must be manually set, either with \"GOOGLE_ENTRYPOINT\" env var or by creating a \"Procfile\" file`,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\t\tt.Run(tc.Name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tacceptance.TestBuildFailure(t, builder, tc)\n\t\t})\n\t}\n}\n<commit_msg>Sync python runtime buildpack acceptance tests with most up to date published versions.<commit_after>\/\/ Copyright 2020 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\npackage acceptance\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/GoogleCloudPlatform\/buildpacks\/internal\/acceptance\"\n)\n\nfunc init() {\n\tacceptance.DefineFlags()\n}\n\nfunc TestAcceptancePython(t *testing.T) {\n\tbuilder, cleanup := acceptance.CreateBuilder(t)\n\tt.Cleanup(cleanup)\n\n\ttestCases := []acceptance.Test{\n\t\t{\n\t\t\tName: \"entrypoint from procfile web\",\n\t\t\tApp: \"python\/simple\",\n\t\t\tMustUse: []string{pythonRuntime, pythonPIP, entrypoint},\n\t\t},\n\t\t{\n\t\t\tName: \"entrypoint from procfile custom\",\n\t\t\tApp: \"python\/simple\",\n\t\t\tPath: \"\/custom\",\n\t\t\tEntrypoint: \"custom\", \/\/ Must match the non-web process in Procfile.\n\t\t\tMustUse: []string{pythonRuntime, pythonPIP, entrypoint},\n\t\t},\n\t\t{\n\t\t\tName: \"entrypoint from env\",\n\t\t\tApp: \"python\/simple\",\n\t\t\tPath: \"\/custom\",\n\t\t\tEnv: []string{\"GOOGLE_ENTRYPOINT=gunicorn -b :8080 custom:app\"},\n\t\t\tMustUse: []string{pythonRuntime, pythonPIP, entrypoint},\n\t\t},\n\t\t{\n\t\t\tName: \"entrypoint with env var\",\n\t\t\tApp: \"python\/simple\",\n\t\t\tPath: \"\/env?want=bar\",\n\t\t\tEnv: []string{\"GOOGLE_ENTRYPOINT=FOO=bar gunicorn -b :8080 main:app\"},\n\t\t\tMustUse: []string{pythonRuntime, pythonPIP, entrypoint},\n\t\t},\n\t\t{\n\t\t\tName: \"runtime version from env\",\n\t\t\tApp: \"python\/version\",\n\t\t\tPath: \"\/version?want=3.8.0\",\n\t\t\tEnv: []string{\"GOOGLE_RUNTIME_VERSION=3.8.0\"},\n\t\t\tMustUse: []string{pythonRuntime, pythonPIP, entrypoint},\n\t\t},\n\t\t{\n\t\t\tName: \"runtime version from .python-version\",\n\t\t\tApp: \"python\/version\",\n\t\t\tPath: \"\/version?want=3.8.1\",\n\t\t\tMustUse: []string{pythonRuntime, pythonPIP, entrypoint},\n\t\t},\n\t\t{\n\t\t\tName: \"selected via GOOGLE_RUNTIME\",\n\t\t\tApp: \"override\",\n\t\t\tEnv: []string{\"GOOGLE_RUNTIME=python\", \"GOOGLE_ENTRYPOINT=gunicorn -b :8080 main:app\"},\n\t\t\tMustUse: []string{pythonRuntime},\n\t\t\tMustNotUse: 
[]string{goRuntime, javaRuntime, nodeRuntime},\n\t\t},\n\t\t{\n\t\t\tName: \"python with client-side scripts correctly builds as a python app\",\n\t\t\tApp: \"python\/scripts\",\n\t\t\tEnv: []string{\"GOOGLE_ENTRYPOINT=gunicorn -b :8080 main:app\"},\n\t\t\tMustUse: []string{pythonRuntime, pythonPIP, entrypoint},\n\t\t},\n\t}\n\t\/\/ Tests for two most recent published patch versions of Python.\n\t\/\/ Unlike with the other languages, we control the versions published to GCS.\n\tfor _, v := range []string{\n\t\t\"3.7.11\",\n\t\t\"3.7.12\",\n\t\t\"3.8.11\",\n\t\t\"3.8.12\",\n\t\t\"3.9.6\",\n\t\t\"3.9.7\",\n\t\t\"3.9.8\",\n\t} {\n\t\ttestCases = append(testCases, acceptance.Test{\n\t\t\tName: \"runtime version \" + v,\n\t\t\tApp: \"python\/version\",\n\t\t\tPath: \"\/version?want=\" + v,\n\t\t\tEnv: []string{\"GOOGLE_RUNTIME_VERSION=\" + v},\n\t\t\tMustUse: []string{pythonRuntime, pythonPIP, entrypoint},\n\t\t})\n\t}\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\t\tt.Run(tc.Name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tacceptance.TestApp(t, builder, tc)\n\t\t})\n\t}\n}\n\nfunc TestFailuresPython(t *testing.T) {\n\tbuilder, cleanup := acceptance.CreateBuilder(t)\n\tt.Cleanup(cleanup)\n\n\ttestCases := []acceptance.FailureTest{\n\t\t{\n\t\t\tName: \"bad runtime version\",\n\t\t\tApp: \"python\/simple\",\n\t\t\tEnv: []string{\"GOOGLE_RUNTIME_VERSION=BAD_NEWS_BEARS\", \"GOOGLE_ENTRYPOINT=gunicorn -b :8080 main:app\"},\n\t\t\tMustMatch: \"Runtime version BAD_NEWS_BEARS does not exist\",\n\t\t},\n\t\t{\n\t\t\tName: \"python-version empty\",\n\t\t\tApp: \"python\/empty_version\",\n\t\t\tMustMatch: \".python-version exists but does not specify a version\",\n\t\t},\n\t\t{\n\t\t\tName: \"missing entrypoint\",\n\t\t\tApp: \"python\/missing_entrypoint\",\n\t\t\tMustMatch: `for Python, an entrypoint must be manually set, either with \"GOOGLE_ENTRYPOINT\" env var or by creating a \"Procfile\" file`,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\t\tt.Run(tc.Name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tacceptance.TestBuildFailure(t, builder, tc)\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package llbsolver\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/containerd\/containerd\/platforms\"\n\t\"github.com\/moby\/buildkit\/solver\"\n\t\"github.com\/moby\/buildkit\/solver\/pb\"\n\t\"github.com\/moby\/buildkit\/source\"\n\t\"github.com\/moby\/buildkit\/util\/entitlements\"\n\tdigest \"github.com\/opencontainers\/go-digest\"\n\tspecs \"github.com\/opencontainers\/image-spec\/specs-go\/v1\"\n\t\"github.com\/pkg\/errors\"\n)\n\ntype vertex struct {\n\tsys interface{}\n\toptions solver.VertexOptions\n\tinputs []solver.Edge\n\tdigest digest.Digest\n\tname string\n}\n\nfunc (v *vertex) Digest() digest.Digest {\n\treturn v.digest\n}\n\nfunc (v *vertex) Sys() interface{} {\n\treturn v.sys\n}\n\nfunc (v *vertex) Options() solver.VertexOptions {\n\treturn v.options\n}\n\nfunc (v *vertex) Inputs() []solver.Edge {\n\treturn v.inputs\n}\n\nfunc (v *vertex) Name() string {\n\tif name, ok := v.options.Description[\"llb.customname\"]; ok {\n\t\treturn name\n\t}\n\treturn v.name\n}\n\ntype LoadOpt func(*pb.Op, *pb.OpMetadata, *solver.VertexOptions) error\n\nfunc WithValidateCaps() LoadOpt {\n\tcs := pb.Caps.CapSet(pb.Caps.All())\n\treturn func(_ *pb.Op, md *pb.OpMetadata, opt *solver.VertexOptions) error {\n\t\tif md != nil {\n\t\t\tfor c := range md.Caps {\n\t\t\t\tif err := cs.Supports(c); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn 
nil\n\t}\n}\n\nfunc WithCacheSources(cms []solver.CacheManager) LoadOpt {\n\treturn func(_ *pb.Op, _ *pb.OpMetadata, opt *solver.VertexOptions) error {\n\t\topt.CacheSources = cms\n\t\treturn nil\n\t}\n}\n\nfunc RuntimePlatforms(p []specs.Platform) LoadOpt {\n\tvar defaultPlatform *pb.Platform\n\tpp := make([]specs.Platform, len(p))\n\tfor i := range p {\n\t\tpp[i] = platforms.Normalize(p[i])\n\t}\n\treturn func(op *pb.Op, _ *pb.OpMetadata, opt *solver.VertexOptions) error {\n\t\tif op.Platform == nil {\n\t\t\tif defaultPlatform == nil {\n\t\t\t\tp := platforms.DefaultSpec()\n\t\t\t\tdefaultPlatform = &pb.Platform{\n\t\t\t\t\tOS: p.OS,\n\t\t\t\t\tArchitecture: p.Architecture,\n\t\t\t\t}\n\t\t\t}\n\t\t\top.Platform = defaultPlatform\n\t\t}\n\t\tif _, ok := op.Op.(*pb.Op_Exec); ok {\n\t\t\tvar found bool\n\t\t\tfor _, pp := range pp {\n\t\t\t\tif pp.OS == op.Platform.OS && pp.Architecture == op.Platform.Architecture && pp.Variant == op.Platform.Variant {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\treturn errors.Errorf(\"runtime execution on platform %s not supported\", platforms.Format(specs.Platform{OS: op.Platform.OS, Architecture: op.Platform.Architecture, Variant: op.Platform.Variant}))\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc ValidateEntitlements(ent entitlements.Set) LoadOpt {\n\treturn func(op *pb.Op, _ *pb.OpMetadata, opt *solver.VertexOptions) error {\n\t\tswitch op := op.Op.(type) {\n\t\tcase *pb.Op_Exec:\n\t\t\tif op.Exec.Network == pb.NetMode_HOST {\n\t\t\t\tif !ent.Allowed(entitlements.EntitlementNetworkHost) {\n\t\t\t\t\treturn errors.Errorf(\"%s is not allowed\", entitlements.EntitlementNetworkHost)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif op.Exec.Network == pb.NetMode_NONE {\n\t\t\t\tif !ent.Allowed(entitlements.EntitlementNetworkNone) {\n\t\t\t\t\treturn errors.Errorf(\"%s is not allowed\", entitlements.EntitlementNetworkNone)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc Load(def *pb.Definition, opts ...LoadOpt) (solver.Edge, error) {\n\treturn loadLLB(def, func(dgst digest.Digest, pbOp *pb.Op, load func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error) {\n\t\topMetadata := def.Metadata[dgst]\n\t\tvtx, err := newVertex(dgst, pbOp, &opMetadata, load, opts...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn vtx, nil\n\t})\n}\n\nfunc newVertex(dgst digest.Digest, op *pb.Op, opMeta *pb.OpMetadata, load func(digest.Digest) (solver.Vertex, error), opts ...LoadOpt) (*vertex, error) {\n\topt := solver.VertexOptions{}\n\tif opMeta != nil {\n\t\topt.IgnoreCache = opMeta.IgnoreCache\n\t\topt.Description = opMeta.Description\n\t\tif opMeta.ExportCache != nil {\n\t\t\topt.ExportCache = &opMeta.ExportCache.Value\n\t\t}\n\t}\n\tfor _, fn := range opts {\n\t\tif err := fn(op, opMeta, &opt); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tvtx := &vertex{sys: op, options: opt, digest: dgst, name: llbOpName(op)}\n\tfor _, in := range op.Inputs {\n\t\tsub, err := load(in.Digest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvtx.inputs = append(vtx.inputs, solver.Edge{Index: solver.Index(in.Index), Vertex: sub})\n\t}\n\treturn vtx, nil\n}\n\n\/\/ loadLLB loads LLB.\n\/\/ fn is executed sequentially.\nfunc loadLLB(def *pb.Definition, fn func(digest.Digest, *pb.Op, func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error)) (solver.Edge, error) {\n\tif len(def.Def) == 0 {\n\t\treturn solver.Edge{}, errors.New(\"invalid empty definition\")\n\t}\n\n\tallOps := 
make(map[digest.Digest]*pb.Op)\n\n\tvar dgst digest.Digest\n\n\tfor _, dt := range def.Def {\n\t\tvar op pb.Op\n\t\tif err := (&op).Unmarshal(dt); err != nil {\n\t\t\treturn solver.Edge{}, errors.Wrap(err, \"failed to parse llb proto op\")\n\t\t}\n\t\tdgst = digest.FromBytes(dt)\n\t\tallOps[dgst] = &op\n\t}\n\n\tlastOp := allOps[dgst]\n\tdelete(allOps, dgst)\n\tdgst = lastOp.Inputs[0].Digest\n\n\tcache := make(map[digest.Digest]solver.Vertex)\n\n\tvar rec func(dgst digest.Digest) (solver.Vertex, error)\n\trec = func(dgst digest.Digest) (solver.Vertex, error) {\n\t\tif v, ok := cache[dgst]; ok {\n\t\t\treturn v, nil\n\t\t}\n\t\top, ok := allOps[dgst]\n\t\tif !ok {\n\t\t\treturn nil, errors.Errorf(\"invalid missing input digest %s\", dgst)\n\t\t}\n\t\tv, err := fn(dgst, op, rec)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcache[dgst] = v\n\t\treturn v, nil\n\t}\n\n\tv, err := rec(dgst)\n\tif err != nil {\n\t\treturn solver.Edge{}, err\n\t}\n\treturn solver.Edge{Vertex: v, Index: solver.Index(lastOp.Inputs[0].Index)}, nil\n}\n\nfunc llbOpName(op *pb.Op) string {\n\tswitch op := op.Op.(type) {\n\tcase *pb.Op_Source:\n\t\tif id, err := source.FromLLB(op, nil); err == nil {\n\t\t\tif id, ok := id.(*source.LocalIdentifier); ok {\n\t\t\t\tif len(id.IncludePatterns) == 1 {\n\t\t\t\t\treturn op.Source.Identifier + \" (\" + id.IncludePatterns[0] + \")\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn op.Source.Identifier\n\tcase *pb.Op_Exec:\n\t\treturn strings.Join(op.Exec.Meta.Args, \" \")\n\tcase *pb.Op_Build:\n\t\treturn \"build\"\n\tdefault:\n\t\treturn \"unknown\"\n\t}\n}\n<commit_msg>Don't miss the Variant for arm2 platform<commit_after>package llbsolver\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/containerd\/containerd\/platforms\"\n\t\"github.com\/moby\/buildkit\/solver\"\n\t\"github.com\/moby\/buildkit\/solver\/pb\"\n\t\"github.com\/moby\/buildkit\/source\"\n\t\"github.com\/moby\/buildkit\/util\/entitlements\"\n\tdigest \"github.com\/opencontainers\/go-digest\"\n\tspecs \"github.com\/opencontainers\/image-spec\/specs-go\/v1\"\n\t\"github.com\/pkg\/errors\"\n)\n\ntype vertex struct {\n\tsys interface{}\n\toptions solver.VertexOptions\n\tinputs []solver.Edge\n\tdigest digest.Digest\n\tname string\n}\n\nfunc (v *vertex) Digest() digest.Digest {\n\treturn v.digest\n}\n\nfunc (v *vertex) Sys() interface{} {\n\treturn v.sys\n}\n\nfunc (v *vertex) Options() solver.VertexOptions {\n\treturn v.options\n}\n\nfunc (v *vertex) Inputs() []solver.Edge {\n\treturn v.inputs\n}\n\nfunc (v *vertex) Name() string {\n\tif name, ok := v.options.Description[\"llb.customname\"]; ok {\n\t\treturn name\n\t}\n\treturn v.name\n}\n\ntype LoadOpt func(*pb.Op, *pb.OpMetadata, *solver.VertexOptions) error\n\nfunc WithValidateCaps() LoadOpt {\n\tcs := pb.Caps.CapSet(pb.Caps.All())\n\treturn func(_ *pb.Op, md *pb.OpMetadata, opt *solver.VertexOptions) error {\n\t\tif md != nil {\n\t\t\tfor c := range md.Caps {\n\t\t\t\tif err := cs.Supports(c); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc WithCacheSources(cms []solver.CacheManager) LoadOpt {\n\treturn func(_ *pb.Op, _ *pb.OpMetadata, opt *solver.VertexOptions) error {\n\t\topt.CacheSources = cms\n\t\treturn nil\n\t}\n}\n\nfunc RuntimePlatforms(p []specs.Platform) LoadOpt {\n\tvar defaultPlatform *pb.Platform\n\tpp := make([]specs.Platform, len(p))\n\tfor i := range p {\n\t\tpp[i] = platforms.Normalize(p[i])\n\t}\n\treturn func(op *pb.Op, _ *pb.OpMetadata, opt *solver.VertexOptions) error {\n\t\tif op.Platform == nil 
{\n\t\t\tif defaultPlatform == nil {\n\t\t\t\tp := platforms.DefaultSpec()\n\t\t\t\tdefaultPlatform = &pb.Platform{\n\t\t\t\t\tOS: p.OS,\n\t\t\t\t\tArchitecture: p.Architecture,\n\t\t\t\t\tVariant: p.Variant,\n\t\t\t\t}\n\t\t\t}\n\t\t\top.Platform = defaultPlatform\n\t\t}\n\t\tplatform := specs.Platform{OS: op.Platform.OS, Architecture: op.Platform.Architecture, Variant: op.Platform.Variant}\n\t\tnormalizedPlatform := platforms.Normalize(platform)\n\n\t\top.Platform = &pb.Platform{\n\t\t\tOS: normalizedPlatform.OS,\n\t\t\tArchitecture: normalizedPlatform.Architecture,\n\t\t\tVariant: normalizedPlatform.Variant,\n\t\t}\n\n\t\tif _, ok := op.Op.(*pb.Op_Exec); ok {\n\t\t\tvar found bool\n\t\t\tfor _, pp := range pp {\n\t\t\t\tif pp.OS == op.Platform.OS && pp.Architecture == op.Platform.Architecture && pp.Variant == op.Platform.Variant {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\treturn errors.Errorf(\"runtime execution on platform %s not supported\", platforms.Format(specs.Platform{OS: op.Platform.OS, Architecture: op.Platform.Architecture, Variant: op.Platform.Variant}))\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc ValidateEntitlements(ent entitlements.Set) LoadOpt {\n\treturn func(op *pb.Op, _ *pb.OpMetadata, opt *solver.VertexOptions) error {\n\t\tswitch op := op.Op.(type) {\n\t\tcase *pb.Op_Exec:\n\t\t\tif op.Exec.Network == pb.NetMode_HOST {\n\t\t\t\tif !ent.Allowed(entitlements.EntitlementNetworkHost) {\n\t\t\t\t\treturn errors.Errorf(\"%s is not allowed\", entitlements.EntitlementNetworkHost)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif op.Exec.Network == pb.NetMode_NONE {\n\t\t\t\tif !ent.Allowed(entitlements.EntitlementNetworkNone) {\n\t\t\t\t\treturn errors.Errorf(\"%s is not allowed\", entitlements.EntitlementNetworkNone)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc Load(def *pb.Definition, opts ...LoadOpt) (solver.Edge, error) {\n\treturn loadLLB(def, func(dgst digest.Digest, pbOp *pb.Op, load func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error) {\n\t\topMetadata := def.Metadata[dgst]\n\t\tvtx, err := newVertex(dgst, pbOp, &opMetadata, load, opts...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn vtx, nil\n\t})\n}\n\nfunc newVertex(dgst digest.Digest, op *pb.Op, opMeta *pb.OpMetadata, load func(digest.Digest) (solver.Vertex, error), opts ...LoadOpt) (*vertex, error) {\n\topt := solver.VertexOptions{}\n\tif opMeta != nil {\n\t\topt.IgnoreCache = opMeta.IgnoreCache\n\t\topt.Description = opMeta.Description\n\t\tif opMeta.ExportCache != nil {\n\t\t\topt.ExportCache = &opMeta.ExportCache.Value\n\t\t}\n\t}\n\tfor _, fn := range opts {\n\t\tif err := fn(op, opMeta, &opt); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tvtx := &vertex{sys: op, options: opt, digest: dgst, name: llbOpName(op)}\n\tfor _, in := range op.Inputs {\n\t\tsub, err := load(in.Digest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvtx.inputs = append(vtx.inputs, solver.Edge{Index: solver.Index(in.Index), Vertex: sub})\n\t}\n\treturn vtx, nil\n}\n\n\/\/ loadLLB loads LLB.\n\/\/ fn is executed sequentially.\nfunc loadLLB(def *pb.Definition, fn func(digest.Digest, *pb.Op, func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error)) (solver.Edge, error) {\n\tif len(def.Def) == 0 {\n\t\treturn solver.Edge{}, errors.New(\"invalid empty definition\")\n\t}\n\n\tallOps := make(map[digest.Digest]*pb.Op)\n\n\tvar dgst digest.Digest\n\n\tfor _, dt := range def.Def {\n\t\tvar op pb.Op\n\t\tif err := (&op).Unmarshal(dt); err != nil 
{\n\t\t\treturn solver.Edge{}, errors.Wrap(err, \"failed to parse llb proto op\")\n\t\t}\n\t\tdgst = digest.FromBytes(dt)\n\t\tallOps[dgst] = &op\n\t}\n\n\tlastOp := allOps[dgst]\n\tdelete(allOps, dgst)\n\tdgst = lastOp.Inputs[0].Digest\n\n\tcache := make(map[digest.Digest]solver.Vertex)\n\n\tvar rec func(dgst digest.Digest) (solver.Vertex, error)\n\trec = func(dgst digest.Digest) (solver.Vertex, error) {\n\t\tif v, ok := cache[dgst]; ok {\n\t\t\treturn v, nil\n\t\t}\n\t\top, ok := allOps[dgst]\n\t\tif !ok {\n\t\t\treturn nil, errors.Errorf(\"invalid missing input digest %s\", dgst)\n\t\t}\n\t\tv, err := fn(dgst, op, rec)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcache[dgst] = v\n\t\treturn v, nil\n\t}\n\n\tv, err := rec(dgst)\n\tif err != nil {\n\t\treturn solver.Edge{}, err\n\t}\n\treturn solver.Edge{Vertex: v, Index: solver.Index(lastOp.Inputs[0].Index)}, nil\n}\n\nfunc llbOpName(op *pb.Op) string {\n\tswitch op := op.Op.(type) {\n\tcase *pb.Op_Source:\n\t\tif id, err := source.FromLLB(op, nil); err == nil {\n\t\t\tif id, ok := id.(*source.LocalIdentifier); ok {\n\t\t\t\tif len(id.IncludePatterns) == 1 {\n\t\t\t\t\treturn op.Source.Identifier + \" (\" + id.IncludePatterns[0] + \")\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn op.Source.Identifier\n\tcase *pb.Op_Exec:\n\t\treturn strings.Join(op.Exec.Meta.Args, \" \")\n\tcase *pb.Op_Build:\n\t\treturn \"build\"\n\tdefault:\n\t\treturn \"unknown\"\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package terraform\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/terraform\/config\"\n)\n\n\/\/ ResourceProvisionerConfig is used to pair a provisioner\n\/\/ with its provided configuration. This allows us to use singleton\n\/\/ instances of each ResourceProvisioner and to keep the relevant\n\/\/ configuration instead of instantiating a new Provisioner for each\n\/\/ resource.\ntype ResourceProvisionerConfig struct {\n\tType string\n\tProvisioner ResourceProvisioner\n\tConfig *ResourceConfig\n\tRawConfig *config.RawConfig\n\tConnInfo *config.RawConfig\n}\n\n\/\/ Resource encapsulates a resource, its configuration, its provider,\n\/\/ its current state, and potentially a desired diff from the state it\n\/\/ wants to reach.\ntype Resource struct {\n\t\/\/ These are all used by the new EvalNode stuff.\n\tName string\n\tType string\n\tCountIndex int\n\n\t\/\/ These aren't really used anymore anywhere, but we keep them around\n\t\/\/ since we haven't done a proper cleanup yet.\n\tId string\n\tInfo *InstanceInfo\n\tConfig *ResourceConfig\n\tDependencies []string\n\tDiff *InstanceDiff\n\tProvider ResourceProvider\n\tState *InstanceState\n\tProvisioners []*ResourceProvisionerConfig\n\tFlags ResourceFlag\n}\n\n\/\/ ResourceFlag specifies what kind of instance we're working with, whether\n\/\/ it's a primary instance, a tainted instance, or an orphan.\ntype ResourceFlag byte\n\nconst (\n\tFlagPrimary ResourceFlag = 1 << iota\n\tFlagTainted\n\tFlagOrphan\n\tFlagReplacePrimary\n\tFlagDeposed\n)\n\n\/\/ InstanceInfo is used to hold information about the instance and\/or\n\/\/ resource being modified.\ntype InstanceInfo struct {\n\t\/\/ Id is a unique name to represent this instance. 
This is not related\n\t\/\/ to InstanceState.ID in any way.\n\tId string\n\n\t\/\/ ModulePath is the complete path of the module containing this\n\t\/\/ instance.\n\tModulePath []string\n\n\t\/\/ Type is the resource type of this instance\n\tType string\n}\n\n\/\/ HumanId is a unique Id that is human-friendly and useful for UI elements.\nfunc (i *InstanceInfo) HumanId() string {\n\tif len(i.ModulePath) <= 1 {\n\t\treturn i.Id\n\t}\n\n\treturn fmt.Sprintf(\n\t\t\"module.%s.%s\",\n\t\tstrings.Join(i.ModulePath[1:], \".\"),\n\t\ti.Id)\n}\n\n\/\/ ResourceConfig holds the configuration given for a resource. This is\n\/\/ done instead of a raw `map[string]interface{}` type so that rich\n\/\/ methods can be added to it to make dealing with it easier.\ntype ResourceConfig struct {\n\tComputedKeys []string\n\tRaw map[string]interface{}\n\tConfig map[string]interface{}\n\n\traw *config.RawConfig\n}\n\n\/\/ NewResourceConfig creates a new ResourceConfig from a config.RawConfig.\nfunc NewResourceConfig(c *config.RawConfig) *ResourceConfig {\n\tresult := &ResourceConfig{raw: c}\n\tresult.interpolateForce()\n\treturn result\n}\n\n\/\/ CheckSet checks that the given list of configuration keys is\n\/\/ properly set. If not, errors are returned for each unset key.\n\/\/\n\/\/ This is useful to be called in the Validate method of a ResourceProvider.\nfunc (c *ResourceConfig) CheckSet(keys []string) []error {\n\tvar errs []error\n\n\tfor _, k := range keys {\n\t\tif !c.IsSet(k) {\n\t\t\terrs = append(errs, fmt.Errorf(\"%s must be set\", k))\n\t\t}\n\t}\n\n\treturn errs\n}\n\n\/\/ Get looks up a configuration value by key and returns the value.\n\/\/\n\/\/ The second return value is true if the get was successful. Get will\n\/\/ not succeed if the value is being computed.\nfunc (c *ResourceConfig) Get(k string) (interface{}, bool) {\n\t\/\/ First try to get it from c.Config since that has interpolated values\n\tresult, ok := c.get(k, c.Config)\n\tif ok {\n\t\treturn result, ok\n\t}\n\n\t\/\/ Otherwise, just get it from the raw config\n\treturn c.get(k, c.Raw)\n}\n\n\/\/ GetRaw looks up a configuration value by key and returns the value,\n\/\/ from the raw, uninterpolated config.\n\/\/\n\/\/ The second return value is true if the get was successful. Get will\n\/\/ not succeed if the value is being computed.\nfunc (c *ResourceConfig) GetRaw(k string) (interface{}, bool) {\n\treturn c.get(k, c.Raw)\n}\n\n\/\/ IsComputed returns whether the given key is computed or not.\nfunc (c *ResourceConfig) IsComputed(k string) bool {\n\t_, ok := c.get(k, c.Config)\n\t_, okRaw := c.get(k, c.Raw)\n\treturn !ok && okRaw\n}\n\n\/\/ IsSet checks if the key in the configuration is set. 
A key is set if\n\/\/ it has a value or the value is being computed (is unknown currently).\n\/\/\n\/\/ This function should be used rather than checking the keys of the\n\/\/ raw configuration itself, since a key may be omitted from the raw\n\/\/ configuration if it is being computed.\nfunc (c *ResourceConfig) IsSet(k string) bool {\n\tif c == nil {\n\t\treturn false\n\t}\n\n\tfor _, ck := range c.ComputedKeys {\n\t\tif ck == k {\n\t\t\treturn true\n\t\t}\n\t}\n\n\tif _, ok := c.Get(k); ok {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc (c *ResourceConfig) get(\n\tk string, raw map[string]interface{}) (interface{}, bool) {\n\tparts := strings.Split(k, \".\")\n\tif len(parts) == 1 && parts[0] == \"\" {\n\t\tparts = nil\n\t}\n\n\tvar current interface{} = raw\n\tfor _, part := range parts {\n\t\tif current == nil {\n\t\t\treturn nil, false\n\t\t}\n\n\t\tcv := reflect.ValueOf(current)\n\t\tswitch cv.Kind() {\n\t\tcase reflect.Map:\n\t\t\tv := cv.MapIndex(reflect.ValueOf(part))\n\t\t\tif !v.IsValid() {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tcurrent = v.Interface()\n\t\tcase reflect.Slice:\n\t\t\tif part == \"#\" {\n\t\t\t\tcurrent = cv.Len()\n\t\t\t} else {\n\t\t\t\ti, err := strconv.ParseInt(part, 0, 0)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, false\n\t\t\t\t}\n\t\t\t\tif i >= int64(cv.Len()) {\n\t\t\t\t\treturn nil, false\n\t\t\t\t}\n\t\t\t\tcurrent = cv.Index(int(i)).Interface()\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"Unknown kind: %s\", cv.Kind()))\n\t\t}\n\t}\n\n\treturn current, true\n}\n\n\/\/ interpolateForce is a temporary thing. We want to get rid of interpolate\n\/\/ above and likewise this, but it can only be done after the f-ast-graph\n\/\/ refactor is complete.\nfunc (c *ResourceConfig) interpolateForce() {\n\tif c.raw == nil {\n\t\tvar err error\n\t\tc.raw, err = config.NewRawConfig(make(map[string]interface{}))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tc.ComputedKeys = c.raw.UnknownKeys()\n\tc.Raw = c.raw.Raw\n\tc.Config = c.raw.Config()\n}\n<commit_msg>core: Allow \".\" character in map keys<commit_after>package terraform\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/terraform\/config\"\n)\n\n\/\/ ResourceProvisionerConfig is used to pair a provisioner\n\/\/ with its provided configuration. 
This allows us to use singleton\n\/\/ instances of each ResourceProvisioner and to keep the relevant\n\/\/ configuration instead of instantiating a new Provisioner for each\n\/\/ resource.\ntype ResourceProvisionerConfig struct {\n\tType string\n\tProvisioner ResourceProvisioner\n\tConfig *ResourceConfig\n\tRawConfig *config.RawConfig\n\tConnInfo *config.RawConfig\n}\n\n\/\/ Resource encapsulates a resource, its configuration, its provider,\n\/\/ its current state, and potentially a desired diff from the state it\n\/\/ wants to reach.\ntype Resource struct {\n\t\/\/ These are all used by the new EvalNode stuff.\n\tName string\n\tType string\n\tCountIndex int\n\n\t\/\/ These aren't really used anymore anywhere, but we keep them around\n\t\/\/ since we haven't done a proper cleanup yet.\n\tId string\n\tInfo *InstanceInfo\n\tConfig *ResourceConfig\n\tDependencies []string\n\tDiff *InstanceDiff\n\tProvider ResourceProvider\n\tState *InstanceState\n\tProvisioners []*ResourceProvisionerConfig\n\tFlags ResourceFlag\n}\n\n\/\/ ResourceFlag specifies what kind of instance we're working with, whether\n\/\/ it's a primary instance, a tainted instance, or an orphan.\ntype ResourceFlag byte\n\n\/\/ InstanceInfo is used to hold information about the instance and\/or\n\/\/ resource being modified.\ntype InstanceInfo struct {\n\t\/\/ Id is a unique name to represent this instance. This is not related\n\t\/\/ to InstanceState.ID in any way.\n\tId string\n\n\t\/\/ ModulePath is the complete path of the module containing this\n\t\/\/ instance.\n\tModulePath []string\n\n\t\/\/ Type is the resource type of this instance\n\tType string\n}\n\n\/\/ HumanId is a unique Id that is human-friendly and useful for UI elements.\nfunc (i *InstanceInfo) HumanId() string {\n\tif len(i.ModulePath) <= 1 {\n\t\treturn i.Id\n\t}\n\n\treturn fmt.Sprintf(\n\t\t\"module.%s.%s\",\n\t\tstrings.Join(i.ModulePath[1:], \".\"),\n\t\ti.Id)\n}\n\n\/\/ ResourceConfig holds the configuration given for a resource. This is\n\/\/ done instead of a raw `map[string]interface{}` type so that rich\n\/\/ methods can be added to it to make dealing with it easier.\ntype ResourceConfig struct {\n\tComputedKeys []string\n\tRaw map[string]interface{}\n\tConfig map[string]interface{}\n\n\traw *config.RawConfig\n}\n\n\/\/ NewResourceConfig creates a new ResourceConfig from a config.RawConfig.\nfunc NewResourceConfig(c *config.RawConfig) *ResourceConfig {\n\tresult := &ResourceConfig{raw: c}\n\tresult.interpolateForce()\n\treturn result\n}\n\n\/\/ CheckSet checks that the given list of configuration keys is\n\/\/ properly set. If not, errors are returned for each unset key.\n\/\/\n\/\/ This is useful to be called in the Validate method of a ResourceProvider.\nfunc (c *ResourceConfig) CheckSet(keys []string) []error {\n\tvar errs []error\n\n\tfor _, k := range keys {\n\t\tif !c.IsSet(k) {\n\t\t\terrs = append(errs, fmt.Errorf(\"%s must be set\", k))\n\t\t}\n\t}\n\n\treturn errs\n}\n\n\/\/ Get looks up a configuration value by key and returns the value.\n\/\/\n\/\/ The second return value is true if the get was successful. 
Get will\n\/\/ not succeed if the value is being computed.\nfunc (c *ResourceConfig) Get(k string) (interface{}, bool) {\n\t\/\/ First try to get it from c.Config since that has interpolated values\n\tresult, ok := c.get(k, c.Config)\n\tif ok {\n\t\treturn result, ok\n\t}\n\n\t\/\/ Otherwise, just get it from the raw config\n\treturn c.get(k, c.Raw)\n}\n\n\/\/ GetRaw looks up a configuration value by key and returns the value,\n\/\/ from the raw, uninterpolated config.\n\/\/\n\/\/ The second return value is true if the get was successful. Get will\n\/\/ not succeed if the value is being computed.\nfunc (c *ResourceConfig) GetRaw(k string) (interface{}, bool) {\n\treturn c.get(k, c.Raw)\n}\n\n\/\/ IsComputed returns whether the given key is computed or not.\nfunc (c *ResourceConfig) IsComputed(k string) bool {\n\t_, ok := c.get(k, c.Config)\n\t_, okRaw := c.get(k, c.Raw)\n\treturn !ok && okRaw\n}\n\n\/\/ IsSet checks if the key in the configuration is set. A key is set if\n\/\/ it has a value or the value is being computed (is unknown currently).\n\/\/\n\/\/ This function should be used rather than checking the keys of the\n\/\/ raw configuration itself, since a key may be omitted from the raw\n\/\/ configuration if it is being computed.\nfunc (c *ResourceConfig) IsSet(k string) bool {\n\tif c == nil {\n\t\treturn false\n\t}\n\n\tfor _, ck := range c.ComputedKeys {\n\t\tif ck == k {\n\t\t\treturn true\n\t\t}\n\t}\n\n\tif _, ok := c.Get(k); ok {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc (c *ResourceConfig) get(\n\tk string, raw map[string]interface{}) (interface{}, bool) {\n\tparts := strings.Split(k, \".\")\n\tif len(parts) == 1 && parts[0] == \"\" {\n\t\tparts = nil\n\t}\n\n\tvar current interface{} = raw\n\tvar previous interface{} = nil\n\tfor i, part := range parts {\n\t\tif current == nil {\n\t\t\treturn nil, false\n\t\t}\n\n\t\tcv := reflect.ValueOf(current)\n\t\tswitch cv.Kind() {\n\t\tcase reflect.Map:\n\t\t\tprevious = current\n\t\t\tv := cv.MapIndex(reflect.ValueOf(part))\n\t\t\tif !v.IsValid() {\n\t\t\t\tif i > 0 && i != (len(parts)-1) {\n\t\t\t\t\ttryKey := strings.Join(parts[i:], \".\")\n\t\t\t\t\tv := cv.MapIndex(reflect.ValueOf(tryKey))\n\t\t\t\t\tif !v.IsValid() {\n\t\t\t\t\t\treturn nil, false\n\t\t\t\t\t}\n\t\t\t\t\treturn v.Interface(), true\n\t\t\t\t}\n\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tcurrent = v.Interface()\n\t\tcase reflect.Slice:\n\t\t\tprevious = current\n\t\t\tif part == \"#\" {\n\t\t\t\tcurrent = cv.Len()\n\t\t\t} else {\n\t\t\t\ti, err := strconv.ParseInt(part, 0, 0)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, false\n\t\t\t\t}\n\t\t\t\tif i >= int64(cv.Len()) {\n\t\t\t\t\treturn nil, false\n\t\t\t\t}\n\t\t\t\tcurrent = cv.Index(int(i)).Interface()\n\t\t\t}\n\t\tcase reflect.String:\n\t\t\t\/\/ This happens when map keys contain \".\" and have a common\n\t\t\t\/\/ prefix so were split as path components above.\n\t\t\tactualKey := strings.Join(parts[i-1:], \".\")\n\t\t\tif prevMap, ok := previous.(map[string]interface{}); ok {\n\t\t\t\treturn prevMap[actualKey], true\n\t\t\t}\n\t\t\treturn nil, false\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"Unknown kind: %s\", cv.Kind()))\n\t\t}\n\t}\n\n\treturn current, true\n}\n\n\/\/ interpolateForce is a temporary thing. 
We want to get rid of interpolate\n\/\/ above and likewise this, but it can only be done after the f-ast-graph\n\/\/ refactor is complete.\nfunc (c *ResourceConfig) interpolateForce() {\n\tif c.raw == nil {\n\t\tvar err error\n\t\tc.raw, err = config.NewRawConfig(make(map[string]interface{}))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tc.ComputedKeys = c.raw.UnknownKeys()\n\tc.Raw = c.raw.Raw\n\tc.Config = c.raw.Config()\n}\n<|endoftext|>"} {"text":"<commit_before>package nux\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/toolkits\/file\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype Mem struct {\n\tBuffers uint64\n\tCached uint64\n\tMemTotal uint64\n\tMemFree uint64\n\tSwapTotal uint64\n\tSwapUsed uint64\n\tSwapFree uint64\n\tMemAvailable uint64\n}\n\nfunc (this *Mem) String() string {\n\treturn fmt.Sprintf(\"<MemTotal:%d, MemFree:%d, MemAvailable:%s, Buffers:%d, Cached:%d...>\", this.MemTotal, this.MemFree, this.MemAvailable, this.Buffers, this.Cached)\n}\n\nvar Multi uint64 = 1024\n\nvar WANT = map[string]struct{}{\n\t\"Buffers:\": struct{}{},\n\t\"Cached:\": struct{}{},\n\t\"MemTotal:\": struct{}{},\n\t\"MemFree:\": struct{}{},\n\t\"SwapTotal:\": struct{}{},\n\t\"SwapFree:\": struct{}{},\n\t\"MemAvailable:\": struct{}{},\n}\n\nfunc MemInfo() (*Mem, error) {\n\tcontents, err := ioutil.ReadFile(\"\/proc\/meminfo\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmemInfo := &Mem{}\n\n\treader := bufio.NewReader(bytes.NewBuffer(contents))\n\n\tfor {\n\t\tline, err := file.ReadLine(reader)\n\t\tif err == io.EOF {\n\t\t\terr = nil\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfields := strings.Fields(string(line))\n\t\tfieldName := fields[0]\n\n\t\t_, ok := WANT[fieldName]\n\t\tif ok && len(fields) == 3 {\n\t\t\tval, numerr := strconv.ParseUint(fields[1], 10, 64)\n\t\t\tif numerr != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch fieldName {\n\t\t\tcase \"Buffers:\":\n\t\t\t\tmemInfo.Buffers = val * Multi\n\t\t\tcase \"Cached:\":\n\t\t\t\tmemInfo.Cached = val * Multi\n\t\t\tcase \"MemTotal:\":\n\t\t\t\tmemInfo.MemTotal = val * Multi\n\t\t\tcase \"MemFree:\":\n\t\t\t\tmemInfo.MemFree = val * Multi\n\t\t\tcase \"SwapTotal:\":\n\t\t\t\tmemInfo.SwapTotal = val * Multi\n\t\t\tcase \"SwapFree:\":\n\t\t\t\tmemInfo.SwapFree = val * Multi\n\t\t\tcase \"MemAvailable:\":\n\t\t\t\tmemInfo.MemAvailable = val * Multi\n\t\t\t}\n\t\t}\n\t}\n\n\tmemInfo.SwapUsed = memInfo.SwapTotal - memInfo.SwapFree\n\n\treturn memInfo, nil\n}\n<commit_msg>Update meminfo.go<commit_after>package nux\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/toolkits\/file\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype Mem struct {\n\tBuffers uint64\n\tCached uint64\n\tMemTotal uint64\n\tMemFree uint64\n\tMemAvailable uint64\n\tSwapTotal uint64\n\tSwapUsed uint64\n\tSwapFree uint64\n}\n\nfunc (this *Mem) String() string {\n\treturn fmt.Sprintf(\"<MemTotal:%d, MemFree:%d, MemAvailable:%d, Buffers:%d, Cached:%d...>\", this.MemTotal, this.MemFree, this.MemAvailable, this.Buffers, this.Cached)\n}\n\nvar Multi uint64 = 1024\n\nvar WANT = map[string]struct{}{\n\t\"Buffers:\": struct{}{},\n\t\"Cached:\": struct{}{},\n\t\"MemTotal:\": struct{}{},\n\t\"MemFree:\": struct{}{},\n\t\"MemAvailable:\": struct{}{},\n\t\"SwapTotal:\": struct{}{},\n\t\"SwapFree:\": struct{}{},\n}\n\nfunc MemInfo() (*Mem, error) {\n\tcontents, err := ioutil.ReadFile(\"\/proc\/meminfo\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmemInfo 
:= &Mem{}\n\n\treader := bufio.NewReader(bytes.NewBuffer(contents))\n\n\tfor {\n\t\tline, err := file.ReadLine(reader)\n\t\tif err == io.EOF {\n\t\t\terr = nil\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfields := strings.Fields(string(line))\n\t\tfieldName := fields[0]\n\n\t\t_, ok := WANT[fieldName]\n\t\tif ok && len(fields) == 3 {\n\t\t\tval, numerr := strconv.ParseUint(fields[1], 10, 64)\n\t\t\tif numerr != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch fieldName {\n\t\t\tcase \"Buffers:\":\n\t\t\t\tmemInfo.Buffers = val * Multi\n\t\t\tcase \"Cached:\":\n\t\t\t\tmemInfo.Cached = val * Multi\n\t\t\tcase \"MemTotal:\":\n\t\t\t\tmemInfo.MemTotal = val * Multi\n\t\t\tcase \"MemFree:\":\n\t\t\t\tmemInfo.MemFree = val * Multi\n\t\t\tcase \"MemAvailable:\":\n\t\t\t\tmemInfo.MemAvailable = val * Multi\n\t\t\tcase \"SwapTotal:\":\n\t\t\t\tmemInfo.SwapTotal = val * Multi\n\t\t\tcase \"SwapFree:\":\n\t\t\t\tmemInfo.SwapFree = val * Multi\n\t\t\t}\n\t\t}\n\t}\n\n\tmemInfo.SwapUsed = memInfo.SwapTotal - memInfo.SwapFree\n\n\treturn memInfo, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e\n\nimport (\n\t. \"github.com\/onsi\/ginkgo\"\n\t. 
\"github.com\/onsi\/gomega\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\n\t\"github.com\/kubernetes-sigs\/volcano\/pkg\/apis\/batch\/v1alpha1\"\n)\n\nvar _ = Describe(\"Job E2E Test: Test Admission service\", func() {\n\tIt(\"Duplicated Task Name\", func() {\n\t\tjobName := \"job-duplicated\"\n\t\tnamespace := \"test\"\n\t\tcontext := initTestContext()\n\t\tdefer cleanupTestContext(context)\n\t\trep := clusterSize(context, oneCPU)\n\n\t\t_, err := createJobInner(context, &jobSpec{\n\t\t\tnamespace: namespace,\n\t\t\tname: jobName,\n\t\t\ttasks: []taskSpec{\n\t\t\t\t{\n\t\t\t\t\timg: defaultNginxImage,\n\t\t\t\t\treq: oneCPU,\n\t\t\t\t\tmin: rep,\n\t\t\t\t\trep: rep,\n\t\t\t\t\tname: \"duplicated\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\timg: defaultNginxImage,\n\t\t\t\t\treq: oneCPU,\n\t\t\t\t\tmin: rep,\n\t\t\t\t\trep: rep,\n\t\t\t\t\tname: \"duplicated\",\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tExpect(err).To(HaveOccurred())\n\t\tstError, ok := err.(*errors.StatusError)\n\t\tExpect(ok).To(Equal(true))\n\t\tExpect(stError.ErrStatus.Code).To(Equal(int32(500)))\n\t\tExpect(stError.ErrStatus.Message).To(ContainSubstring(\"duplicated task name\"))\n\t})\n\n\tIt(\"Duplicated Policy Event\", func() {\n\t\tjobName := \"job-policy-duplicated\"\n\t\tnamespace := \"test\"\n\t\tcontext := initTestContext()\n\t\tdefer cleanupTestContext(context)\n\t\trep := clusterSize(context, oneCPU)\n\n\t\t_, err := createJobInner(context, &jobSpec{\n\t\t\tnamespace: namespace,\n\t\t\tname: jobName,\n\t\t\ttasks: []taskSpec{\n\t\t\t\t{\n\t\t\t\t\timg: defaultNginxImage,\n\t\t\t\t\treq: oneCPU,\n\t\t\t\t\tmin: rep,\n\t\t\t\t\trep: rep,\n\t\t\t\t\tname: \"taskname\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tpolicies: []v1alpha1.LifecyclePolicy{\n\t\t\t\t{\n\t\t\t\t\tEvent: v1alpha1.PodFailedEvent,\n\t\t\t\t\tAction: v1alpha1.AbortJobAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tEvent: v1alpha1.PodFailedEvent,\n\t\t\t\t\tAction: v1alpha1.RestartJobAction,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tExpect(err).To(HaveOccurred())\n\t\tstError, ok := err.(*errors.StatusError)\n\t\tExpect(ok).To(Equal(true))\n\t\tExpect(stError.ErrStatus.Code).To(Equal(int32(500)))\n\t\tExpect(stError.ErrStatus.Message).To(ContainSubstring(\"duplicated job event policies\"))\n\t})\n\n\tIt(\"Min Available illegal\", func() {\n\t\tjobName := \"job-min-illegal\"\n\t\tnamespace := \"test\"\n\t\tcontext := initTestContext()\n\t\tdefer cleanupTestContext(context)\n\t\trep := clusterSize(context, oneCPU)\n\n\t\t_, err := createJobInner(context, &jobSpec{\n\t\t\tmin: rep * 2,\n\t\t\tnamespace: namespace,\n\t\t\tname: jobName,\n\t\t\ttasks: []taskSpec{\n\t\t\t\t{\n\t\t\t\t\timg: defaultNginxImage,\n\t\t\t\t\treq: oneCPU,\n\t\t\t\t\tmin: rep,\n\t\t\t\t\trep: rep,\n\t\t\t\t\tname: \"taskname\",\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tExpect(err).To(HaveOccurred())\n\t\tstError, ok := err.(*errors.StatusError)\n\t\tExpect(ok).To(Equal(true))\n\t\tExpect(stError.ErrStatus.Code).To(Equal(int32(500)))\n\t\tExpect(stError.ErrStatus.Message).To(ContainSubstring(\"'minAvailable' should not be greater than total replicas in tasks\"))\n\t})\n\n\tIt(\"Job Plugin illegal\", func() {\n\t\tjobName := \"job-plugin-illegal\"\n\t\tnamespace := \"test\"\n\t\tcontext := initTestContext()\n\t\tdefer cleanupTestContext(context)\n\n\t\t_, err := createJobInner(context, &jobSpec{\n\t\t\tmin: 1,\n\t\t\tnamespace: namespace,\n\t\t\tname: jobName,\n\t\t\tplugins: map[string][]string{\n\t\t\t\t\"big_plugin\": {},\n\t\t\t},\n\t\t\ttasks: []taskSpec{\n\t\t\t\t{\n\t\t\t\t\timg: 
defaultNginxImage,\n\t\t\t\t\treq: oneCPU,\n\t\t\t\t\tmin: 1,\n\t\t\t\t\trep: 1,\n\t\t\t\t\tname: \"taskname\",\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tExpect(err).To(HaveOccurred())\n\t\tstError, ok := err.(*errors.StatusError)\n\t\tExpect(ok).To(Equal(true))\n\t\tExpect(stError.ErrStatus.Code).To(Equal(int32(500)))\n\t\tExpect(stError.ErrStatus.Message).To(ContainSubstring(\"unable to find job plugin: big_plugin\"))\n\t})\n})\n<commit_msg>fix admission e2e<commit_after>\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e\n\nimport (\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\n\t\"github.com\/kubernetes-sigs\/volcano\/pkg\/apis\/batch\/v1alpha1\"\n)\n\nvar _ = Describe(\"Job E2E Test: Test Admission service\", func() {\n\tIt(\"Duplicated Task Name\", func() {\n\t\tjobName := \"job-duplicated\"\n\t\tnamespace := \"test\"\n\t\tcontext := initTestContext()\n\t\tdefer cleanupTestContext(context)\n\t\trep := clusterSize(context, oneCPU)\n\n\t\t_, err := createJobInner(context, &jobSpec{\n\t\t\tnamespace: namespace,\n\t\t\tname: jobName,\n\t\t\ttasks: []taskSpec{\n\t\t\t\t{\n\t\t\t\t\timg: defaultNginxImage,\n\t\t\t\t\treq: oneCPU,\n\t\t\t\t\tmin: rep,\n\t\t\t\t\trep: rep,\n\t\t\t\t\tname: \"duplicated\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\timg: defaultNginxImage,\n\t\t\t\t\treq: oneCPU,\n\t\t\t\t\tmin: rep,\n\t\t\t\t\trep: rep,\n\t\t\t\t\tname: \"duplicated\",\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tExpect(err).To(HaveOccurred())\n\t\tstError, ok := err.(*errors.StatusError)\n\t\tExpect(ok).To(Equal(true))\n\t\tExpect(stError.ErrStatus.Code).To(Equal(int32(500)))\n\t\tExpect(stError.ErrStatus.Message).To(ContainSubstring(\"duplicated task name\"))\n\t})\n\n\tIt(\"Duplicated Policy Event\", func() {\n\t\tjobName := \"job-policy-duplicated\"\n\t\tnamespace := \"test\"\n\t\tcontext := initTestContext()\n\t\tdefer cleanupTestContext(context)\n\t\trep := clusterSize(context, oneCPU)\n\n\t\t_, err := createJobInner(context, &jobSpec{\n\t\t\tnamespace: namespace,\n\t\t\tname: jobName,\n\t\t\ttasks: []taskSpec{\n\t\t\t\t{\n\t\t\t\t\timg: defaultNginxImage,\n\t\t\t\t\treq: oneCPU,\n\t\t\t\t\tmin: rep,\n\t\t\t\t\trep: rep,\n\t\t\t\t\tname: \"taskname\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tpolicies: []v1alpha1.LifecyclePolicy{\n\t\t\t\t{\n\t\t\t\t\tEvent: v1alpha1.PodFailedEvent,\n\t\t\t\t\tAction: v1alpha1.AbortJobAction,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tEvent: v1alpha1.PodFailedEvent,\n\t\t\t\t\tAction: v1alpha1.RestartJobAction,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tExpect(err).To(HaveOccurred())\n\t\tstError, ok := err.(*errors.StatusError)\n\t\tExpect(ok).To(Equal(true))\n\t\tExpect(stError.ErrStatus.Code).To(Equal(int32(500)))\n\t\tExpect(stError.ErrStatus.Message).To(ContainSubstring(\"duplicate event PodFailed\"))\n\t})\n\n\tIt(\"Min Available illegal\", func() {\n\t\tjobName := \"job-min-illegal\"\n\t\tnamespace := \"test\"\n\t\tcontext := initTestContext()\n\t\tdefer 
cleanupTestContext(context)\n\t\trep := clusterSize(context, oneCPU)\n\n\t\t_, err := createJobInner(context, &jobSpec{\n\t\t\tmin: rep * 2,\n\t\t\tnamespace: namespace,\n\t\t\tname: jobName,\n\t\t\ttasks: []taskSpec{\n\t\t\t\t{\n\t\t\t\t\timg: defaultNginxImage,\n\t\t\t\t\treq: oneCPU,\n\t\t\t\t\tmin: rep,\n\t\t\t\t\trep: rep,\n\t\t\t\t\tname: \"taskname\",\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tExpect(err).To(HaveOccurred())\n\t\tstError, ok := err.(*errors.StatusError)\n\t\tExpect(ok).To(Equal(true))\n\t\tExpect(stError.ErrStatus.Code).To(Equal(int32(500)))\n\t\tExpect(stError.ErrStatus.Message).To(ContainSubstring(\"'minAvailable' should not be greater than total replicas in tasks\"))\n\t})\n\n\tIt(\"Job Plugin illegal\", func() {\n\t\tjobName := \"job-plugin-illegal\"\n\t\tnamespace := \"test\"\n\t\tcontext := initTestContext()\n\t\tdefer cleanupTestContext(context)\n\n\t\t_, err := createJobInner(context, &jobSpec{\n\t\t\tmin: 1,\n\t\t\tnamespace: namespace,\n\t\t\tname: jobName,\n\t\t\tplugins: map[string][]string{\n\t\t\t\t\"big_plugin\": {},\n\t\t\t},\n\t\t\ttasks: []taskSpec{\n\t\t\t\t{\n\t\t\t\t\timg: defaultNginxImage,\n\t\t\t\t\treq: oneCPU,\n\t\t\t\t\tmin: 1,\n\t\t\t\t\trep: 1,\n\t\t\t\t\tname: \"taskname\",\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tExpect(err).To(HaveOccurred())\n\t\tstError, ok := err.(*errors.StatusError)\n\t\tExpect(ok).To(Equal(true))\n\t\tExpect(stError.ErrStatus.Code).To(Equal(int32(500)))\n\t\tExpect(stError.ErrStatus.Message).To(ContainSubstring(\"unable to find job plugin: big_plugin\"))\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package s3api\n\nimport (\n\t\"context\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/filer\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/filer_pb\"\n\txhttp \"github.com\/chrislusf\/seaweedfs\/weed\/s3api\/http\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/s3api\/s3err\"\n)\n\ntype ListBucketResultV2 struct {\n\tXMLName xml.Name `xml:\"http:\/\/s3.amazonaws.com\/doc\/2006-03-01\/ ListBucketResult\"`\n\tName string `xml:\"Name\"`\n\tPrefix string `xml:\"Prefix\"`\n\tMaxKeys int `xml:\"MaxKeys\"`\n\tDelimiter string `xml:\"Delimiter,omitempty\"`\n\tIsTruncated bool `xml:\"IsTruncated\"`\n\tContents []ListEntry `xml:\"Contents,omitempty\"`\n\tCommonPrefixes []PrefixEntry `xml:\"CommonPrefixes,omitempty\"`\n\tContinuationToken string `xml:\"ContinuationToken,omitempty\"`\n\tNextContinuationToken string `xml:\"NextContinuationToken,omitempty\"`\n\tKeyCount int `xml:\"KeyCount\"`\n\tStartAfter string `xml:\"StartAfter,omitempty\"`\n}\n\nfunc (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {\n\n\t\/\/ https:\/\/docs.aws.amazon.com\/AmazonS3\/latest\/API\/v2-RESTBucketGET.html\n\n\t\/\/ collect parameters\n\tbucket, _ := getBucketAndObject(r)\n\tglog.V(3).Infof(\"ListObjectsV2Handler %s\", bucket)\n\n\toriginalPrefix, continuationToken, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query())\n\n\tif maxKeys < 0 {\n\t\ts3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)\n\t\treturn\n\t}\n\tif delimiter != \"\" && delimiter != \"\/\" {\n\t\ts3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)\n\t\treturn\n\t}\n\n\tmarker := continuationToken\n\tif continuationToken == \"\" {\n\t\tmarker = startAfter\n\t}\n\n\tresponse, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, 
marker, delimiter)\n\n\tif err != nil {\n\t\ts3err.WriteErrorResponse(w, r, s3err.ErrInternalError)\n\t\treturn\n\t}\n\n\tif len(response.Contents) == 0 {\n\t\tif exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {\n\t\t\ts3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)\n\t\t\treturn\n\t\t}\n\t}\n\n\tresponseV2 := &ListBucketResultV2{\n\t\tXMLName: response.XMLName,\n\t\tName: response.Name,\n\t\tCommonPrefixes: response.CommonPrefixes,\n\t\tContents: response.Contents,\n\t\tContinuationToken: continuationToken,\n\t\tDelimiter: response.Delimiter,\n\t\tIsTruncated: response.IsTruncated,\n\t\tKeyCount: len(response.Contents) + len(response.CommonPrefixes),\n\t\tMaxKeys: response.MaxKeys,\n\t\tNextContinuationToken: response.NextMarker,\n\t\tPrefix: response.Prefix,\n\t\tStartAfter: startAfter,\n\t}\n\n\twriteSuccessResponseXML(w, r, responseV2)\n}\n\nfunc (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {\n\n\t\/\/ https:\/\/docs.aws.amazon.com\/AmazonS3\/latest\/API\/RESTBucketGET.html\n\n\t\/\/ collect parameters\n\tbucket, _ := getBucketAndObject(r)\n\tglog.V(3).Infof(\"ListObjectsV1Handler %s\", bucket)\n\n\toriginalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query())\n\n\tif maxKeys < 0 {\n\t\ts3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)\n\t\treturn\n\t}\n\tif delimiter != \"\" && delimiter != \"\/\" {\n\t\ts3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)\n\t\treturn\n\t}\n\n\tresponse, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter)\n\n\tif err != nil {\n\t\ts3err.WriteErrorResponse(w, r, s3err.ErrInternalError)\n\t\treturn\n\t}\n\n\tif len(response.Contents) == 0 {\n\t\tif exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {\n\t\t\ts3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)\n\t\t\treturn\n\t\t}\n\t}\n\n\twriteSuccessResponseXML(w, r, response)\n}\n\nfunc (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, maxKeys int, marker string, delimiter string) (response ListBucketResult, err error) {\n\t\/\/ convert full path prefix into directory name and prefix for entry name\n\treqDir, prefix := filepath.Split(originalPrefix)\n\tif strings.HasPrefix(reqDir, \"\/\") {\n\t\treqDir = reqDir[1:]\n\t}\n\tbucketPrefix := fmt.Sprintf(\"%s\/%s\/\", s3a.option.BucketsPath, bucket)\n\treqDir = fmt.Sprintf(\"%s%s\", bucketPrefix, reqDir)\n\tif strings.HasSuffix(reqDir, \"\/\") {\n\t\t\/\/ remove trailing \"\/\"\n\t\treqDir = reqDir[:len(reqDir)-1]\n\t}\n\n\tvar contents []ListEntry\n\tvar commonPrefixes []PrefixEntry\n\tvar isTruncated bool\n\tvar doErr error\n\tvar nextMarker string\n\n\t\/\/ check filer\n\terr = s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {\n\n\t\t_, isTruncated, nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, maxKeys, marker, delimiter, func(dir string, entry *filer_pb.Entry) {\n\t\t\tif entry.IsDirectory {\n\t\t\t\tif delimiter == \"\/\" {\n\t\t\t\t\tcommonPrefixes = append(commonPrefixes, PrefixEntry{\n\t\t\t\t\t\tPrefix: fmt.Sprintf(\"%s\/%s\/\", dir, entry.Name)[len(bucketPrefix):],\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tstorageClass := \"STANDARD\"\n\t\t\t\tif v, ok := entry.Extended[xhttp.AmzStorageClass]; ok {\n\t\t\t\t\tstorageClass = string(v)\n\t\t\t\t}\n\t\t\t\tcontents = append(contents, ListEntry{\n\t\t\t\t\tKey: fmt.Sprintf(\"%s\/%s\", dir, 
entry.Name)[len(bucketPrefix):],\n\t\t\t\t\tLastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),\n\t\t\t\t\tETag: \"\\\"\" + filer.ETag(entry) + \"\\\"\",\n\t\t\t\t\tSize: int64(filer.FileSize(entry)),\n\t\t\t\t\tOwner: CanonicalUser{\n\t\t\t\t\t\tID: fmt.Sprintf(\"%x\", entry.Attributes.Uid),\n\t\t\t\t\t\tDisplayName: entry.Attributes.UserName,\n\t\t\t\t\t},\n\t\t\t\t\tStorageClass: StorageClass(storageClass),\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t\tif doErr != nil {\n\t\t\treturn doErr\n\t\t}\n\n\t\tif !isTruncated {\n\t\t\tnextMarker = \"\"\n\t\t}\n\n\t\tresponse = ListBucketResult{\n\t\t\tName: bucket,\n\t\t\tPrefix: originalPrefix,\n\t\t\tMarker: marker,\n\t\t\tNextMarker: nextMarker,\n\t\t\tMaxKeys: maxKeys,\n\t\t\tDelimiter: delimiter,\n\t\t\tIsTruncated: isTruncated,\n\t\t\tContents: contents,\n\t\t\tCommonPrefixes: commonPrefixes,\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn\n}\n\nfunc (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, maxKeys int, marker, delimiter string, eachEntryFn func(dir string, entry *filer_pb.Entry)) (counter int, isTruncated bool, nextMarker string, err error) {\n\t\/\/ invariants\n\t\/\/ prefix and marker should be under dir, marker may contain \"\/\"\n\t\/\/ maxKeys should be updated for each recursion\n\n\tif prefix == \"\/\" && delimiter == \"\/\" {\n\t\treturn\n\t}\n\tif maxKeys <= 0 {\n\t\treturn\n\t}\n\n\tif strings.Contains(marker, \"\/\") {\n\t\tsepIndex := strings.Index(marker, \"\/\")\n\t\tsubDir, subMarker := marker[0:sepIndex], marker[sepIndex+1:]\n\t\t\/\/ println(\"doListFilerEntries dir\", dir+\"\/\"+subDir, \"subMarker\", subMarker, \"maxKeys\", maxKeys)\n\t\tsubCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+\"\/\"+subDir, \"\", maxKeys, subMarker, delimiter, eachEntryFn)\n\t\tif subErr != nil {\n\t\t\terr = subErr\n\t\t\treturn\n\t\t}\n\t\tisTruncated = isTruncated || subIsTruncated\n\t\tmaxKeys -= subCounter\n\t\tnextMarker = subDir + \"\/\" + subNextMarker\n\t\t\/\/ finished processing this sub directory\n\t\tmarker = subDir\n\t}\n\n\t\/\/ now marker is also a direct child of dir\n\trequest := &filer_pb.ListEntriesRequest{\n\t\tDirectory: dir,\n\t\tPrefix: prefix,\n\t\tLimit: uint32(maxKeys + 1),\n\t\tStartFromFileName: marker,\n\t\tInclusiveStartFrom: false,\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tstream, listErr := client.ListEntries(ctx, request)\n\tif listErr != nil {\n\t\terr = fmt.Errorf(\"list entries %+v: %v\", request, listErr)\n\t\treturn\n\t}\n\n\tfor {\n\t\tresp, recvErr := stream.Recv()\n\t\tif recvErr != nil {\n\t\t\tif recvErr == io.EOF {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"iterating entries %+v: %v\", request, recvErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif counter >= maxKeys {\n\t\t\tisTruncated = true\n\t\t\treturn\n\t\t}\n\t\tentry := resp.Entry\n\t\tnextMarker = entry.Name\n\t\tif entry.IsDirectory {\n\t\t\t\/\/ println(\"ListEntries\", dir, \"dir:\", entry.Name)\n\t\t\tif entry.Name != \".uploads\" { \/\/ FIXME no need to apply to all directories. 
this extra also affects maxKeys\n\t\t\t\tif delimiter != \"\/\" {\n\t\t\t\t\teachEntryFn(dir, entry)\n\t\t\t\t\t\/\/ println(\"doListFilerEntries2 dir\", dir+\"\/\"+entry.Name, \"maxKeys\", maxKeys-counter)\n\t\t\t\t\tsubCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+\"\/\"+entry.Name, \"\", maxKeys-counter, \"\", delimiter, eachEntryFn)\n\t\t\t\t\tif subErr != nil {\n\t\t\t\t\t\terr = fmt.Errorf(\"doListFilerEntries2: %v\", subErr)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ println(\"doListFilerEntries2 dir\", dir+\"\/\"+entry.Name, \"maxKeys\", maxKeys-counter, \"subCounter\", subCounter, \"subNextMarker\", subNextMarker, \"subIsTruncated\", subIsTruncated)\n\t\t\t\t\tcounter += subCounter\n\t\t\t\t\tnextMarker = entry.Name + \"\/\" + subNextMarker\n\t\t\t\t\tif subIsTruncated {\n\t\t\t\t\t\tisTruncated = true\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tvar isEmpty bool\n\t\t\t\t\tif !s3a.option.AllowEmptyFolder {\n\t\t\t\t\t\tif isEmpty, err = s3a.isDirectoryAllEmpty(client, dir, entry.Name); err != nil {\n\t\t\t\t\t\t\tglog.Errorf(\"check empty folder %s: %v\", dir, err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif !isEmpty {\n\t\t\t\t\t\teachEntryFn(dir, entry)\n\t\t\t\t\t\tcounter++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ println(\"ListEntries\", dir, \"file:\", entry.Name)\n\t\t\teachEntryFn(dir, entry)\n\t\t\tcounter++\n\t\t}\n\t}\n\treturn\n}\n\nfunc getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimiter string, fetchOwner bool, maxkeys int) {\n\tprefix = values.Get(\"prefix\")\n\ttoken = values.Get(\"continuation-token\")\n\tstartAfter = values.Get(\"start-after\")\n\tdelimiter = values.Get(\"delimiter\")\n\tif values.Get(\"max-keys\") != \"\" {\n\t\tmaxkeys, _ = strconv.Atoi(values.Get(\"max-keys\"))\n\t} else {\n\t\tmaxkeys = maxObjectListSizeLimit\n\t}\n\tfetchOwner = values.Get(\"fetch-owner\") == \"true\"\n\treturn\n}\n\nfunc getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int) {\n\tprefix = values.Get(\"prefix\")\n\tmarker = values.Get(\"marker\")\n\tdelimiter = values.Get(\"delimiter\")\n\tif values.Get(\"max-keys\") != \"\" {\n\t\tmaxkeys, _ = strconv.Atoi(values.Get(\"max-keys\"))\n\t} else {\n\t\tmaxkeys = maxObjectListSizeLimit\n\t}\n\treturn\n}\n\nfunc (s3a *S3ApiServer) isDirectoryAllEmpty(filerClient filer_pb.SeaweedFilerClient, parentDir, name string) (isEmpty bool, err error) {\n\t\/\/ println(\"+ isDirectoryAllEmpty\", dir, name)\n\tglog.V(4).Infof(\"+ isEmpty %s\/%s\", parentDir, name)\n\tdefer glog.V(4).Infof(\"- isEmpty %s\/%s %v\", parentDir, name, isEmpty)\n\tvar fileCounter int\n\tvar subDirs []string\n\tcurrentDir := parentDir + \"\/\" + name\n\tvar startFrom string\n\tvar isExhausted bool\n\tvar foundEntry bool\n\tfor fileCounter == 0 && !isExhausted && err == nil {\n\t\terr = filer_pb.SeaweedList(filerClient, currentDir, \"\", func(entry *filer_pb.Entry, isLast bool) error {\n\t\t\tfoundEntry = true\n\t\t\tif entry.IsDirectory {\n\t\t\t\tsubDirs = append(subDirs, entry.Name)\n\t\t\t} else {\n\t\t\t\tfileCounter++\n\t\t\t}\n\t\t\tstartFrom = entry.Name\n\t\t\tisExhausted = isExhausted || isLast\n\t\t\tglog.V(4).Infof(\" * %s\/%s isLast: %t\", currentDir, startFrom, isLast)\n\t\t\treturn nil\n\t\t}, startFrom, false, 8)\n\t\tif !foundEntry {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif fileCounter > 0 {\n\t\treturn false, nil\n\t}\n\n\tfor _, subDir := range subDirs {\n\t\tisSubEmpty, 
subErr := s3a.isDirectoryAllEmpty(filerClient, currentDir, subDir)\n\t\tif subErr != nil {\n\t\t\treturn false, subErr\n\t\t}\n\t\tif !isSubEmpty {\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\tglog.V(1).Infof(\"deleting empty folder %s\", currentDir)\n\tif err = doDeleteEntry(filerClient, parentDir, name, true, true); err != nil {\n\t\treturn\n\t}\n\n\treturn true, nil\n}\n<commit_msg>s3: fix ListObject if more than 10000 objects<commit_after>package s3api\n\nimport (\n\t\"context\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/filer\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/filer_pb\"\n\txhttp \"github.com\/chrislusf\/seaweedfs\/weed\/s3api\/http\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/s3api\/s3err\"\n)\n\ntype ListBucketResultV2 struct {\n\tXMLName xml.Name `xml:\"http:\/\/s3.amazonaws.com\/doc\/2006-03-01\/ ListBucketResult\"`\n\tName string `xml:\"Name\"`\n\tPrefix string `xml:\"Prefix\"`\n\tMaxKeys int `xml:\"MaxKeys\"`\n\tDelimiter string `xml:\"Delimiter,omitempty\"`\n\tIsTruncated bool `xml:\"IsTruncated\"`\n\tContents []ListEntry `xml:\"Contents,omitempty\"`\n\tCommonPrefixes []PrefixEntry `xml:\"CommonPrefixes,omitempty\"`\n\tContinuationToken string `xml:\"ContinuationToken,omitempty\"`\n\tNextContinuationToken string `xml:\"NextContinuationToken,omitempty\"`\n\tKeyCount int `xml:\"KeyCount\"`\n\tStartAfter string `xml:\"StartAfter,omitempty\"`\n}\n\nfunc (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {\n\n\t\/\/ https:\/\/docs.aws.amazon.com\/AmazonS3\/latest\/API\/v2-RESTBucketGET.html\n\n\t\/\/ collect parameters\n\tbucket, _ := getBucketAndObject(r)\n\tglog.V(3).Infof(\"ListObjectsV2Handler %s\", bucket)\n\n\toriginalPrefix, continuationToken, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query())\n\n\tif maxKeys < 0 {\n\t\ts3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)\n\t\treturn\n\t}\n\tif delimiter != \"\" && delimiter != \"\/\" {\n\t\ts3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)\n\t\treturn\n\t}\n\n\tmarker := continuationToken\n\tif continuationToken == \"\" {\n\t\tmarker = startAfter\n\t}\n\n\tresponse, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter)\n\n\tif err != nil {\n\t\ts3err.WriteErrorResponse(w, r, s3err.ErrInternalError)\n\t\treturn\n\t}\n\n\tif len(response.Contents) == 0 {\n\t\tif exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {\n\t\t\ts3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)\n\t\t\treturn\n\t\t}\n\t}\n\n\tresponseV2 := &ListBucketResultV2{\n\t\tXMLName: response.XMLName,\n\t\tName: response.Name,\n\t\tCommonPrefixes: response.CommonPrefixes,\n\t\tContents: response.Contents,\n\t\tContinuationToken: continuationToken,\n\t\tDelimiter: response.Delimiter,\n\t\tIsTruncated: response.IsTruncated,\n\t\tKeyCount: len(response.Contents) + len(response.CommonPrefixes),\n\t\tMaxKeys: response.MaxKeys,\n\t\tNextContinuationToken: response.NextMarker,\n\t\tPrefix: response.Prefix,\n\t\tStartAfter: startAfter,\n\t}\n\n\twriteSuccessResponseXML(w, r, responseV2)\n}\n\nfunc (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {\n\n\t\/\/ https:\/\/docs.aws.amazon.com\/AmazonS3\/latest\/API\/RESTBucketGET.html\n\n\t\/\/ collect parameters\n\tbucket, _ := 
getBucketAndObject(r)\n\tglog.V(3).Infof(\"ListObjectsV1Handler %s\", bucket)\n\n\toriginalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query())\n\n\tif maxKeys < 0 {\n\t\ts3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)\n\t\treturn\n\t}\n\tif delimiter != \"\" && delimiter != \"\/\" {\n\t\ts3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)\n\t\treturn\n\t}\n\n\tresponse, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter)\n\n\tif err != nil {\n\t\ts3err.WriteErrorResponse(w, r, s3err.ErrInternalError)\n\t\treturn\n\t}\n\n\tif len(response.Contents) == 0 {\n\t\tif exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {\n\t\t\ts3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)\n\t\t\treturn\n\t\t}\n\t}\n\n\twriteSuccessResponseXML(w, r, response)\n}\n\nfunc (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, maxKeys int, marker string, delimiter string) (response ListBucketResult, err error) {\n\t\/\/ convert full path prefix into directory name and prefix for entry name\n\treqDir, prefix := filepath.Split(originalPrefix)\n\tif strings.HasPrefix(reqDir, \"\/\") {\n\t\treqDir = reqDir[1:]\n\t}\n\tbucketPrefix := fmt.Sprintf(\"%s\/%s\/\", s3a.option.BucketsPath, bucket)\n\treqDir = fmt.Sprintf(\"%s%s\", bucketPrefix, reqDir)\n\tif strings.HasSuffix(reqDir, \"\/\") {\n\t\t\/\/ remove trailing \"\/\"\n\t\treqDir = reqDir[:len(reqDir)-1]\n\t}\n\n\tvar contents []ListEntry\n\tvar commonPrefixes []PrefixEntry\n\tvar isTruncated bool\n\tvar doErr error\n\tvar nextMarker string\n\n\t\/\/ check filer\n\terr = s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {\n\n\t\t_, isTruncated, nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, maxKeys, marker, delimiter, func(dir string, entry *filer_pb.Entry) {\n\t\t\tif entry.IsDirectory {\n\t\t\t\tif delimiter == \"\/\" {\n\t\t\t\t\tcommonPrefixes = append(commonPrefixes, PrefixEntry{\n\t\t\t\t\t\tPrefix: fmt.Sprintf(\"%s\/%s\/\", dir, entry.Name)[len(bucketPrefix):],\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tstorageClass := \"STANDARD\"\n\t\t\t\tif v, ok := entry.Extended[xhttp.AmzStorageClass]; ok {\n\t\t\t\t\tstorageClass = string(v)\n\t\t\t\t}\n\t\t\t\tcontents = append(contents, ListEntry{\n\t\t\t\t\tKey: fmt.Sprintf(\"%s\/%s\", dir, entry.Name)[len(bucketPrefix):],\n\t\t\t\t\tLastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),\n\t\t\t\t\tETag: \"\\\"\" + filer.ETag(entry) + \"\\\"\",\n\t\t\t\t\tSize: int64(filer.FileSize(entry)),\n\t\t\t\t\tOwner: CanonicalUser{\n\t\t\t\t\t\tID: fmt.Sprintf(\"%x\", entry.Attributes.Uid),\n\t\t\t\t\t\tDisplayName: entry.Attributes.UserName,\n\t\t\t\t\t},\n\t\t\t\t\tStorageClass: StorageClass(storageClass),\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t\tif doErr != nil {\n\t\t\treturn doErr\n\t\t}\n\n\t\tif !isTruncated {\n\t\t\tnextMarker = \"\"\n\t\t}\n\n\t\tresponse = ListBucketResult{\n\t\t\tName: bucket,\n\t\t\tPrefix: originalPrefix,\n\t\t\tMarker: marker,\n\t\t\tNextMarker: nextMarker,\n\t\t\tMaxKeys: maxKeys,\n\t\t\tDelimiter: delimiter,\n\t\t\tIsTruncated: isTruncated,\n\t\t\tContents: contents,\n\t\t\tCommonPrefixes: commonPrefixes,\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn\n}\n\nfunc (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, maxKeys int, marker, delimiter string, eachEntryFn func(dir string, entry *filer_pb.Entry)) (counter int, isTruncated bool, nextMarker string, err error) {\n\t\/\/ 
invariants\n\t\/\/ prefix and marker should be under dir, marker may contain \"\/\"\n\t\/\/ maxKeys should be updated for each recursion\n\n\tif prefix == \"\/\" && delimiter == \"\/\" {\n\t\treturn\n\t}\n\tif maxKeys <= 0 {\n\t\treturn\n\t}\n\n\tif strings.Contains(marker, \"\/\") {\n\t\tsepIndex := strings.Index(marker, \"\/\")\n\t\tsubDir, subMarker := marker[0:sepIndex], marker[sepIndex+1:]\n\t\t\/\/ println(\"doListFilerEntries dir\", dir+\"\/\"+subDir, \"subMarker\", subMarker, \"maxKeys\", maxKeys)\n\t\tsubCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+\"\/\"+subDir, \"\", maxKeys, subMarker, delimiter, eachEntryFn)\n\t\tif subErr != nil {\n\t\t\terr = subErr\n\t\t\treturn\n\t\t}\n\t\tcounter += subCounter\n\t\tisTruncated = isTruncated || subIsTruncated\n\t\tmaxKeys -= subCounter\n\t\tnextMarker = subDir + \"\/\" + subNextMarker\n\t\t\/\/ finished processing this sub directory\n\t\tmarker = subDir\n\t}\n\tif maxKeys <= 0 {\n\t\treturn\n\t}\n\n\t\/\/ now marker is also a direct child of dir\n\trequest := &filer_pb.ListEntriesRequest{\n\t\tDirectory: dir,\n\t\tPrefix: prefix,\n\t\tLimit: uint32(maxKeys + 1),\n\t\tStartFromFileName: marker,\n\t\tInclusiveStartFrom: false,\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tstream, listErr := client.ListEntries(ctx, request)\n\tif listErr != nil {\n\t\terr = fmt.Errorf(\"list entries %+v: %v\", request, listErr)\n\t\treturn\n\t}\n\n\tfor {\n\t\tresp, recvErr := stream.Recv()\n\t\tif recvErr != nil {\n\t\t\tif recvErr == io.EOF {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"iterating entries %+v: %v\", request, recvErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif counter >= maxKeys {\n\t\t\tisTruncated = true\n\t\t\treturn\n\t\t}\n\t\tentry := resp.Entry\n\t\tnextMarker = entry.Name\n\t\tif entry.IsDirectory {\n\t\t\t\/\/ println(\"ListEntries\", dir, \"dir:\", entry.Name)\n\t\t\tif entry.Name != \".uploads\" { \/\/ FIXME no need to apply to all directories. 
this extra also affects maxKeys\n\t\t\t\tif delimiter != \"\/\" {\n\t\t\t\t\teachEntryFn(dir, entry)\n\t\t\t\t\t\/\/ println(\"doListFilerEntries2 dir\", dir+\"\/\"+entry.Name, \"maxKeys\", maxKeys-counter)\n\t\t\t\t\tsubCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+\"\/\"+entry.Name, \"\", maxKeys-counter, \"\", delimiter, eachEntryFn)\n\t\t\t\t\tif subErr != nil {\n\t\t\t\t\t\terr = fmt.Errorf(\"doListFilerEntries2: %v\", subErr)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ println(\"doListFilerEntries2 dir\", dir+\"\/\"+entry.Name, \"maxKeys\", maxKeys-counter, \"subCounter\", subCounter, \"subNextMarker\", subNextMarker, \"subIsTruncated\", subIsTruncated)\n\t\t\t\t\tcounter += subCounter\n\t\t\t\t\tnextMarker = entry.Name + \"\/\" + subNextMarker\n\t\t\t\t\tif subIsTruncated {\n\t\t\t\t\t\tisTruncated = true\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tvar isEmpty bool\n\t\t\t\t\tif !s3a.option.AllowEmptyFolder {\n\t\t\t\t\t\tif isEmpty, err = s3a.isDirectoryAllEmpty(client, dir, entry.Name); err != nil {\n\t\t\t\t\t\t\tglog.Errorf(\"check empty folder %s: %v\", dir, err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif !isEmpty {\n\t\t\t\t\t\teachEntryFn(dir, entry)\n\t\t\t\t\t\tcounter++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ println(\"ListEntries\", dir, \"file:\", entry.Name)\n\t\t\teachEntryFn(dir, entry)\n\t\t\tcounter++\n\t\t}\n\t}\n\treturn\n}\n\nfunc getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimiter string, fetchOwner bool, maxkeys int) {\n\tprefix = values.Get(\"prefix\")\n\ttoken = values.Get(\"continuation-token\")\n\tstartAfter = values.Get(\"start-after\")\n\tdelimiter = values.Get(\"delimiter\")\n\tif values.Get(\"max-keys\") != \"\" {\n\t\tmaxkeys, _ = strconv.Atoi(values.Get(\"max-keys\"))\n\t} else {\n\t\tmaxkeys = maxObjectListSizeLimit\n\t}\n\tfetchOwner = values.Get(\"fetch-owner\") == \"true\"\n\treturn\n}\n\nfunc getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int) {\n\tprefix = values.Get(\"prefix\")\n\tmarker = values.Get(\"marker\")\n\tdelimiter = values.Get(\"delimiter\")\n\tif values.Get(\"max-keys\") != \"\" {\n\t\tmaxkeys, _ = strconv.Atoi(values.Get(\"max-keys\"))\n\t} else {\n\t\tmaxkeys = maxObjectListSizeLimit\n\t}\n\treturn\n}\n\nfunc (s3a *S3ApiServer) isDirectoryAllEmpty(filerClient filer_pb.SeaweedFilerClient, parentDir, name string) (isEmpty bool, err error) {\n\t\/\/ println(\"+ isDirectoryAllEmpty\", dir, name)\n\tglog.V(4).Infof(\"+ isEmpty %s\/%s\", parentDir, name)\n\tdefer glog.V(4).Infof(\"- isEmpty %s\/%s %v\", parentDir, name, isEmpty)\n\tvar fileCounter int\n\tvar subDirs []string\n\tcurrentDir := parentDir + \"\/\" + name\n\tvar startFrom string\n\tvar isExhausted bool\n\tvar foundEntry bool\n\tfor fileCounter == 0 && !isExhausted && err == nil {\n\t\terr = filer_pb.SeaweedList(filerClient, currentDir, \"\", func(entry *filer_pb.Entry, isLast bool) error {\n\t\t\tfoundEntry = true\n\t\t\tif entry.IsDirectory {\n\t\t\t\tsubDirs = append(subDirs, entry.Name)\n\t\t\t} else {\n\t\t\t\tfileCounter++\n\t\t\t}\n\t\t\tstartFrom = entry.Name\n\t\t\tisExhausted = isExhausted || isLast\n\t\t\tglog.V(4).Infof(\" * %s\/%s isLast: %t\", currentDir, startFrom, isLast)\n\t\t\treturn nil\n\t\t}, startFrom, false, 8)\n\t\tif !foundEntry {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif fileCounter > 0 {\n\t\treturn false, nil\n\t}\n\n\tfor _, subDir := range subDirs {\n\t\tisSubEmpty, 
subErr := s3a.isDirectoryAllEmpty(filerClient, currentDir, subDir)\n\t\tif subErr != nil {\n\t\t\treturn false, subErr\n\t\t}\n\t\tif !isSubEmpty {\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\tglog.V(1).Infof(\"deleting empty folder %s\", currentDir)\n\tif err = doDeleteEntry(filerClient, parentDir, name, true, true); err != nil {\n\t\treturn\n\t}\n\n\treturn true, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package lnwallet\n\nimport (\n\t\"encoding\/binary\"\n\t\"io\"\n\t\"time\"\n\n\t\"li.lan\/labs\/plasma\/shachain\"\n\n\t\"github.com\/btcsuite\/btcd\/btcec\"\n\t\"github.com\/btcsuite\/btcd\/wire\"\n\t\"github.com\/btcsuite\/btcutil\"\n\t\"github.com\/btcsuite\/btcwallet\/waddrmgr\"\n\t\"github.com\/btcsuite\/btcwallet\/walletdb\"\n)\n\nvar (\n\t\/\/ Namespace bucket keys.\n\tlightningNamespaceKey = []byte(\"ln-wallet\")\n\twaddrmgrNamespaceKey = []byte(\"waddrmgr\")\n\twtxmgrNamespaceKey = []byte(\"wtxmgr\")\n\n\topenChannelBucket = []byte(\"o-chans\")\n\tclosedChannelBucket = []byte(\"c-chans\")\n\tfundingTxKey = []byte(\"funding\")\n\n\tendian = binary.BigEndian\n)\n\n\/\/ ChannelDB...\ntype ChannelDB struct {\n\t\/\/ TODO(roasbeef): caching, etc?\n\twallet *LightningWallet\n\n\tnamespace walletdb.Namespace\n}\n\nfunc NewChannelDB(wallet *LightningWallet, n walletdb.Namespace) *ChannelDB {\n\treturn &ChannelDB{wallet, n}\n}\n\n\/\/ OpenChannelState...\n\/\/ TODO(roasbeef): store only the essentials? optimize space...\n\/\/ TODO(roasbeef): switch to \"column store\"\ntype OpenChannelState struct {\n\t\/\/ Hash? or Their current pubKey?\n\t\/\/ TODO(roasbeef): switch to Tadge's LNId\n\tTheirLNID [wire.HashSize]byte\n\n\t\/\/ The ID of a channel is the txid of the funding transaction.\n\tChanID [wire.HashSize]byte\n\n\tMinFeePerKb btcutil.Amount\n\t\/\/ Our reserve. Assume symmetric reserve amounts. Only needed if the\n\t\/\/ funding type is CLTV.\n\t\/\/ReserveAmount btcutil.Amount\n\n\t\/\/ Keys for both sides to be used for the commitment transactions.\n\tOurCommitKey *btcec.PrivateKey\n\tTheirCommitKey *btcec.PublicKey\n\n\t\/\/ Tracking total channel capacity, and the amount of funds allocated\n\t\/\/ to each side.\n\tCapacity btcutil.Amount\n\tOurBalance btcutil.Amount\n\tTheirBalance btcutil.Amount\n\n\t\/\/ Commitment transactions for both sides (they're asymmetric). Also\n\t\/\/ their signature which lets us spend our version of the commitment\n\t\/\/ transaction.\n\tTheirCommitTx *wire.MsgTx\n\tOurCommitTx *wire.MsgTx \/\/ TODO(roasbeef): store hash instead?\n\tTheirCommitSig []byte \/\/ TODO(roasbeef): fixed length?, same w\/ redeem\n\n\t\/\/ The final funding transaction. Kept wallet-related records.\n\tFundingTx *wire.MsgTx\n\n\tMultiSigKey *btcec.PrivateKey\n\tFundingRedeemScript []byte\n\n\t\/\/ Current revocation for their commitment transaction. 
However, since\n\t\/\/ this is the hash, and not the pre-image, we can't yet verify that\n\t\/\/ it's actually in the chain.\n\tTheirCurrentRevocation [wire.HashSize]byte\n\tTheirShaChain *shachain.HyperShaChain\n\tOurShaChain *shachain.HyperShaChain\n\n\t\/\/ Final delivery address\n\tOurDeliveryAddress btcutil.Address\n\tTheirDeliveryAddress btcutil.Address\n\n\t\/\/ In blocks\n\tCsvDelay uint32\n\n\t\/\/ TODO(roasbeef): track fees, other stats?\n\tNumUpdates uint64\n\tTotalSatoshisSent uint64\n\tTotalSatoshisReceived uint64\n\tCreationTime time.Time\n}\n\n\/\/ Encode...\n\/\/ TODO(roasbeef): checksum\nfunc (o *OpenChannelState) Encode(b io.Writer, addrManager *waddrmgr.Manager) error {\n\tif _, err := b.Write(o.TheirLNID[:]); err != nil {\n\t\treturn err\n\t}\n\tif _, err := b.Write(o.ChanID[:]); err != nil {\n\t\treturn err\n\t}\n\n\tif err := binary.Write(b, endian, uint64(o.MinFeePerKb)); err != nil {\n\t\treturn err\n\t}\n\n\tencryptedPriv, err := addrManager.Encrypt(waddrmgr.CKTPrivate,\n\t\to.OurCommitKey.Serialize())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err := b.Write(encryptedPriv); err != nil {\n\t\treturn err\n\t}\n\tif _, err := b.Write(o.TheirCommitKey.SerializeCompressed()); err != nil {\n\t\treturn err\n\t}\n\n\tif err := binary.Write(b, endian, uint64(o.Capacity)); err != nil {\n\t\treturn err\n\t}\n\tif err := binary.Write(b, endian, uint64(o.OurBalance)); err != nil {\n\t\treturn err\n\t}\n\tif err := binary.Write(b, endian, uint64(o.TheirBalance)); err != nil {\n\t\treturn err\n\t}\n\n\tif err := o.TheirCommitTx.Serialize(b); err != nil {\n\t\treturn err\n\t}\n\tif err := o.OurCommitTx.Serialize(b); err != nil {\n\t\treturn err\n\t}\n\tif _, err := b.Write(o.TheirCommitSig[:]); err != nil {\n\t\treturn err\n\t}\n\n\tif err := o.FundingTx.Serialize(b); err != nil {\n\t\treturn err\n\t}\n\n\tencryptedPriv, err = addrManager.Encrypt(waddrmgr.CKTPrivate,\n\t\to.MultiSigKey.Serialize())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err := b.Write(encryptedPriv); err != nil {\n\t\treturn err\n\t}\n\tif _, err := b.Write(o.FundingRedeemScript); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := b.Write(o.TheirCurrentRevocation[:]); err != nil {\n\t\treturn err\n\t}\n\t\/\/ TODO(roasbeef): serialize shachains\n\n\tif _, err := b.Write([]byte(o.OurDeliveryAddress.EncodeAddress())); err != nil {\n\t\treturn err\n\t}\n\tif _, err := b.Write([]byte(o.TheirDeliveryAddress.EncodeAddress())); err != nil {\n\t\treturn err\n\t}\n\n\tif err := binary.Write(b, endian, o.CsvDelay); err != nil {\n\t\treturn err\n\t}\n\tif err := binary.Write(b, endian, o.NumUpdates); err != nil {\n\t\treturn err\n\t}\n\tif err := binary.Write(b, endian, o.TotalSatoshisSent); err != nil {\n\t\treturn err\n\t}\n\tif err := binary.Write(b, endian, o.TotalSatoshisReceived); err != nil {\n\t\treturn err\n\t}\n\n\tif err := binary.Write(b, endian, o.CreationTime.Unix()); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Decode...\nfunc (o *OpenChannelState) Decode(b io.Reader, addrManager *waddrmgr.Manager) error {\n\tvar scratch [8]byte\n\n\tif _, err := b.Read(o.TheirLNID[:]); err != nil {\n\t\treturn err\n\t}\n\tif _, err := b.Read(o.ChanID[:]); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := b.Read(scratch[:]); err != nil {\n\t\treturn err\n\t}\n\to.MinFeePerKb = btcutil.Amount(endian.Uint64(scratch[:]))\n\n\t\/\/ nonce + serPrivKey + mac\n\tvar encryptedPriv [24 + 32 + 16]byte\n\tif _, err := b.Read(encryptedPriv[:]); err != nil {\n\t\treturn err\n\t}\n\tdecryptedPriv, err := 
addrManager.Decrypt(waddrmgr.CKTPrivate, encryptedPriv[:])\n\tif err != nil {\n\t\treturn err\n\t}\n\to.OurCommitKey, _ = btcec.PrivKeyFromBytes(btcec.S256(), decryptedPriv)\n\n\tvar serPubKey [33]byte\n\tif _, err := b.Read(serPubKey[:]); err != nil {\n\t\treturn err\n\t}\n\to.TheirCommitKey, err = btcec.ParsePubKey(serPubKey[:], btcec.S256())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := b.Read(scratch[:]); err != nil {\n\t\treturn err\n\t}\n\to.Capacity = btcutil.Amount(endian.Uint64(scratch[:]))\n\tif _, err := b.Read(scratch[:]); err != nil {\n\t\treturn err\n\t}\n\to.OurBalance = btcutil.Amount(endian.Uint64(scratch[:]))\n\tif _, err := b.Read(scratch[:]); err != nil {\n\t\treturn err\n\t}\n\to.TheirBalance = btcutil.Amount(endian.Uint64(scratch[:]))\n\n\to.TheirCommitTx = wire.NewMsgTx()\n\tif err := o.TheirCommitTx.Deserialize(b); err != nil {\n\t\treturn err\n\t}\n\to.OurCommitTx = wire.NewMsgTx()\n\tif err := o.OurCommitTx.Deserialize(b); err != nil {\n\t\treturn err\n\t}\n\n\tvar sig [64]byte\n\tif _, err := b.Read(sig[:]); err != nil {\n\t\treturn err\n\t}\n\to.TheirCommitSig = sig[:]\n\tif err != nil {\n\t\treturn err\n\t}\n\n\to.FundingTx = wire.NewMsgTx()\n\tif err := o.FundingTx.Deserialize(b); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := b.Read(encryptedPriv[:]); err != nil {\n\t\treturn err\n\t}\n\tdecryptedPriv, err = addrManager.Decrypt(waddrmgr.CKTPrivate, encryptedPriv[:])\n\tif err != nil {\n\t\treturn err\n\t}\n\to.MultiSigKey, _ = btcec.PrivKeyFromBytes(btcec.S256(), decryptedPriv)\n\n\tvar redeemScript [71]byte\n\tif _, err := b.Read(redeemScript[:]); err != nil {\n\t\treturn err\n\t}\n\to.FundingRedeemScript = redeemScript[:]\n\n\tif _, err := b.Read(o.TheirCurrentRevocation[:]); err != nil {\n\t\treturn err\n\t}\n\n\tvar addr [34]byte\n\tif _, err := b.Read(addr[:]); err != nil {\n\t\treturn err\n\t}\n\to.OurDeliveryAddress, err = btcutil.DecodeAddress(string(addr[:]), ActiveNetParams)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := b.Read(addr[:]); err != nil {\n\t\treturn err\n\t}\n\to.TheirDeliveryAddress, err = btcutil.DecodeAddress(string(addr[:]), ActiveNetParams)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := binary.Read(b, endian, &o.CsvDelay); err != nil {\n\t\treturn err\n\t}\n\tif err := binary.Read(b, endian, &o.NumUpdates); err != nil {\n\t\treturn err\n\t}\n\tif err := binary.Read(b, endian, &o.TotalSatoshisSent); err != nil {\n\t\treturn err\n\t}\n\tif err := binary.Read(b, endian, &o.TotalSatoshisReceived); err != nil {\n\t\treturn err\n\t}\n\n\tvar unix int64\n\tif err := binary.Read(b, endian, &unix); err != nil {\n\t\treturn err\n\t}\n\to.CreationTime = time.Unix(unix, 0)\n\n\treturn nil\n}\n\nfunc newOpenChannelState(ID [32]byte) *OpenChannelState {\n\treturn &OpenChannelState{TheirLNID: ID}\n}\n<commit_msg>lnwallet: implement FetchOpenChannel and PutOpenChannel<commit_after>package lnwallet\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n\n\t\"li.lan\/labs\/plasma\/shachain\"\n\n\t\"github.com\/btcsuite\/btcd\/btcec\"\n\t\"github.com\/btcsuite\/btcd\/wire\"\n\t\"github.com\/btcsuite\/btcutil\"\n\t\"github.com\/btcsuite\/btcwallet\/waddrmgr\"\n\t\"github.com\/btcsuite\/btcwallet\/walletdb\"\n)\n\nvar (\n\t\/\/ Namespace bucket keys.\n\tlightningNamespaceKey = []byte(\"ln-wallet\")\n\twaddrmgrNamespaceKey = []byte(\"waddrmgr\")\n\twtxmgrNamespaceKey = []byte(\"wtxmgr\")\n\n\topenChannelBucket = []byte(\"o\")\n\tclosedChannelBucket = []byte(\"c\")\n\tactiveChanKey = 
[]byte(\"a\")\n\n\tendian = binary.BigEndian\n)\n\n\/\/ ChannelDB...\n\/\/ TODO(roasbeef): CHECKSUMS, REDUNDANCY, etc etc.\ntype ChannelDB struct {\n\t\/\/ TODO(roasbeef): caching, etc?\n\taddrmgr *waddrmgr.Manager\n\n\tnamespace walletdb.Namespace\n}\n\n\/\/ PutOpenChannel...\nfunc (c *ChannelDB) PutOpenChannel(channel *OpenChannelState) error {\n\treturn c.namespace.Update(func(tx walletdb.Tx) error {\n\t\t\/\/ Get the bucket dedicated to storing the meta-data for open\n\t\t\/\/ channels.\n\t\trootBucket := tx.RootBucket()\n\t\topenChanBucket, err := rootBucket.CreateBucketIfNotExists(openChannelBucket)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn dbPutOpenChannel(openChanBucket, channel, c.addrmgr)\n\t})\n}\n\n\/\/ GetOpenChannel...\n\/\/ TODO(roasbeef): assumes only 1 active channel per-node\nfunc (c *ChannelDB) FetchOpenChannel(nodeID [32]byte) (*OpenChannelState, error) {\n\tvar channel *OpenChannelState\n\tvar err error\n\n\tdbErr := c.namespace.View(func(tx walletdb.Tx) error {\n\t\t\/\/ Get the bucket dedicated to storing the meta-data for open\n\t\t\/\/ channels.\n\t\trootBucket := tx.RootBucket()\n\t\topenChanBucket, err := rootBucket.CreateBucketIfNotExists(openChannelBucket)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tchannel, err = dbGetOpenChannel(openChanBucket, nodeID, c.addrmgr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn channel, dbErr\n}\n\n\/\/ dbPutChannel...\nfunc dbPutOpenChannel(activeChanBucket walletdb.Bucket, channel *OpenChannelState,\n\taddrmgr *waddrmgr.Manager) error {\n\n\t\/\/ Generate a serialized version of the open channel. The addrmgr is\n\t\/\/ required in order to encrypt densitive data.\n\tvar b bytes.Buffer\n\tif err := channel.Encode(&b, addrmgr); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Grab the bucket dedicated to storing data related to this particular\n\t\/\/ node.\n\tnodeBucket, err := activeChanBucket.CreateBucketIfNotExists(channel.TheirLNID[:])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nodeBucket.Put(activeChanKey, b.Bytes())\n}\n\n\/\/ dbPutChannel...\nfunc dbGetOpenChannel(bucket walletdb.Bucket, nodeID [32]byte,\n\taddrmgr *waddrmgr.Manager) (*OpenChannelState, error) {\n\t\/\/ Grab the bucket dedicated to storing data related to this particular\n\t\/\/ node.\n\tnodeBucket, err := bucket.CreateBucketIfNotExists(nodeID[:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tserializedChannel := nodeBucket.Get(activeChanKey)\n\tif serializedChannel == nil {\n\t\t\/\/ TODO(roasbeef): make proper in error.go\n\t\treturn nil, fmt.Errorf(\"node has no open channels\")\n\t}\n\n\t\/\/ Decode the serialized channel state, using the addrmgr to decrypt\n\t\/\/ sensitive information.\n\tchannel := &OpenChannelState{}\n\treader := bytes.NewReader(serializedChannel)\n\tif err := channel.Decode(reader, addrmgr); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn channel, nil\n}\n\n\/\/ NewChannelDB...\n\/\/ TODO(roasbeef): re-visit this dependancy...\nfunc NewChannelDB(addrmgr *waddrmgr.Manager, namespace walletdb.Namespace) *ChannelDB {\n\t\/\/ TODO(roasbeef): create buckets if not created?\n\treturn &ChannelDB{addrmgr, namespace}\n}\n\n\/\/ OpenChannelState...\n\/\/ TODO(roasbeef): store only the essentials? optimize space...\n\/\/ TODO(roasbeef): switch to \"column store\"\ntype OpenChannelState struct {\n\t\/\/ Hash? 
or Their current pubKey?\n\t\/\/ TODO(roasbeef): switch to Tadge's LNId\n\tTheirLNID [wire.HashSize]byte\n\n\t\/\/ The ID of a channel is the txid of the funding transaction.\n\tChanID [wire.HashSize]byte\n\n\tMinFeePerKb btcutil.Amount\n\t\/\/ Our reserve. Assume symmetric reserve amounts. Only needed if the\n\t\/\/ funding type is CLTV.\n\t\/\/ReserveAmount btcutil.Amount\n\n\t\/\/ Keys for both sides to be used for the commitment transactions.\n\tOurCommitKey *btcec.PrivateKey\n\tTheirCommitKey *btcec.PublicKey\n\n\t\/\/ Tracking total channel capacity, and the amount of funds allocated\n\t\/\/ to each side.\n\tCapacity btcutil.Amount\n\tOurBalance btcutil.Amount\n\tTheirBalance btcutil.Amount\n\n\t\/\/ Commitment transactions for both sides (they're asymmetric). Also\n\t\/\/ their signature which lets us spend our version of the commitment\n\t\/\/ transaction.\n\tTheirCommitTx *wire.MsgTx\n\tOurCommitTx *wire.MsgTx \/\/ TODO(roasbeef): store hash instead?\n\tTheirCommitSig []byte \/\/ TODO(roasbeef): fixed length?, same w\/ redeem\n\n\t\/\/ The final funding transaction. Kept wallet-related records.\n\tFundingTx *wire.MsgTx\n\n\tMultiSigKey *btcec.PrivateKey\n\tFundingRedeemScript []byte\n\n\t\/\/ Current revocation for their commitment transaction. However, since\n\t\/\/ this is the hash, and not the pre-image, we can't yet verify that\n\t\/\/ it's actually in the chain.\n\tTheirCurrentRevocation [wire.HashSize]byte\n\tTheirShaChain *shachain.HyperShaChain\n\tOurShaChain *shachain.HyperShaChain\n\n\t\/\/ Final delivery address\n\tOurDeliveryAddress btcutil.Address\n\tTheirDeliveryAddress btcutil.Address\n\n\t\/\/ In blocks\n\tCsvDelay uint32\n\n\t\/\/ TODO(roasbeef): track fees, other stats?\n\tNumUpdates uint64\n\tTotalSatoshisSent uint64\n\tTotalSatoshisReceived uint64\n\tCreationTime time.Time\n}\n\n\/\/ Encode...\n\/\/ TODO(roasbeef): checksum\nfunc (o *OpenChannelState) Encode(b io.Writer, addrManager *waddrmgr.Manager) error {\n\tif _, err := b.Write(o.TheirLNID[:]); err != nil {\n\t\treturn err\n\t}\n\tif _, err := b.Write(o.ChanID[:]); err != nil {\n\t\treturn err\n\t}\n\n\tif err := binary.Write(b, endian, uint64(o.MinFeePerKb)); err != nil {\n\t\treturn err\n\t}\n\n\tencryptedPriv, err := addrManager.Encrypt(waddrmgr.CKTPrivate,\n\t\to.OurCommitKey.Serialize())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err := b.Write(encryptedPriv); err != nil {\n\t\treturn err\n\t}\n\tif _, err := b.Write(o.TheirCommitKey.SerializeCompressed()); err != nil {\n\t\treturn err\n\t}\n\n\tif err := binary.Write(b, endian, uint64(o.Capacity)); err != nil {\n\t\treturn err\n\t}\n\tif err := binary.Write(b, endian, uint64(o.OurBalance)); err != nil {\n\t\treturn err\n\t}\n\tif err := binary.Write(b, endian, uint64(o.TheirBalance)); err != nil {\n\t\treturn err\n\t}\n\n\tif err := o.TheirCommitTx.Serialize(b); err != nil {\n\t\treturn err\n\t}\n\tif err := o.OurCommitTx.Serialize(b); err != nil {\n\t\treturn err\n\t}\n\tif _, err := b.Write(o.TheirCommitSig[:]); err != nil {\n\t\treturn err\n\t}\n\n\tif err := o.FundingTx.Serialize(b); err != nil {\n\t\treturn err\n\t}\n\n\tencryptedPriv, err = addrManager.Encrypt(waddrmgr.CKTPrivate,\n\t\to.MultiSigKey.Serialize())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err := b.Write(encryptedPriv); err != nil {\n\t\treturn err\n\t}\n\tif _, err := b.Write(o.FundingRedeemScript); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := b.Write(o.TheirCurrentRevocation[:]); err != nil {\n\t\treturn err\n\t}\n\t\/\/ TODO(roasbeef): serialize 
shachains\n\n\tif _, err := b.Write([]byte(o.OurDeliveryAddress.EncodeAddress())); err != nil {\n\t\treturn err\n\t}\n\tif _, err := b.Write([]byte(o.TheirDeliveryAddress.EncodeAddress())); err != nil {\n\t\treturn err\n\t}\n\n\tif err := binary.Write(b, endian, o.CsvDelay); err != nil {\n\t\treturn err\n\t}\n\tif err := binary.Write(b, endian, o.NumUpdates); err != nil {\n\t\treturn err\n\t}\n\tif err := binary.Write(b, endian, o.TotalSatoshisSent); err != nil {\n\t\treturn err\n\t}\n\tif err := binary.Write(b, endian, o.TotalSatoshisReceived); err != nil {\n\t\treturn err\n\t}\n\n\tif err := binary.Write(b, endian, o.CreationTime.Unix()); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Decode...\nfunc (o *OpenChannelState) Decode(b io.Reader, addrManager *waddrmgr.Manager) error {\n\tvar scratch [8]byte\n\n\tif _, err := b.Read(o.TheirLNID[:]); err != nil {\n\t\treturn err\n\t}\n\tif _, err := b.Read(o.ChanID[:]); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := b.Read(scratch[:]); err != nil {\n\t\treturn err\n\t}\n\to.MinFeePerKb = btcutil.Amount(endian.Uint64(scratch[:]))\n\n\t\/\/ nonce + serPrivKey + mac\n\tvar encryptedPriv [24 + 32 + 16]byte\n\tif _, err := b.Read(encryptedPriv[:]); err != nil {\n\t\treturn err\n\t}\n\tdecryptedPriv, err := addrManager.Decrypt(waddrmgr.CKTPrivate, encryptedPriv[:])\n\tif err != nil {\n\t\treturn err\n\t}\n\to.OurCommitKey, _ = btcec.PrivKeyFromBytes(btcec.S256(), decryptedPriv)\n\n\tvar serPubKey [33]byte\n\tif _, err := b.Read(serPubKey[:]); err != nil {\n\t\treturn err\n\t}\n\to.TheirCommitKey, err = btcec.ParsePubKey(serPubKey[:], btcec.S256())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := b.Read(scratch[:]); err != nil {\n\t\treturn err\n\t}\n\to.Capacity = btcutil.Amount(endian.Uint64(scratch[:]))\n\tif _, err := b.Read(scratch[:]); err != nil {\n\t\treturn err\n\t}\n\to.OurBalance = btcutil.Amount(endian.Uint64(scratch[:]))\n\tif _, err := b.Read(scratch[:]); err != nil {\n\t\treturn err\n\t}\n\to.TheirBalance = btcutil.Amount(endian.Uint64(scratch[:]))\n\n\to.TheirCommitTx = wire.NewMsgTx()\n\tif err := o.TheirCommitTx.Deserialize(b); err != nil {\n\t\treturn err\n\t}\n\to.OurCommitTx = wire.NewMsgTx()\n\tif err := o.OurCommitTx.Deserialize(b); err != nil {\n\t\treturn err\n\t}\n\n\tvar sig [64]byte\n\tif _, err := b.Read(sig[:]); err != nil {\n\t\treturn err\n\t}\n\to.TheirCommitSig = sig[:]\n\tif err != nil {\n\t\treturn err\n\t}\n\n\to.FundingTx = wire.NewMsgTx()\n\tif err := o.FundingTx.Deserialize(b); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := b.Read(encryptedPriv[:]); err != nil {\n\t\treturn err\n\t}\n\tdecryptedPriv, err = addrManager.Decrypt(waddrmgr.CKTPrivate, encryptedPriv[:])\n\tif err != nil {\n\t\treturn err\n\t}\n\to.MultiSigKey, _ = btcec.PrivKeyFromBytes(btcec.S256(), decryptedPriv)\n\n\tvar redeemScript [71]byte\n\tif _, err := b.Read(redeemScript[:]); err != nil {\n\t\treturn err\n\t}\n\to.FundingRedeemScript = redeemScript[:]\n\n\tif _, err := b.Read(o.TheirCurrentRevocation[:]); err != nil {\n\t\treturn err\n\t}\n\n\tvar addr [34]byte\n\tif _, err := b.Read(addr[:]); err != nil {\n\t\treturn err\n\t}\n\to.OurDeliveryAddress, err = btcutil.DecodeAddress(string(addr[:]), ActiveNetParams)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := b.Read(addr[:]); err != nil {\n\t\treturn err\n\t}\n\to.TheirDeliveryAddress, err = btcutil.DecodeAddress(string(addr[:]), ActiveNetParams)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := binary.Read(b, endian, &o.CsvDelay); err != nil 
{\n\t\treturn err\n\t}\n\tif err := binary.Read(b, endian, &o.NumUpdates); err != nil {\n\t\treturn err\n\t}\n\tif err := binary.Read(b, endian, &o.TotalSatoshisSent); err != nil {\n\t\treturn err\n\t}\n\tif err := binary.Read(b, endian, &o.TotalSatoshisReceived); err != nil {\n\t\treturn err\n\t}\n\n\tvar unix int64\n\tif err := binary.Read(b, endian, &unix); err != nil {\n\t\treturn err\n\t}\n\to.CreationTime = time.Unix(unix, 0)\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package lnwallet\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com\/roasbeef\/btcd\/btcec\"\n\t\"github.com\/roasbeef\/btcd\/chaincfg\/chainhash\"\n\t\"github.com\/roasbeef\/btcd\/txscript\"\n\t\"github.com\/roasbeef\/btcd\/wire\"\n\t\"github.com\/roasbeef\/btcutil\"\n)\n\n\/\/ ErrNotMine is an error denoting that a WalletController instance is unable\n\/\/ to spend a specifid output.\nvar ErrNotMine = errors.New(\"the passed output doesn't belong to the wallet\")\n\n\/\/ AddressType is a enum-like type which denotes the possible address types\n\/\/ WalletController supports.\ntype AddressType uint8\n\nconst (\n\t\/\/ WitnessPubKey represents a p2wkh address.\n\tWitnessPubKey AddressType = iota\n\n\t\/\/ NestedWitnessPubKey represents a p2sh output which is itself a\n\t\/\/ nested p2wkh output.\n\tNestedWitnessPubKey\n\n\t\/\/ PubKeyHash represents a regular p2pkh output.\n\tPubKeyHash\n)\n\n\/\/ Utxo is an unspent output denoted by its outpoint, and output value of the\n\/\/ original output.\ntype Utxo struct {\n\tValue btcutil.Amount\n\twire.OutPoint\n}\n\n\/\/ TransactionDetail describes a transaction with either inputs which belong to\n\/\/ the wallet, or has outputs that pay to the wallet.\ntype TransactionDetail struct {\n\t\/\/ Hash is the transaction hash of the transaction.\n\tHash chainhash.Hash\n\n\t\/\/ Value is the net value of this transaction (in satoshis) from the\n\t\/\/ PoV of the wallet. If this transaction purely spends from the\n\t\/\/ wallet's funds, then this value will be negative. Similarly, if this\n\t\/\/ transaction credits the wallet, then this value will be positive.\n\tValue btcutil.Amount\n\n\t\/\/ NumConfirmations is the number of confirmations this transaction\n\t\/\/ has. If the transaction is unconfirmed, then this value will be\n\t\/\/ zero.\n\tNumConfirmations int32\n\n\t\/\/ BlockHeight is the hash of the block which includes this\n\t\/\/ transaction. Unconfirmed transactions will have a nil value for this\n\t\/\/ field.\n\tBlockHash *chainhash.Hash\n\n\t\/\/ BlockHeight is the height of the block including this transaction.\n\t\/\/ Unconfirmed transaction will show a height of zero.\n\tBlockHeight int32\n\n\t\/\/ Timestamp is the unix timestamp of the block including this\n\t\/\/ transaction. 
If the transaction is unconfirmed, then this will be a\n\t\/\/ timestamp of txn creation.\n\tTimestamp int64\n\n\t\/\/ TotalFees is the total fee in satoshis paid by this transaction.\n\tTotalFees int64\n}\n\n\/\/ TransactionSubscription is an interface which describes an object capable of\n\/\/ receiving notifications of new transaction related to the underlying wallet.\n\/\/ TODO(roasbeef): add balance updates?\ntype TransactionSubscription interface {\n\t\/\/ ConfirmedTransactions returns a channel which will be sent on as new\n\t\/\/ relevant transactions are confirmed.\n\tConfirmedTransactions() chan *TransactionDetail\n\n\t\/\/ UnconfirmedTransactions returns a channel which will be sent on as\n\t\/\/ new relevant transactions are seen within the network.\n\tUnconfirmedTransactions() chan *TransactionDetail\n\n\t\/\/ Cancel finalizes the subscription, cleaning up any resources\n\t\/\/ allocated.\n\tCancel()\n}\n\n\/\/ WalletController defines an abstract interface for controlling a local Pure\n\/\/ Go wallet, a local or remote wallet via an RPC mechanism, or possibly even\n\/\/ a daemon assisted hardware wallet. This interface serves the purpose of\n\/\/ allowing LightningWallet to be seamlessly compatible with several wallets\n\/\/ such as: uspv, btcwallet, Bitcoin Core, Electrum, etc. This interface then\n\/\/ serves as a \"base wallet\", with Lightning Network awareness taking place at\n\/\/ a \"higher\" level of abstraction. Essentially, an overlay wallet.\n\/\/ Implementors of this interface must closely adhere to the documented\n\/\/ behavior of all interface methods in order to ensure identical behavior\n\/\/ across all concrete implementations.\ntype WalletController interface {\n\t\/\/ FetchInputInfo queries for the WalletController's knowledge of the\n\t\/\/ passed outpoint. If the base wallet determines this output is under\n\t\/\/ its control, then the original txout should be returned. Otherwise,\n\t\/\/ a non-nil error value of ErrNotMine should be returned instead.\n\tFetchInputInfo(prevOut *wire.OutPoint) (*wire.TxOut, error)\n\n\t\/\/ ConfirmedBalance returns the sum of all the wallet's unspent outputs\n\t\/\/ that have at least confs confirmations. If confs is set to zero,\n\t\/\/ then all unspent outputs, including those currently in the mempool\n\t\/\/ will be included in the final sum.\n\tConfirmedBalance(confs int32, witness bool) (btcutil.Amount, error)\n\n\t\/\/ NewAddress returns the next external or internal address for the\n\t\/\/ wallet dictated by the value of the `change` parameter. If change is\n\t\/\/ true, then an internal address should be used, otherwise an external\n\t\/\/ address should be returned. The type of address returned is dictated\n\t\/\/ by the wallet's capabilities, and may be of type: p2sh, p2pkh,\n\t\/\/ p2wkh, p2wsh, etc.\n\tNewAddress(addrType AddressType, change bool) (btcutil.Address, error)\n\n\t\/\/ GetPrivKey retrives the underlying private key associated with the\n\t\/\/ passed address. If the wallet is unable to locate this private key\n\t\/\/ due to the address not being under control of the wallet, then an\n\t\/\/ error should be returned.\n\tGetPrivKey(a btcutil.Address) (*btcec.PrivateKey, error)\n\n\t\/\/ NewRawKey returns a raw private key controlled by the wallet. 
These\n\t\/\/ keys are used for the 2-of-2 multi-sig outputs for funding\n\t\/\/ transactions, as well as the pub key used for commitment transactions.\n\t\/\/\n\t\/\/ NOTE: The wallet MUST watch for on-chain outputs created to a p2wpkh\n\t\/\/ script using keys returned by this function.\n\tNewRawKey() (*btcec.PublicKey, error)\n\n\t\/\/ FetchRootKey returns a root key which will be used by the\n\t\/\/ LightningWallet to deterministically generate secrets. The private\n\t\/\/ key returned by this method should remain constant in-between\n\t\/\/ WalletController restarts.\n\tFetchRootKey() (*btcec.PrivateKey, error)\n\n\t\/\/ SendOutputs funds, signs, and broadcasts a Bitcoin transaction\n\t\/\/ paying out to the specified outputs. In the case the wallet has\n\t\/\/ insufficient funds, or the outputs are non-standard, an error\n\t\/\/ should be returned.\n\tSendOutputs(outputs []*wire.TxOut) (*chainhash.Hash, error)\n\n\t\/\/ ListUnspentWitness returns all unspent outputs which are version 0\n\t\/\/ witness programs. The 'confirms' parameter indicates the minimum\n\t\/\/ number of confirmations an output needs in order to be returned by\n\t\/\/ this method. Passing -1 as 'confirms' indicates that even\n\t\/\/ unconfirmed outputs should be returned.\n\tListUnspentWitness(confirms int32) ([]*Utxo, error)\n\n\t\/\/ ListTransactionDetails returns a list of all transactions which are\n\t\/\/ relevant to the wallet.\n\tListTransactionDetails() ([]*TransactionDetail, error)\n\n\t\/\/ LockOutpoint marks an outpoint as locked meaning it will no longer\n\t\/\/ be deemed as eligible for coin selection. Locking outputs are\n\t\/\/ utilized in order to avoid race conditions when selecting inputs for\n\t\/\/ usage when funding a channel.\n\tLockOutpoint(o wire.OutPoint)\n\n\t\/\/ UnlockOutpoint unlocks an previously locked output, marking it\n\t\/\/ eligible for coin selection.\n\tUnlockOutpoint(o wire.OutPoint)\n\n\t\/\/ PublishTransaction performs cursory validation (dust checks, etc),\n\t\/\/ then finally broadcasts the passed transaction to the Bitcoin network.\n\tPublishTransaction(tx *wire.MsgTx) error\n\n\t\/\/ SubscribeTransactions returns a TransactionSubscription client which\n\t\/\/ is capable of receiving async notifications as new transactions\n\t\/\/ related to the wallet are seen within the network, or found in\n\t\/\/ blocks.\n\t\/\/\n\t\/\/ NOTE: a non-nil error should be returned if notifications aren't\n\t\/\/ supported.\n\t\/\/\n\t\/\/ TODO(roasbeef): make distinct interface?\n\tSubscribeTransactions() (TransactionSubscription, error)\n\n\t\/\/ IsSynced returns a boolean indicating if from the PoV of the wallet,\n\t\/\/ it has fully synced to the current best block in the main chain.\n\tIsSynced() (bool, error)\n\n\t\/\/ Start initializes the wallet, making any necessary connections,\n\t\/\/ starting up required goroutines etc.\n\tStart() error\n\n\t\/\/ Stop signals the wallet for shutdown. Shutdown may entail closing\n\t\/\/ any active sockets, database handles, stopping goroutines, etc.\n\tStop() error\n}\n\n\/\/ BlockChainIO is a dedicated source which will be used to obtain queries\n\/\/ related to the current state of the blockchain. 
The data returned by each of\n\/\/ the defined methods within this interface should always return the most up\n\/\/ to date data possible.\n\/\/\n\/\/ TODO(roasbeef): move to diff package perhaps?\n\/\/ TODO(roasbeef): move publish txn here?\ntype BlockChainIO interface {\n\t\/\/ GetBestBlock returns the current height and block hash of the valid\n\t\/\/ most-work chain the implementation is aware of.\n\tGetBestBlock() (*chainhash.Hash, int32, error)\n\n\t\/\/ GetTxOut returns the original output referenced by the passed\n\t\/\/ outpoint.\n\tGetUtxo(txid *chainhash.Hash, index uint32) (*wire.TxOut, error)\n\n\t\/\/ GetTransaction returns the full transaction identified by the passed\n\t\/\/ transaction ID.\n\tGetTransaction(txid *chainhash.Hash) (*wire.MsgTx, error)\n\n\t\/\/ GetBlockHash returns the hash of the block in the best blockchain\n\t\/\/ at the given height.\n\tGetBlockHash(blockHeight int64) (*chainhash.Hash, error)\n\n\t\/\/ GetBlock returns the block in the main chain identified by the given\n\t\/\/ hash.\n\tGetBlock(blockHash *chainhash.Hash) (*wire.MsgBlock, error)\n}\n\n\/\/ SignDescriptor houses the necessary information required to successfully sign\n\/\/ a given output. This struct is used by the Signer interface in order to gain\n\/\/ access to critical data needed to generate a valid signature.\ntype SignDescriptor struct {\n\t\/\/ Pubkey is the public key to which the signature should be generated\n\t\/\/ over. The Signer should then generate a signature with the private\n\t\/\/ key corresponding to this public key.\n\tPubKey *btcec.PublicKey\n\n\t\/\/ PrivateTweak is a scalar value that should be added to the private\n\t\/\/ key corresponding to the above public key to obtain the private key\n\t\/\/ to be used to sign this input. This value is typically a leaf node\n\t\/\/ from the revocation tree.\n\t\/\/\n\t\/\/ NOTE: If this value is nil, then the input can be signed using only\n\t\/\/ the above public key.\n\tPrivateTweak []byte\n\n\t\/\/ WitnessScript is the full script required to properly redeem the\n\t\/\/ output. This field will only be populated if a p2wsh or a p2sh\n\t\/\/ output is being signed.\n\tWitnessScript []byte\n\n\t\/\/ Output is the target output which should be signed. The PkScript and\n\t\/\/ Value fields within the output should be properly populated,\n\t\/\/ otherwise an invalid signature may be generated.\n\tOutput *wire.TxOut\n\n\t\/\/ HashType is the target sighash type that should be used when\n\t\/\/ generating the final sighash, and signature.\n\tHashType txscript.SigHashType\n\n\t\/\/ SigHashes is the pre-computed sighash midstate to be used when\n\t\/\/ generating the final sighash for signing.\n\tSigHashes *txscript.TxSigHashes\n\n\t\/\/ InputIndex is the target input within the transaction that should be\n\t\/\/ signed.\n\tInputIndex int\n}\n\n\/\/ Signer represents an abstract object capable of generating raw signatures as\n\/\/ well as full complete input scripts given a valid SignDescriptor and\n\/\/ transaction. 
This interface fully abstracts away signing paving the way for\n\/\/ Signer implementations such as hardware wallets, hardware tokens, HSM's, or\n\/\/ simply a regular wallet.\ntype Signer interface {\n\t\/\/ SignOutputRaw generates a signature for the passed transaction\n\t\/\/ according to the data within the passed SignDescriptor.\n\t\/\/\n\t\/\/ NOTE: The resulting signature should be void of a sighash byte.\n\tSignOutputRaw(tx *wire.MsgTx, signDesc *SignDescriptor) ([]byte, error)\n\n\t\/\/ ComputeInputScript generates a complete InputIndex for the passed\n\t\/\/ transaction with the signature as defined within the passed\n\t\/\/ SignDescriptor. This method should be capable of generating the\n\t\/\/ proper input script for both regular p2wkh output and p2wkh outputs\n\t\/\/ nested within a regular p2sh output.\n\tComputeInputScript(tx *wire.MsgTx, signDesc *SignDescriptor) (*InputScript, error)\n}\n\n\/\/ WalletDriver represents a \"driver\" for a particular concrete\n\/\/ WalletController implementation. A driver is identified by a globally unique\n\/\/ string identifier along with a 'New()' method which is responsible for\n\/\/ initializing a particular WalletController concrete implementation.\ntype WalletDriver struct {\n\t\/\/ WalletType is a string which uniquely identifes the WalletController\n\t\/\/ that this driver, drives.\n\tWalletType string\n\n\t\/\/ New creates a new instance of a concrete WalletController\n\t\/\/ implementation given a variadic set up arguments. The function takes\n\t\/\/ a varidaic number of interface parameters in order to provide\n\t\/\/ initialization flexibility, thereby accommodating several potential\n\t\/\/ WalletController implementations.\n\tNew func(args ...interface{}) (WalletController, error)\n}\n\nvar (\n\twallets = make(map[string]*WalletDriver)\n\tregisterMtx sync.Mutex\n)\n\n\/\/ RegisteredWallets returns a slice of all currently registered notifiers.\n\/\/\n\/\/ NOTE: This function is safe for concurrent access.\nfunc RegisteredWallets() []*WalletDriver {\n\tregisterMtx.Lock()\n\tdefer registerMtx.Unlock()\n\n\tregisteredWallets := make([]*WalletDriver, 0, len(wallets))\n\tfor _, wallet := range wallets {\n\t\tregisteredWallets = append(registeredWallets, wallet)\n\t}\n\n\treturn registeredWallets\n}\n\n\/\/ RegisterWallet registers a WalletDriver which is capable of driving a\n\/\/ concrete WalletController interface. 
In the case that this driver has\n\/\/ already been registered, an error is returned.\n\/\/\n\/\/ NOTE: This function is safe for concurrent access.\nfunc RegisterWallet(driver *WalletDriver) error {\n\tregisterMtx.Lock()\n\tdefer registerMtx.Unlock()\n\n\tif _, ok := wallets[driver.WalletType]; ok {\n\t\treturn fmt.Errorf(\"wallet already registered\")\n\t}\n\n\twallets[driver.WalletType] = driver\n\n\treturn nil\n}\n\n\/\/ SupportedWallets returns a slice of strings that represents the wallet\n\/\/ drivers that have been registered and are therefore supported.\n\/\/\n\/\/ NOTE: This function is safe for concurrent access.\nfunc SupportedWallets() []string {\n\tregisterMtx.Lock()\n\tdefer registerMtx.Unlock()\n\n\tsupportedWallets := make([]string, 0, len(wallets))\n\tfor walletName := range wallets {\n\t\tsupportedWallets = append(supportedWallets, walletName)\n\t}\n\n\treturn supportedWallets\n}\n<commit_msg>lnwallet: introduce the MessageSigner interface<commit_after>package lnwallet\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com\/roasbeef\/btcd\/btcec\"\n\t\"github.com\/roasbeef\/btcd\/chaincfg\/chainhash\"\n\t\"github.com\/roasbeef\/btcd\/txscript\"\n\t\"github.com\/roasbeef\/btcd\/wire\"\n\t\"github.com\/roasbeef\/btcutil\"\n)\n\n\/\/ ErrNotMine is an error denoting that a WalletController instance is unable\n\/\/ to spend a specifid output.\nvar ErrNotMine = errors.New(\"the passed output doesn't belong to the wallet\")\n\n\/\/ AddressType is a enum-like type which denotes the possible address types\n\/\/ WalletController supports.\ntype AddressType uint8\n\nconst (\n\t\/\/ WitnessPubKey represents a p2wkh address.\n\tWitnessPubKey AddressType = iota\n\n\t\/\/ NestedWitnessPubKey represents a p2sh output which is itself a\n\t\/\/ nested p2wkh output.\n\tNestedWitnessPubKey\n\n\t\/\/ PubKeyHash represents a regular p2pkh output.\n\tPubKeyHash\n)\n\n\/\/ Utxo is an unspent output denoted by its outpoint, and output value of the\n\/\/ original output.\ntype Utxo struct {\n\tValue btcutil.Amount\n\twire.OutPoint\n}\n\n\/\/ TransactionDetail describes a transaction with either inputs which belong to\n\/\/ the wallet, or has outputs that pay to the wallet.\ntype TransactionDetail struct {\n\t\/\/ Hash is the transaction hash of the transaction.\n\tHash chainhash.Hash\n\n\t\/\/ Value is the net value of this transaction (in satoshis) from the\n\t\/\/ PoV of the wallet. If this transaction purely spends from the\n\t\/\/ wallet's funds, then this value will be negative. Similarly, if this\n\t\/\/ transaction credits the wallet, then this value will be positive.\n\tValue btcutil.Amount\n\n\t\/\/ NumConfirmations is the number of confirmations this transaction\n\t\/\/ has. If the transaction is unconfirmed, then this value will be\n\t\/\/ zero.\n\tNumConfirmations int32\n\n\t\/\/ BlockHeight is the hash of the block which includes this\n\t\/\/ transaction. Unconfirmed transactions will have a nil value for this\n\t\/\/ field.\n\tBlockHash *chainhash.Hash\n\n\t\/\/ BlockHeight is the height of the block including this transaction.\n\t\/\/ Unconfirmed transaction will show a height of zero.\n\tBlockHeight int32\n\n\t\/\/ Timestamp is the unix timestamp of the block including this\n\t\/\/ transaction. 
If the transaction is unconfirmed, then this will be a\n\t\/\/ timestamp of txn creation.\n\tTimestamp int64\n\n\t\/\/ TotalFees is the total fee in satoshis paid by this transaction.\n\tTotalFees int64\n}\n\n\/\/ TransactionSubscription is an interface which describes an object capable of\n\/\/ receiving notifications of new transaction related to the underlying wallet.\n\/\/ TODO(roasbeef): add balance updates?\ntype TransactionSubscription interface {\n\t\/\/ ConfirmedTransactions returns a channel which will be sent on as new\n\t\/\/ relevant transactions are confirmed.\n\tConfirmedTransactions() chan *TransactionDetail\n\n\t\/\/ UnconfirmedTransactions returns a channel which will be sent on as\n\t\/\/ new relevant transactions are seen within the network.\n\tUnconfirmedTransactions() chan *TransactionDetail\n\n\t\/\/ Cancel finalizes the subscription, cleaning up any resources\n\t\/\/ allocated.\n\tCancel()\n}\n\n\/\/ WalletController defines an abstract interface for controlling a local Pure\n\/\/ Go wallet, a local or remote wallet via an RPC mechanism, or possibly even\n\/\/ a daemon assisted hardware wallet. This interface serves the purpose of\n\/\/ allowing LightningWallet to be seamlessly compatible with several wallets\n\/\/ such as: uspv, btcwallet, Bitcoin Core, Electrum, etc. This interface then\n\/\/ serves as a \"base wallet\", with Lightning Network awareness taking place at\n\/\/ a \"higher\" level of abstraction. Essentially, an overlay wallet.\n\/\/ Implementors of this interface must closely adhere to the documented\n\/\/ behavior of all interface methods in order to ensure identical behavior\n\/\/ across all concrete implementations.\ntype WalletController interface {\n\t\/\/ FetchInputInfo queries for the WalletController's knowledge of the\n\t\/\/ passed outpoint. If the base wallet determines this output is under\n\t\/\/ its control, then the original txout should be returned. Otherwise,\n\t\/\/ a non-nil error value of ErrNotMine should be returned instead.\n\tFetchInputInfo(prevOut *wire.OutPoint) (*wire.TxOut, error)\n\n\t\/\/ ConfirmedBalance returns the sum of all the wallet's unspent outputs\n\t\/\/ that have at least confs confirmations. If confs is set to zero,\n\t\/\/ then all unspent outputs, including those currently in the mempool\n\t\/\/ will be included in the final sum.\n\tConfirmedBalance(confs int32, witness bool) (btcutil.Amount, error)\n\n\t\/\/ NewAddress returns the next external or internal address for the\n\t\/\/ wallet dictated by the value of the `change` parameter. If change is\n\t\/\/ true, then an internal address should be used, otherwise an external\n\t\/\/ address should be returned. The type of address returned is dictated\n\t\/\/ by the wallet's capabilities, and may be of type: p2sh, p2pkh,\n\t\/\/ p2wkh, p2wsh, etc.\n\tNewAddress(addrType AddressType, change bool) (btcutil.Address, error)\n\n\t\/\/ GetPrivKey retrives the underlying private key associated with the\n\t\/\/ passed address. If the wallet is unable to locate this private key\n\t\/\/ due to the address not being under control of the wallet, then an\n\t\/\/ error should be returned.\n\tGetPrivKey(a btcutil.Address) (*btcec.PrivateKey, error)\n\n\t\/\/ NewRawKey returns a raw private key controlled by the wallet. 
These\n\t\/\/ keys are used for the 2-of-2 multi-sig outputs for funding\n\t\/\/ transactions, as well as the pub key used for commitment transactions.\n\t\/\/\n\t\/\/ NOTE: The wallet MUST watch for on-chain outputs created to a p2wpkh\n\t\/\/ script using keys returned by this function.\n\tNewRawKey() (*btcec.PublicKey, error)\n\n\t\/\/ FetchRootKey returns a root key which will be used by the\n\t\/\/ LightningWallet to deterministically generate secrets. The private\n\t\/\/ key returned by this method should remain constant in-between\n\t\/\/ WalletController restarts.\n\tFetchRootKey() (*btcec.PrivateKey, error)\n\n\t\/\/ SendOutputs funds, signs, and broadcasts a Bitcoin transaction\n\t\/\/ paying out to the specified outputs. In the case the wallet has\n\t\/\/ insufficient funds, or the outputs are non-standard, an error\n\t\/\/ should be returned.\n\tSendOutputs(outputs []*wire.TxOut) (*chainhash.Hash, error)\n\n\t\/\/ ListUnspentWitness returns all unspent outputs which are version 0\n\t\/\/ witness programs. The 'confirms' parameter indicates the minimum\n\t\/\/ number of confirmations an output needs in order to be returned by\n\t\/\/ this method. Passing -1 as 'confirms' indicates that even\n\t\/\/ unconfirmed outputs should be returned.\n\tListUnspentWitness(confirms int32) ([]*Utxo, error)\n\n\t\/\/ ListTransactionDetails returns a list of all transactions which are\n\t\/\/ relevant to the wallet.\n\tListTransactionDetails() ([]*TransactionDetail, error)\n\n\t\/\/ LockOutpoint marks an outpoint as locked meaning it will no longer\n\t\/\/ be deemed as eligible for coin selection. Locking outputs are\n\t\/\/ utilized in order to avoid race conditions when selecting inputs for\n\t\/\/ usage when funding a channel.\n\tLockOutpoint(o wire.OutPoint)\n\n\t\/\/ UnlockOutpoint unlocks an previously locked output, marking it\n\t\/\/ eligible for coin selection.\n\tUnlockOutpoint(o wire.OutPoint)\n\n\t\/\/ PublishTransaction performs cursory validation (dust checks, etc),\n\t\/\/ then finally broadcasts the passed transaction to the Bitcoin network.\n\tPublishTransaction(tx *wire.MsgTx) error\n\n\t\/\/ SubscribeTransactions returns a TransactionSubscription client which\n\t\/\/ is capable of receiving async notifications as new transactions\n\t\/\/ related to the wallet are seen within the network, or found in\n\t\/\/ blocks.\n\t\/\/\n\t\/\/ NOTE: a non-nil error should be returned if notifications aren't\n\t\/\/ supported.\n\t\/\/\n\t\/\/ TODO(roasbeef): make distinct interface?\n\tSubscribeTransactions() (TransactionSubscription, error)\n\n\t\/\/ IsSynced returns a boolean indicating if from the PoV of the wallet,\n\t\/\/ it has fully synced to the current best block in the main chain.\n\tIsSynced() (bool, error)\n\n\t\/\/ Start initializes the wallet, making any necessary connections,\n\t\/\/ starting up required goroutines etc.\n\tStart() error\n\n\t\/\/ Stop signals the wallet for shutdown. Shutdown may entail closing\n\t\/\/ any active sockets, database handles, stopping goroutines, etc.\n\tStop() error\n}\n\n\/\/ BlockChainIO is a dedicated source which will be used to obtain queries\n\/\/ related to the current state of the blockchain. 
The data returned by each of\n\/\/ the defined methods within this interface should always return the most up\n\/\/ to date data possible.\n\/\/\n\/\/ TODO(roasbeef): move to diff package perhaps?\n\/\/ TODO(roasbeef): move publish txn here?\ntype BlockChainIO interface {\n\t\/\/ GetBestBlock returns the current height and block hash of the valid\n\t\/\/ most-work chain the implementation is aware of.\n\tGetBestBlock() (*chainhash.Hash, int32, error)\n\n\t\/\/ GetTxOut returns the original output referenced by the passed\n\t\/\/ outpoint.\n\tGetUtxo(txid *chainhash.Hash, index uint32) (*wire.TxOut, error)\n\n\t\/\/ GetTransaction returns the full transaction identified by the passed\n\t\/\/ transaction ID.\n\tGetTransaction(txid *chainhash.Hash) (*wire.MsgTx, error)\n\n\t\/\/ GetBlockHash returns the hash of the block in the best blockchain\n\t\/\/ at the given height.\n\tGetBlockHash(blockHeight int64) (*chainhash.Hash, error)\n\n\t\/\/ GetBlock returns the block in the main chain identified by the given\n\t\/\/ hash.\n\tGetBlock(blockHash *chainhash.Hash) (*wire.MsgBlock, error)\n}\n\n\/\/ SignDescriptor houses the necessary information required to successfully sign\n\/\/ a given output. This struct is used by the Signer interface in order to gain\n\/\/ access to critical data needed to generate a valid signature.\ntype SignDescriptor struct {\n\t\/\/ Pubkey is the public key to which the signature should be generated\n\t\/\/ over. The Signer should then generate a signature with the private\n\t\/\/ key corresponding to this public key.\n\tPubKey *btcec.PublicKey\n\n\t\/\/ PrivateTweak is a scalar value that should be added to the private\n\t\/\/ key corresponding to the above public key to obtain the private key\n\t\/\/ to be used to sign this input. This value is typically a leaf node\n\t\/\/ from the revocation tree.\n\t\/\/\n\t\/\/ NOTE: If this value is nil, then the input can be signed using only\n\t\/\/ the above public key.\n\tPrivateTweak []byte\n\n\t\/\/ WitnessScript is the full script required to properly redeem the\n\t\/\/ output. This field will only be populated if a p2wsh or a p2sh\n\t\/\/ output is being signed.\n\tWitnessScript []byte\n\n\t\/\/ Output is the target output which should be signed. The PkScript and\n\t\/\/ Value fields within the output should be properly populated,\n\t\/\/ otherwise an invalid signature may be generated.\n\tOutput *wire.TxOut\n\n\t\/\/ HashType is the target sighash type that should be used when\n\t\/\/ generating the final sighash, and signature.\n\tHashType txscript.SigHashType\n\n\t\/\/ SigHashes is the pre-computed sighash midstate to be used when\n\t\/\/ generating the final sighash for signing.\n\tSigHashes *txscript.TxSigHashes\n\n\t\/\/ InputIndex is the target input within the transaction that should be\n\t\/\/ signed.\n\tInputIndex int\n}\n\n\/\/ Signer represents an abstract object capable of generating raw signatures as\n\/\/ well as full complete input scripts given a valid SignDescriptor and\n\/\/ transaction. 
This interface fully abstracts away signing paving the way for\n\/\/ Signer implementations such as hardware wallets, hardware tokens, HSM's, or\n\/\/ simply a regular wallet.\ntype Signer interface {\n\t\/\/ SignOutputRaw generates a signature for the passed transaction\n\t\/\/ according to the data within the passed SignDescriptor.\n\t\/\/\n\t\/\/ NOTE: The resulting signature should be void of a sighash byte.\n\tSignOutputRaw(tx *wire.MsgTx, signDesc *SignDescriptor) ([]byte, error)\n\n\t\/\/ ComputeInputScript generates a complete InputIndex for the passed\n\t\/\/ transaction with the signature as defined within the passed\n\t\/\/ SignDescriptor. This method should be capable of generating the\n\t\/\/ proper input script for both regular p2wkh output and p2wkh outputs\n\t\/\/ nested within a regular p2sh output.\n\tComputeInputScript(tx *wire.MsgTx, signDesc *SignDescriptor) (*InputScript, error)\n}\n\n\/\/ MessageSigner represents an abstract object capable of signing arbitrary\n\/\/ messages. The capabilities of this interface are used to sign announcements\n\/\/ to the network, or just arbitrary messages that leverage the wallet's keys\n\/\/ to attest to some message.\ntype MessageSigner interface {\n\t\/\/ SignMessage attempts to sign a target message with the private key\n\t\/\/ that corresponds to the passed public key. If the target private key\n\t\/\/ is unable to be found, then an error will be returned. The actual\n\t\/\/ digest signed is the double SHA-256 of the passed message.\n\tSignMessage(pubKey *btcec.PublicKey, msg []byte) (*btcec.Signature, error)\n}\n\n\/\/ WalletDriver represents a \"driver\" for a particular concrete\n\/\/ WalletController implementation. A driver is identified by a globally unique\n\/\/ string identifier along with a 'New()' method which is responsible for\n\/\/ initializing a particular WalletController concrete implementation.\ntype WalletDriver struct {\n\t\/\/ WalletType is a string which uniquely identifes the WalletController\n\t\/\/ that this driver, drives.\n\tWalletType string\n\n\t\/\/ New creates a new instance of a concrete WalletController\n\t\/\/ implementation given a variadic set up arguments. The function takes\n\t\/\/ a varidaic number of interface parameters in order to provide\n\t\/\/ initialization flexibility, thereby accommodating several potential\n\t\/\/ WalletController implementations.\n\tNew func(args ...interface{}) (WalletController, error)\n}\n\nvar (\n\twallets = make(map[string]*WalletDriver)\n\tregisterMtx sync.Mutex\n)\n\n\/\/ RegisteredWallets returns a slice of all currently registered notifiers.\n\/\/\n\/\/ NOTE: This function is safe for concurrent access.\nfunc RegisteredWallets() []*WalletDriver {\n\tregisterMtx.Lock()\n\tdefer registerMtx.Unlock()\n\n\tregisteredWallets := make([]*WalletDriver, 0, len(wallets))\n\tfor _, wallet := range wallets {\n\t\tregisteredWallets = append(registeredWallets, wallet)\n\t}\n\n\treturn registeredWallets\n}\n\n\/\/ RegisterWallet registers a WalletDriver which is capable of driving a\n\/\/ concrete WalletController interface. 
In the case that this driver has\n\/\/ already been registered, an error is returned.\n\/\/\n\/\/ NOTE: This function is safe for concurrent access.\nfunc RegisterWallet(driver *WalletDriver) error {\n\tregisterMtx.Lock()\n\tdefer registerMtx.Unlock()\n\n\tif _, ok := wallets[driver.WalletType]; ok {\n\t\treturn fmt.Errorf(\"wallet already registered\")\n\t}\n\n\twallets[driver.WalletType] = driver\n\n\treturn nil\n}\n\n\/\/ SupportedWallets returns a slice of strings that represents the wallet\n\/\/ drivers that have been registered and are therefore supported.\n\/\/\n\/\/ NOTE: This function is safe for concurrent access.\nfunc SupportedWallets() []string {\n\tregisterMtx.Lock()\n\tdefer registerMtx.Unlock()\n\n\tsupportedWallets := make([]string, 0, len(wallets))\n\tfor walletName := range wallets {\n\t\tsupportedWallets = append(supportedWallets, walletName)\n\t}\n\n\treturn supportedWallets\n}\n<|endoftext|>"} {"text":"<commit_before>package cluster\n\nimport (\n\t\"context\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/lxc\/lxd\/client\"\n\t\"github.com\/lxc\/lxd\/lxd\/db\"\n\t\"github.com\/lxc\/lxd\/lxd\/endpoints\"\n\t\"github.com\/lxc\/lxd\/lxd\/task\"\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n\tlog \"github.com\/lxc\/lxd\/shared\/log15\"\n\t\"github.com\/lxc\/lxd\/shared\/logger\"\n)\n\nvar listeners = map[string]*lxd.EventListener{}\nvar listenersLock sync.Mutex\n\n\/\/ Events starts a task that continuously monitors the list of cluster nodes and\n\/\/ maintains a pool of websocket connections against all of them, in order to\n\/\/ get notified about events.\n\/\/\n\/\/ Whenever an event is received the given callback is invoked.\nfunc Events(endpoints *endpoints.Endpoints, cluster *db.Cluster, serverCert func() *shared.CertInfo, f func(int64, api.Event)) (task.Func, task.Schedule) {\n\t\/\/ Update our pool of event listeners. 
Since database queries are\n\t\/\/ blocking, we spawn the actual logic in a goroutine, to abort\n\t\/\/ immediately when we receive the stop signal.\n\tupdate := func(ctx context.Context) {\n\t\tch := make(chan struct{})\n\t\tgo func() {\n\t\t\teventsUpdateListeners(endpoints, cluster, serverCert, nil, f)\n\t\t\tch <- struct{}{}\n\t\t}()\n\t\tselect {\n\t\tcase <-ch:\n\t\tcase <-ctx.Done():\n\t\t}\n\t}\n\n\tschedule := task.Every(time.Second)\n\n\treturn update, schedule\n}\n\nfunc eventsUpdateListeners(endpoints *endpoints.Endpoints, cluster *db.Cluster, serverCert func() *shared.CertInfo, members map[int64]APIHeartbeatMember, f func(int64, api.Event)) {\n\t\/\/ Get the current cluster nodes.\n\tvar nodes []db.NodeInfo\n\tvar offlineThreshold time.Duration\n\n\terr := cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\tvar err error\n\n\t\tnodes, err = tx.GetNodes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tofflineThreshold, err = tx.GetNodeOfflineThreshold()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlogger.Warn(\"Failed to get current cluster members\", log.Ctx{\"err\": err})\n\t\treturn\n\t}\n\tif len(nodes) == 1 {\n\t\treturn \/\/ Either we're not clustered or this is a single-node cluster\n\t}\n\n\taddress := endpoints.NetworkAddress()\n\n\taddresses := make([]string, len(nodes))\n\tfor i, node := range nodes {\n\t\taddresses[i] = node.Address\n\n\t\tif node.Address == address {\n\t\t\tcontinue\n\t\t}\n\n\t\tlistenersLock.Lock()\n\t\tlistener, ok := listeners[node.Address]\n\n\t\t\/\/ Don't bother trying to connect to offline nodes, or to ourselves.\n\t\tif node.IsOffline(offlineThreshold) {\n\t\t\tif ok {\n\t\t\t\tlistener.Disconnect()\n\t\t\t}\n\n\t\t\tlistenersLock.Unlock()\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ The node has already a listener associated to it.\n\t\tif ok {\n\t\t\t\/\/ Double check that the listener is still\n\t\t\t\/\/ connected. 
If it is, just move on, other\n\t\t\t\/\/ we'll try to connect again.\n\t\t\tif listeners[node.Address].IsActive() {\n\t\t\t\tlistenersLock.Unlock()\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdelete(listeners, node.Address)\n\t\t}\n\t\tlistenersLock.Unlock()\n\n\t\tlistener, err := eventsConnect(node.Address, endpoints.NetworkCert(), serverCert())\n\t\tif err != nil {\n\t\t\tlogger.Warn(\"Failed to get events from member\", log.Ctx{\"address\": node.Address, \"err\": err})\n\t\t\tcontinue\n\t\t}\n\t\tlogger.Debug(\"Listening for events on member\", log.Ctx{\"address\": node.Address})\n\t\tlistener.AddHandler(nil, func(event api.Event) { f(node.ID, event) })\n\n\t\tlistenersLock.Lock()\n\t\tlisteners[node.Address] = listener\n\t\tlistenersLock.Unlock()\n\t}\n\n\tlistenersLock.Lock()\n\tfor address, listener := range listeners {\n\t\tif !shared.StringInSlice(address, addresses) {\n\t\t\tlistener.Disconnect()\n\t\t\tdelete(listeners, address)\n\t\t}\n\t}\n\tlistenersLock.Unlock()\n}\n\n\/\/ Establish a client connection to get events from the given node.\nfunc eventsConnect(address string, networkCert *shared.CertInfo, serverCert *shared.CertInfo) (*lxd.EventListener, error) {\n\tclient, err := Connect(address, networkCert, serverCert, nil, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Set the project to the special wildcard in order to get notified\n\t\/\/ about all events across all projects.\n\tclient = client.UseProject(\"*\")\n\n\treturn client.GetEvents()\n}\n<commit_msg>lxd\/cluster\/events: Load members from global DB if no heartbeat members provided in eventsUpdateListeners<commit_after>package cluster\n\nimport (\n\t\"context\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/lxc\/lxd\/client\"\n\t\"github.com\/lxc\/lxd\/lxd\/db\"\n\t\"github.com\/lxc\/lxd\/lxd\/endpoints\"\n\t\"github.com\/lxc\/lxd\/lxd\/task\"\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n\tlog \"github.com\/lxc\/lxd\/shared\/log15\"\n\t\"github.com\/lxc\/lxd\/shared\/logger\"\n)\n\nvar listeners = map[string]*lxd.EventListener{}\nvar listenersLock sync.Mutex\n\n\/\/ Events starts a task that continuously monitors the list of cluster nodes and\n\/\/ maintains a pool of websocket connections against all of them, in order to\n\/\/ get notified about events.\n\/\/\n\/\/ Whenever an event is received the given callback is invoked.\nfunc Events(endpoints *endpoints.Endpoints, cluster *db.Cluster, serverCert func() *shared.CertInfo, f func(int64, api.Event)) (task.Func, task.Schedule) {\n\t\/\/ Update our pool of event listeners. 
Since database queries are\n\t\/\/ blocking, we spawn the actual logic in a goroutine, to abort\n\t\/\/ immediately when we receive the stop signal.\n\tupdate := func(ctx context.Context) {\n\t\tch := make(chan struct{})\n\t\tgo func() {\n\t\t\teventsUpdateListeners(endpoints, cluster, serverCert, nil, f)\n\t\t\tch <- struct{}{}\n\t\t}()\n\t\tselect {\n\t\tcase <-ch:\n\t\tcase <-ctx.Done():\n\t\t}\n\t}\n\n\tschedule := task.Every(time.Second)\n\n\treturn update, schedule\n}\n\nfunc eventsUpdateListeners(endpoints *endpoints.Endpoints, cluster *db.Cluster, serverCert func() *shared.CertInfo, members map[int64]APIHeartbeatMember, f func(int64, api.Event)) {\n\t\/\/ If no heartbeat members are provided, populate from the global database.\n\tif members == nil {\n\t\tvar dbMembers []db.NodeInfo\n\t\tvar offlineThreshold time.Duration\n\n\t\terr := cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\t\tvar err error\n\n\t\t\tdbMembers, err = tx.GetNodes()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tofflineThreshold, err = tx.GetNodeOfflineThreshold()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\tlogger.Warn(\"Failed to get current cluster members\", log.Ctx{\"err\": err})\n\t\t\treturn\n\t\t}\n\n\t\tmembers = make(map[int64]APIHeartbeatMember, len(dbMembers))\n\t\tfor _, dbMember := range dbMembers {\n\t\t\tmembers[dbMember.ID] = APIHeartbeatMember{\n\t\t\t\tID: dbMember.ID,\n\t\t\t\tName: dbMember.Name,\n\t\t\t\tAddress: dbMember.Address,\n\t\t\t\tLastHeartbeat: dbMember.Heartbeat,\n\t\t\t\tOnline: !dbMember.IsOffline(offlineThreshold),\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Get the current cluster nodes.\n\tvar nodes []db.NodeInfo\n\tvar offlineThreshold time.Duration\n\n\terr := cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\tvar err error\n\n\t\tnodes, err = tx.GetNodes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tofflineThreshold, err = tx.GetNodeOfflineThreshold()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlogger.Warn(\"Failed to get current cluster members\", log.Ctx{\"err\": err})\n\t\treturn\n\t}\n\tif len(nodes) == 1 {\n\t\treturn \/\/ Either we're not clustered or this is a single-node cluster\n\t}\n\n\taddress := endpoints.NetworkAddress()\n\n\taddresses := make([]string, len(nodes))\n\tfor i, node := range nodes {\n\t\taddresses[i] = node.Address\n\n\t\tif node.Address == address {\n\t\t\tcontinue\n\t\t}\n\n\t\tlistenersLock.Lock()\n\t\tlistener, ok := listeners[node.Address]\n\n\t\t\/\/ Don't bother trying to connect to offline nodes, or to ourselves.\n\t\tif node.IsOffline(offlineThreshold) {\n\t\t\tif ok {\n\t\t\t\tlistener.Disconnect()\n\t\t\t}\n\n\t\t\tlistenersLock.Unlock()\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ The node already has a listener associated with it.\n\t\tif ok {\n\t\t\t\/\/ Double check that the listener is still\n\t\t\t\/\/ connected. 
If it is, just move on, otherwise\n\t\t\t\/\/ we'll try to connect again.\n\t\t\tif listeners[node.Address].IsActive() {\n\t\t\t\tlistenersLock.Unlock()\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdelete(listeners, node.Address)\n\t\t}\n\t\tlistenersLock.Unlock()\n\n\t\tlistener, err := eventsConnect(node.Address, endpoints.NetworkCert(), serverCert())\n\t\tif err != nil {\n\t\t\tlogger.Warn(\"Failed to get events from member\", log.Ctx{\"address\": node.Address, \"err\": err})\n\t\t\tcontinue\n\t\t}\n\t\tlogger.Debug(\"Listening for events on member\", log.Ctx{\"address\": node.Address})\n\t\tlistener.AddHandler(nil, func(event api.Event) { f(node.ID, event) })\n\n\t\tlistenersLock.Lock()\n\t\tlisteners[node.Address] = listener\n\t\tlistenersLock.Unlock()\n\t}\n\n\tlistenersLock.Lock()\n\tfor address, listener := range listeners {\n\t\tif !shared.StringInSlice(address, addresses) {\n\t\t\tlistener.Disconnect()\n\t\t\tdelete(listeners, address)\n\t\t}\n\t}\n\tlistenersLock.Unlock()\n}\n\n\/\/ Establish a client connection to get events from the given node.\nfunc eventsConnect(address string, networkCert *shared.CertInfo, serverCert *shared.CertInfo) (*lxd.EventListener, error) {\n\tclient, err := Connect(address, networkCert, serverCert, nil, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Set the project to the special wildcard in order to get notified\n\t\/\/ about all events across all projects.\n\tclient = client.UseProject(\"*\")\n\n\treturn client.GetEvents()\n}\n<|endoftext|>"} {"text":"<commit_before>package device\n\nimport (\n\t\"fmt\"\n\n\tdeviceConfig \"github.com\/lxc\/lxd\/lxd\/device\/config\"\n\t\"github.com\/lxc\/lxd\/lxd\/instance\"\n\t\"github.com\/lxc\/lxd\/lxd\/instance\/instancetype\"\n\t\"github.com\/lxc\/lxd\/lxd\/network\"\n\t\"github.com\/lxc\/lxd\/lxd\/revert\"\n\t\"github.com\/lxc\/lxd\/lxd\/util\"\n)\n\ntype nicP2P struct {\n\tdeviceCommon\n}\n\n\/\/ CanHotPlug returns whether the device can be managed whilst the instance is running. 
Returns true.\nfunc (d *nicP2P) CanHotPlug() bool {\n\treturn true\n}\n\n\/\/ validateConfig checks the supplied config for correctness.\nfunc (d *nicP2P) validateConfig(instConf instance.ConfigReader) error {\n\tif !instanceSupported(instConf.Type(), instancetype.Container, instancetype.VM) {\n\t\treturn ErrUnsupportedDevType\n\t}\n\n\toptionalFields := []string{\n\t\t\"name\",\n\t\t\"mtu\",\n\t\t\"hwaddr\",\n\t\t\"host_name\",\n\t\t\"limits.ingress\",\n\t\t\"limits.egress\",\n\t\t\"limits.max\",\n\t\t\"ipv4.routes\",\n\t\t\"ipv6.routes\",\n\t\t\"boot.priority\",\n\t}\n\terr := d.config.Validate(nicValidationRules([]string{}, optionalFields, instConf))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ validateEnvironment checks the runtime environment for correctness.\nfunc (d *nicP2P) validateEnvironment() error {\n\tif d.inst.Type() == instancetype.Container && d.config[\"name\"] == \"\" {\n\t\treturn fmt.Errorf(\"Requires name property to start\")\n\t}\n\n\treturn nil\n}\n\n\/\/ UpdatableFields returns a list of fields that can be updated without triggering a device remove & add.\nfunc (d *nicP2P) UpdatableFields(oldDevice Type) []string {\n\t\/\/ Check old and new device types match.\n\t_, match := oldDevice.(*nicP2P)\n\tif !match {\n\t\treturn []string{}\n\t}\n\n\treturn []string{\"limits.ingress\", \"limits.egress\", \"limits.max\", \"ipv4.routes\", \"ipv6.routes\"}\n}\n\n\/\/ Start is run when the device is added to a running instance or instance is starting up.\nfunc (d *nicP2P) Start() (*deviceConfig.RunConfig, error) {\n\terr := d.validateEnvironment()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trevert := revert.New()\n\tdefer revert.Fail()\n\n\tsaveData := make(map[string]string)\n\tsaveData[\"host_name\"] = d.config[\"host_name\"]\n\n\tvar peerName string\n\n\t\/\/ Create veth pair and configure the peer end with custom hwaddr and mtu if supplied.\n\tif d.inst.Type() == instancetype.Container {\n\t\tif saveData[\"host_name\"] == \"\" {\n\t\t\tsaveData[\"host_name\"] = network.RandomDevName(\"veth\")\n\t\t}\n\t\tpeerName, err = networkCreateVethPair(saveData[\"host_name\"], d.config)\n\t} else if d.inst.Type() == instancetype.VM {\n\t\tif saveData[\"host_name\"] == \"\" {\n\t\t\tsaveData[\"host_name\"] = network.RandomDevName(\"tap\")\n\t\t}\n\t\tpeerName = saveData[\"host_name\"] \/\/ VMs use the host_name to link to the TAP FD.\n\t\terr = networkCreateTap(saveData[\"host_name\"], d.config)\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trevert.Add(func() { network.InterfaceRemove(saveData[\"host_name\"]) })\n\n\t\/\/ Populate device config with volatile fields if needed.\n\tnetworkVethFillFromVolatile(d.config, saveData)\n\n\t\/\/ Apply host-side routes to veth interface.\n\terr = networkNICRouteAdd(d.config[\"host_name\"], append(util.SplitNTrimSpace(d.config[\"ipv4.routes\"], \",\", -1, true), util.SplitNTrimSpace(d.config[\"ipv6.routes\"], \",\", -1, true)...)...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Apply host-side limits.\n\terr = networkSetupHostVethLimits(d.config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = d.volatileSet(saveData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trunConf := deviceConfig.RunConfig{}\n\trunConf.NetworkInterface = []deviceConfig.RunConfigItem{\n\t\t{Key: \"type\", Value: \"phys\"},\n\t\t{Key: \"name\", Value: d.config[\"name\"]},\n\t\t{Key: \"flags\", Value: \"up\"},\n\t\t{Key: \"link\", Value: peerName},\n\t}\n\n\tif d.inst.Type() == instancetype.VM 
{\n\t\trunConf.NetworkInterface = append(runConf.NetworkInterface,\n\t\t\t[]deviceConfig.RunConfigItem{\n\t\t\t\t{Key: \"devName\", Value: d.name},\n\t\t\t\t{Key: \"hwaddr\", Value: d.config[\"hwaddr\"]},\n\t\t\t}...)\n\t}\n\n\trevert.Success()\n\treturn &runConf, nil\n}\n\n\/\/ Update applies configuration changes to a started device.\nfunc (d *nicP2P) Update(oldDevices deviceConfig.Devices, isRunning bool) error {\n\tif !isRunning {\n\t\treturn nil\n\t}\n\n\terr := d.validateEnvironment()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\toldConfig := oldDevices[d.name]\n\tv := d.volatileGet()\n\n\t\/\/ Populate device config with volatile fields if needed.\n\tnetworkVethFillFromVolatile(d.config, v)\n\tnetworkVethFillFromVolatile(oldConfig, v)\n\n\t\/\/ Remove old host-side routes from veth interface.\n\tnetworkNICRouteDelete(oldConfig[\"host_name\"], append(util.SplitNTrimSpace(oldConfig[\"ipv4.routes\"], \",\", -1, true), util.SplitNTrimSpace(oldConfig[\"ipv6.routes\"], \",\", -1, true)...)...)\n\n\t\/\/ Apply host-side routes to veth interface.\n\terr = networkNICRouteAdd(d.config[\"host_name\"], append(util.SplitNTrimSpace(d.config[\"ipv4.routes\"], \",\", -1, true), util.SplitNTrimSpace(d.config[\"ipv6.routes\"], \",\", -1, true)...)...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Apply host-side limits.\n\terr = networkSetupHostVethLimits(d.config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Stop is run when the device is removed from the instance.\nfunc (d *nicP2P) Stop() (*deviceConfig.RunConfig, error) {\n\trunConf := deviceConfig.RunConfig{\n\t\tPostHooks: []func() error{d.postStop},\n\t}\n\n\treturn &runConf, nil\n}\n\n\/\/ postStop is run after the device is removed from the instance.\nfunc (d *nicP2P) postStop() error {\n\tdefer d.volatileSet(map[string]string{\n\t\t\"host_name\": \"\",\n\t})\n\n\tv := d.volatileGet()\n\n\tnetworkVethFillFromVolatile(d.config, v)\n\n\tif d.config[\"host_name\"] != \"\" && network.InterfaceExists(d.config[\"host_name\"]) {\n\t\t\/\/ Removing host-side end of veth pair will delete the peer end too.\n\t\terr := network.InterfaceRemove(d.config[\"host_name\"])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to remove interface %s: %s\", d.config[\"host_name\"], err)\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>lxd\/device\/nic\/p2p: networkCreateVethPair and networkCreateTap usage<commit_after>package device\n\nimport (\n\t\"fmt\"\n\n\tdeviceConfig \"github.com\/lxc\/lxd\/lxd\/device\/config\"\n\t\"github.com\/lxc\/lxd\/lxd\/instance\"\n\t\"github.com\/lxc\/lxd\/lxd\/instance\/instancetype\"\n\t\"github.com\/lxc\/lxd\/lxd\/network\"\n\t\"github.com\/lxc\/lxd\/lxd\/revert\"\n\t\"github.com\/lxc\/lxd\/lxd\/util\"\n)\n\ntype nicP2P struct {\n\tdeviceCommon\n}\n\n\/\/ CanHotPlug returns whether the device can be managed whilst the instance is running. 
Returns true.\nfunc (d *nicP2P) CanHotPlug() bool {\n\treturn true\n}\n\n\/\/ validateConfig checks the supplied config for correctness.\nfunc (d *nicP2P) validateConfig(instConf instance.ConfigReader) error {\n\tif !instanceSupported(instConf.Type(), instancetype.Container, instancetype.VM) {\n\t\treturn ErrUnsupportedDevType\n\t}\n\n\toptionalFields := []string{\n\t\t\"name\",\n\t\t\"mtu\",\n\t\t\"hwaddr\",\n\t\t\"host_name\",\n\t\t\"limits.ingress\",\n\t\t\"limits.egress\",\n\t\t\"limits.max\",\n\t\t\"ipv4.routes\",\n\t\t\"ipv6.routes\",\n\t\t\"boot.priority\",\n\t}\n\terr := d.config.Validate(nicValidationRules([]string{}, optionalFields, instConf))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ validateEnvironment checks the runtime environment for correctness.\nfunc (d *nicP2P) validateEnvironment() error {\n\tif d.inst.Type() == instancetype.Container && d.config[\"name\"] == \"\" {\n\t\treturn fmt.Errorf(\"Requires name property to start\")\n\t}\n\n\treturn nil\n}\n\n\/\/ UpdatableFields returns a list of fields that can be updated without triggering a device remove & add.\nfunc (d *nicP2P) UpdatableFields(oldDevice Type) []string {\n\t\/\/ Check old and new device types match.\n\t_, match := oldDevice.(*nicP2P)\n\tif !match {\n\t\treturn []string{}\n\t}\n\n\treturn []string{\"limits.ingress\", \"limits.egress\", \"limits.max\", \"ipv4.routes\", \"ipv6.routes\"}\n}\n\n\/\/ Start is run when the device is added to a running instance or instance is starting up.\nfunc (d *nicP2P) Start() (*deviceConfig.RunConfig, error) {\n\terr := d.validateEnvironment()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trevert := revert.New()\n\tdefer revert.Fail()\n\n\tsaveData := make(map[string]string)\n\tsaveData[\"host_name\"] = d.config[\"host_name\"]\n\n\tvar peerName string\n\tvar mtu uint32\n\n\t\/\/ Create veth pair and configure the peer end with custom hwaddr and mtu if supplied.\n\tif d.inst.Type() == instancetype.Container {\n\t\tif saveData[\"host_name\"] == \"\" {\n\t\t\tsaveData[\"host_name\"] = network.RandomDevName(\"veth\")\n\t\t}\n\n\t\tpeerName, mtu, err = networkCreateVethPair(saveData[\"host_name\"], d.config)\n\t} else if d.inst.Type() == instancetype.VM {\n\t\tif saveData[\"host_name\"] == \"\" {\n\t\t\tsaveData[\"host_name\"] = network.RandomDevName(\"tap\")\n\t\t}\n\n\t\tpeerName = saveData[\"host_name\"] \/\/ VMs use the host_name to link to the TAP FD.\n\t\tmtu, err = networkCreateTap(saveData[\"host_name\"], d.config)\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trevert.Add(func() { network.InterfaceRemove(saveData[\"host_name\"]) })\n\n\t\/\/ Populate device config with volatile fields if needed.\n\tnetworkVethFillFromVolatile(d.config, saveData)\n\n\t\/\/ Apply host-side routes to veth interface.\n\terr = networkNICRouteAdd(d.config[\"host_name\"], append(util.SplitNTrimSpace(d.config[\"ipv4.routes\"], \",\", -1, true), util.SplitNTrimSpace(d.config[\"ipv6.routes\"], \",\", -1, true)...)...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Apply host-side limits.\n\terr = networkSetupHostVethLimits(d.config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = d.volatileSet(saveData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trunConf := deviceConfig.RunConfig{}\n\trunConf.NetworkInterface = []deviceConfig.RunConfigItem{\n\t\t{Key: \"type\", Value: \"phys\"},\n\t\t{Key: \"name\", Value: d.config[\"name\"]},\n\t\t{Key: \"flags\", Value: \"up\"},\n\t\t{Key: \"link\", Value: peerName},\n\t}\n\n\tif d.inst.Type() == 
instancetype.VM {\n\t\trunConf.NetworkInterface = append(runConf.NetworkInterface,\n\t\t\t[]deviceConfig.RunConfigItem{\n\t\t\t\t{Key: \"devName\", Value: d.name},\n\t\t\t\t{Key: \"hwaddr\", Value: d.config[\"hwaddr\"]},\n\t\t\t\t{Key: \"mtu\", Value: fmt.Sprintf(\"%d\", mtu)},\n\t\t\t}...)\n\t}\n\n\trevert.Success()\n\treturn &runConf, nil\n}\n\n\/\/ Update applies configuration changes to a started device.\nfunc (d *nicP2P) Update(oldDevices deviceConfig.Devices, isRunning bool) error {\n\tif !isRunning {\n\t\treturn nil\n\t}\n\n\terr := d.validateEnvironment()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\toldConfig := oldDevices[d.name]\n\tv := d.volatileGet()\n\n\t\/\/ Populate device config with volatile fields if needed.\n\tnetworkVethFillFromVolatile(d.config, v)\n\tnetworkVethFillFromVolatile(oldConfig, v)\n\n\t\/\/ Remove old host-side routes from veth interface.\n\tnetworkNICRouteDelete(oldConfig[\"host_name\"], append(util.SplitNTrimSpace(oldConfig[\"ipv4.routes\"], \",\", -1, true), util.SplitNTrimSpace(oldConfig[\"ipv6.routes\"], \",\", -1, true)...)...)\n\n\t\/\/ Apply host-side routes to veth interface.\n\terr = networkNICRouteAdd(d.config[\"host_name\"], append(util.SplitNTrimSpace(d.config[\"ipv4.routes\"], \",\", -1, true), util.SplitNTrimSpace(d.config[\"ipv6.routes\"], \",\", -1, true)...)...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Apply host-side limits.\n\terr = networkSetupHostVethLimits(d.config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Stop is run when the device is removed from the instance.\nfunc (d *nicP2P) Stop() (*deviceConfig.RunConfig, error) {\n\trunConf := deviceConfig.RunConfig{\n\t\tPostHooks: []func() error{d.postStop},\n\t}\n\n\treturn &runConf, nil\n}\n\n\/\/ postStop is run after the device is removed from the instance.\nfunc (d *nicP2P) postStop() error {\n\tdefer d.volatileSet(map[string]string{\n\t\t\"host_name\": \"\",\n\t})\n\n\tv := d.volatileGet()\n\n\tnetworkVethFillFromVolatile(d.config, v)\n\n\tif d.config[\"host_name\"] != \"\" && network.InterfaceExists(d.config[\"host_name\"]) {\n\t\t\/\/ Removing host-side end of veth pair will delete the peer end too.\n\t\terr := network.InterfaceRemove(d.config[\"host_name\"])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to remove interface %s: %s\", d.config[\"host_name\"], err)\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package amazonec2\n\nimport (\n\t\"testing\"\n\n\t\"errors\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\t\"github.com\/docker\/machine\/commands\/commandstest\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nconst (\n\ttestSSHPort = 22\n\ttestDockerPort = 2376\n)\n\nvar (\n\tsecurityGroup = &ec2.SecurityGroup{\n\t\tGroupName: aws.String(\"test-group\"),\n\t\tGroupId: aws.String(\"12345\"),\n\t\tVpcId: aws.String(\"12345\"),\n\t}\n)\n\nfunc TestConfigureSecurityGroupPermissionsEmpty(t *testing.T) {\n\tdriver := NewTestDriver()\n\n\tperms := driver.configureSecurityGroupPermissions(securityGroup)\n\n\tassert.Len(t, perms, 2)\n}\n\nfunc TestConfigureSecurityGroupPermissionsSshOnly(t *testing.T) {\n\tdriver := NewTestDriver()\n\tgroup := securityGroup\n\tgroup.IpPermissions = []*ec2.IpPermission{\n\t\t{\n\t\t\tIpProtocol: aws.String(\"tcp\"),\n\t\t\tFromPort: aws.Int64(int64(testSSHPort)),\n\t\t\tToPort: aws.Int64(int64(testSSHPort)),\n\t\t},\n\t}\n\n\tperms := driver.configureSecurityGroupPermissions(group)\n\n\tassert.Len(t, perms, 1)\n\tassert.Equal(t, 
testDockerPort, *perms[0].FromPort)\n}\n\nfunc TestConfigureSecurityGroupPermissionsDockerOnly(t *testing.T) {\n\tdriver := NewTestDriver()\n\tgroup := securityGroup\n\tgroup.IpPermissions = []*ec2.IpPermission{\n\t\t{\n\t\t\tIpProtocol: aws.String(\"tcp\"),\n\t\t\tFromPort: aws.Int64((testDockerPort)),\n\t\t\tToPort: aws.Int64((testDockerPort)),\n\t\t},\n\t}\n\n\tperms := driver.configureSecurityGroupPermissions(group)\n\n\tassert.Len(t, perms, 1)\n\tassert.Equal(t, testSSHPort, *perms[0].FromPort)\n}\n\nfunc TestConfigureSecurityGroupPermissionsDockerAndSsh(t *testing.T) {\n\tdriver := NewTestDriver()\n\tgroup := securityGroup\n\tgroup.IpPermissions = []*ec2.IpPermission{\n\t\t{\n\t\t\tIpProtocol: aws.String(\"tcp\"),\n\t\t\tFromPort: aws.Int64(testSSHPort),\n\t\t\tToPort: aws.Int64(testSSHPort),\n\t\t},\n\t\t{\n\t\t\tIpProtocol: aws.String(\"tcp\"),\n\t\t\tFromPort: aws.Int64(testDockerPort),\n\t\t\tToPort: aws.Int64(testDockerPort),\n\t\t},\n\t}\n\n\tperms := driver.configureSecurityGroupPermissions(group)\n\n\tassert.Empty(t, perms)\n}\n\nfunc TestValidateAwsRegionValid(t *testing.T) {\n\tregions := []string{\"eu-west-1\", \"eu-central-1\"}\n\n\tfor _, region := range regions {\n\t\tvalidatedRegion, err := validateAwsRegion(region)\n\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, region, validatedRegion)\n\t}\n}\n\nfunc TestValidateAwsRegionInvalid(t *testing.T) {\n\tregions := []string{\"eu-west-2\", \"eu-central-2\"}\n\n\tfor _, region := range regions {\n\t\t_, err := validateAwsRegion(region)\n\n\t\tassert.EqualError(t, err, \"Invalid region specified\")\n\t}\n}\n\nfunc TestFindDefaultVPC(t *testing.T) {\n\tdriver := NewDriver(\"machineFoo\", \"path\")\n\tdriver.clientFactory = func() Ec2Client { return &fakeEC2WithLogin{} }\n\n\tvpc, err := driver.getDefaultVPCId()\n\n\tassert.Equal(t, \"vpc-9999\", vpc)\n\tassert.NoError(t, err)\n}\n\nfunc TestDefaultVPCIsMissing(t *testing.T) {\n\tdriver := NewDriver(\"machineFoo\", \"path\")\n\tdriver.clientFactory = func() Ec2Client {\n\t\treturn &fakeEC2WithDescribe{\n\t\t\toutput: &ec2.DescribeAccountAttributesOutput{\n\t\t\t\tAccountAttributes: []*ec2.AccountAttribute{},\n\t\t\t},\n\t\t}\n\t}\n\n\tvpc, err := driver.getDefaultVPCId()\n\n\tassert.EqualError(t, err, \"No default-vpc attribute\")\n\tassert.Empty(t, vpc)\n}\n\nfunc TestDescribeAccountAttributeFails(t *testing.T) {\n\tdriver := NewDriver(\"machineFoo\", \"path\")\n\tdriver.clientFactory = func() Ec2Client {\n\t\treturn &fakeEC2WithDescribe{\n\t\t\terr: errors.New(\"Not Found\"),\n\t\t}\n\t}\n\n\tvpc, err := driver.getDefaultVPCId()\n\n\tassert.EqualError(t, err, \"Not Found\")\n\tassert.Empty(t, vpc)\n}\n\nfunc TestAccessKeyIsMandatory(t *testing.T) {\n\tdriver := NewTestDriver()\n\tdriver.awsCredentials = &cliCredentials{}\n\toptions := &commandstest.FakeFlagger{\n\t\tData: map[string]interface{}{\n\t\t\t\"name\": \"test\",\n\t\t\t\"amazonec2-region\": \"us-east-1\",\n\t\t\t\"amazonec2-zone\": \"e\",\n\t\t},\n\t}\n\n\terr := driver.SetConfigFromFlags(options)\n\n\tassert.Equal(t, err, errorMissingAccessKeyOption)\n}\n\nfunc TestAccessKeyIsMandatoryEvenIfSecretKeyIsPassed(t *testing.T) {\n\tdriver := NewTestDriver()\n\tdriver.awsCredentials = &cliCredentials{}\n\toptions := &commandstest.FakeFlagger{\n\t\tData: map[string]interface{}{\n\t\t\t\"name\": \"test\",\n\t\t\t\"amazonec2-secret-key\": \"123\",\n\t\t\t\"amazonec2-region\": \"us-east-1\",\n\t\t\t\"amazonec2-zone\": \"e\",\n\t\t},\n\t}\n\n\terr := driver.SetConfigFromFlags(options)\n\n\tassert.Equal(t, err, 
errorMissingAccessKeyOption)\n}\n\nfunc TestSecretKeyIsMandatory(t *testing.T) {\n\tdriver := NewTestDriver()\n\tdriver.awsCredentials = &cliCredentials{}\n\toptions := &commandstest.FakeFlagger{\n\t\tData: map[string]interface{}{\n\t\t\t\"name\": \"test\",\n\t\t\t\"amazonec2-access-key\": \"foobar\",\n\t\t\t\"amazonec2-region\": \"us-east-1\",\n\t\t\t\"amazonec2-zone\": \"e\",\n\t\t},\n\t}\n\n\terr := driver.SetConfigFromFlags(options)\n\n\tassert.Equal(t, err, errorMissingSecretKeyOption)\n}\n\nfunc TestLoadingFromCredentialsWorked(t *testing.T) {\n\tdriver := NewCustomTestDriver(&fakeEC2WithLogin{})\n\tdriver.awsCredentials = &fileCredentials{}\n\toptions := &commandstest.FakeFlagger{\n\t\tData: map[string]interface{}{\n\t\t\t\"name\": \"test\",\n\t\t\t\"amazonec2-region\": \"us-east-1\",\n\t\t\t\"amazonec2-zone\": \"e\",\n\t\t},\n\t}\n\n\terr := driver.SetConfigFromFlags(options)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"access\", driver.AccessKey)\n\tassert.Equal(t, \"secret\", driver.SecretKey)\n\tassert.Equal(t, \"token\", driver.SessionToken)\n}\n\nfunc TestPassingBothCLIArgWorked(t *testing.T) {\n\tdriver := NewCustomTestDriver(&fakeEC2WithLogin{})\n\tdriver.awsCredentials = &cliCredentials{}\n\toptions := &commandstest.FakeFlagger{\n\t\tData: map[string]interface{}{\n\t\t\t\"name\": \"test\",\n\t\t\t\"amazonec2-access-key\": \"foobar\",\n\t\t\t\"amazonec2-secret-key\": \"123\",\n\t\t\t\"amazonec2-region\": \"us-east-1\",\n\t\t\t\"amazonec2-zone\": \"e\",\n\t\t},\n\t}\n\n\terr := driver.SetConfigFromFlags(options)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"foobar\", driver.AccessKey)\n\tassert.Equal(t, \"123\", driver.SecretKey)\n}\n<commit_msg>Add missing test on SecurityGroupPermission<commit_after>package amazonec2\n\nimport (\n\t\"testing\"\n\n\t\"errors\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\t\"github.com\/docker\/machine\/commands\/commandstest\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nconst (\n\ttestSSHPort = 22\n\ttestDockerPort = 2376\n\ttestSwarmPort = 3376\n)\n\nvar (\n\tsecurityGroup = &ec2.SecurityGroup{\n\t\tGroupName: aws.String(\"test-group\"),\n\t\tGroupId: aws.String(\"12345\"),\n\t\tVpcId: aws.String(\"12345\"),\n\t}\n)\n\nfunc TestConfigureSecurityGroupPermissionsEmpty(t *testing.T) {\n\tdriver := NewTestDriver()\n\n\tperms := driver.configureSecurityGroupPermissions(securityGroup)\n\n\tassert.Len(t, perms, 2)\n}\n\nfunc TestConfigureSecurityGroupPermissionsSshOnly(t *testing.T) {\n\tdriver := NewTestDriver()\n\tgroup := securityGroup\n\tgroup.IpPermissions = []*ec2.IpPermission{\n\t\t{\n\t\t\tIpProtocol: aws.String(\"tcp\"),\n\t\t\tFromPort: aws.Int64(int64(testSSHPort)),\n\t\t\tToPort: aws.Int64(int64(testSSHPort)),\n\t\t},\n\t}\n\n\tperms := driver.configureSecurityGroupPermissions(group)\n\n\tassert.Len(t, perms, 1)\n\tassert.Equal(t, testDockerPort, *perms[0].FromPort)\n}\n\nfunc TestConfigureSecurityGroupPermissionsDockerOnly(t *testing.T) {\n\tdriver := NewTestDriver()\n\tgroup := securityGroup\n\tgroup.IpPermissions = []*ec2.IpPermission{\n\t\t{\n\t\t\tIpProtocol: aws.String(\"tcp\"),\n\t\t\tFromPort: aws.Int64((testDockerPort)),\n\t\t\tToPort: aws.Int64((testDockerPort)),\n\t\t},\n\t}\n\n\tperms := driver.configureSecurityGroupPermissions(group)\n\n\tassert.Len(t, perms, 1)\n\tassert.Equal(t, testSSHPort, *perms[0].FromPort)\n}\n\nfunc TestConfigureSecurityGroupPermissionsDockerAndSsh(t *testing.T) {\n\tdriver := NewTestDriver()\n\tgroup := 
securityGroup\n\tgroup.IpPermissions = []*ec2.IpPermission{\n\t\t{\n\t\t\tIpProtocol: aws.String(\"tcp\"),\n\t\t\tFromPort: aws.Int64(testSSHPort),\n\t\t\tToPort: aws.Int64(testSSHPort),\n\t\t},\n\t\t{\n\t\t\tIpProtocol: aws.String(\"tcp\"),\n\t\t\tFromPort: aws.Int64(testDockerPort),\n\t\t\tToPort: aws.Int64(testDockerPort),\n\t\t},\n\t}\n\n\tperms := driver.configureSecurityGroupPermissions(group)\n\n\tassert.Empty(t, perms)\n}\n\nfunc TestConfigureSecurityGroupPermissionsWithSwarm(t *testing.T) {\n\tdriver := NewTestDriver()\n\tdriver.SwarmMaster = true\n\tgroup := securityGroup\n\tgroup.IpPermissions = []*ec2.IpPermission{\n\t\t{\n\t\t\tIpProtocol: aws.String(\"tcp\"),\n\t\t\tFromPort: aws.Int64(testSSHPort),\n\t\t\tToPort: aws.Int64(testSSHPort),\n\t\t},\n\t\t{\n\t\t\tIpProtocol: aws.String(\"tcp\"),\n\t\t\tFromPort: aws.Int64(testDockerPort),\n\t\t\tToPort: aws.Int64(testDockerPort),\n\t\t},\n\t}\n\n\tperms := driver.configureSecurityGroupPermissions(group)\n\n\tassert.Len(t, perms, 1)\n\tassert.Equal(t, testSwarmPort, *perms[0].FromPort)\n}\n\nfunc TestValidateAwsRegionValid(t *testing.T) {\n\tregions := []string{\"eu-west-1\", \"eu-central-1\"}\n\n\tfor _, region := range regions {\n\t\tvalidatedRegion, err := validateAwsRegion(region)\n\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, region, validatedRegion)\n\t}\n}\n\nfunc TestValidateAwsRegionInvalid(t *testing.T) {\n\tregions := []string{\"eu-west-2\", \"eu-central-2\"}\n\n\tfor _, region := range regions {\n\t\t_, err := validateAwsRegion(region)\n\n\t\tassert.EqualError(t, err, \"Invalid region specified\")\n\t}\n}\n\nfunc TestFindDefaultVPC(t *testing.T) {\n\tdriver := NewDriver(\"machineFoo\", \"path\")\n\tdriver.clientFactory = func() Ec2Client { return &fakeEC2WithLogin{} }\n\n\tvpc, err := driver.getDefaultVPCId()\n\n\tassert.Equal(t, \"vpc-9999\", vpc)\n\tassert.NoError(t, err)\n}\n\nfunc TestDefaultVPCIsMissing(t *testing.T) {\n\tdriver := NewDriver(\"machineFoo\", \"path\")\n\tdriver.clientFactory = func() Ec2Client {\n\t\treturn &fakeEC2WithDescribe{\n\t\t\toutput: &ec2.DescribeAccountAttributesOutput{\n\t\t\t\tAccountAttributes: []*ec2.AccountAttribute{},\n\t\t\t},\n\t\t}\n\t}\n\n\tvpc, err := driver.getDefaultVPCId()\n\n\tassert.EqualError(t, err, \"No default-vpc attribute\")\n\tassert.Empty(t, vpc)\n}\n\nfunc TestDescribeAccountAttributeFails(t *testing.T) {\n\tdriver := NewDriver(\"machineFoo\", \"path\")\n\tdriver.clientFactory = func() Ec2Client {\n\t\treturn &fakeEC2WithDescribe{\n\t\t\terr: errors.New(\"Not Found\"),\n\t\t}\n\t}\n\n\tvpc, err := driver.getDefaultVPCId()\n\n\tassert.EqualError(t, err, \"Not Found\")\n\tassert.Empty(t, vpc)\n}\n\nfunc TestAccessKeyIsMandatory(t *testing.T) {\n\tdriver := NewTestDriver()\n\tdriver.awsCredentials = &cliCredentials{}\n\toptions := &commandstest.FakeFlagger{\n\t\tData: map[string]interface{}{\n\t\t\t\"name\": \"test\",\n\t\t\t\"amazonec2-region\": \"us-east-1\",\n\t\t\t\"amazonec2-zone\": \"e\",\n\t\t},\n\t}\n\n\terr := driver.SetConfigFromFlags(options)\n\n\tassert.Equal(t, err, errorMissingAccessKeyOption)\n}\n\nfunc TestAccessKeyIsMandatoryEvenIfSecretKeyIsPassed(t *testing.T) {\n\tdriver := NewTestDriver()\n\tdriver.awsCredentials = &cliCredentials{}\n\toptions := &commandstest.FakeFlagger{\n\t\tData: map[string]interface{}{\n\t\t\t\"name\": \"test\",\n\t\t\t\"amazonec2-secret-key\": \"123\",\n\t\t\t\"amazonec2-region\": \"us-east-1\",\n\t\t\t\"amazonec2-zone\": \"e\",\n\t\t},\n\t}\n\n\terr := driver.SetConfigFromFlags(options)\n\n\tassert.Equal(t, err, 
errorMissingAccessKeyOption)\n}\n\nfunc TestSecretKeyIsMandatory(t *testing.T) {\n\tdriver := NewTestDriver()\n\tdriver.awsCredentials = &cliCredentials{}\n\toptions := &commandstest.FakeFlagger{\n\t\tData: map[string]interface{}{\n\t\t\t\"name\": \"test\",\n\t\t\t\"amazonec2-access-key\": \"foobar\",\n\t\t\t\"amazonec2-region\": \"us-east-1\",\n\t\t\t\"amazonec2-zone\": \"e\",\n\t\t},\n\t}\n\n\terr := driver.SetConfigFromFlags(options)\n\n\tassert.Equal(t, err, errorMissingSecretKeyOption)\n}\n\nfunc TestLoadingFromCredentialsWorked(t *testing.T) {\n\tdriver := NewCustomTestDriver(&fakeEC2WithLogin{})\n\tdriver.awsCredentials = &fileCredentials{}\n\toptions := &commandstest.FakeFlagger{\n\t\tData: map[string]interface{}{\n\t\t\t\"name\": \"test\",\n\t\t\t\"amazonec2-region\": \"us-east-1\",\n\t\t\t\"amazonec2-zone\": \"e\",\n\t\t},\n\t}\n\n\terr := driver.SetConfigFromFlags(options)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"access\", driver.AccessKey)\n\tassert.Equal(t, \"secret\", driver.SecretKey)\n\tassert.Equal(t, \"token\", driver.SessionToken)\n}\n\nfunc TestPassingBothCLIArgWorked(t *testing.T) {\n\tdriver := NewCustomTestDriver(&fakeEC2WithLogin{})\n\tdriver.awsCredentials = &cliCredentials{}\n\toptions := &commandstest.FakeFlagger{\n\t\tData: map[string]interface{}{\n\t\t\t\"name\": \"test\",\n\t\t\t\"amazonec2-access-key\": \"foobar\",\n\t\t\t\"amazonec2-secret-key\": \"123\",\n\t\t\t\"amazonec2-region\": \"us-east-1\",\n\t\t\t\"amazonec2-zone\": \"e\",\n\t\t},\n\t}\n\n\terr := driver.SetConfigFromFlags(options)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"foobar\", driver.AccessKey)\n\tassert.Equal(t, \"123\", driver.SecretKey)\n}\n<|endoftext|>"} {"text":"<commit_before>package sequence\n\n\/*\nNote :\n(1) store the sequence in the ETCD cluster, and local file(sequence.dat)\n(2) batch get the sequences from ETCD cluster, and store the max sequence id in the local file\n(3) the sequence range is : [currentSeqId, maxSeqId), when the currentSeqId >= maxSeqId, fetch the new maxSeqId.\n*\/\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"go.etcd.io\/etcd\/client\"\n)\n\nconst (\n\t\/\/ EtcdKeyPrefix = \"\/seaweedfs\"\n\tEtcdKeySequence = \"\/master\/sequence\"\n\tEtcdContextTimeoutSecond = 100 * time.Second\n\tDefaultEtcdSteps uint64 = 500 \/\/ internal counter\n\tSequencerFileName = \"sequencer.dat\"\n\tFileMaxSequenceLength = 128\n)\n\ntype EtcdSequencer struct {\n\tsequenceLock sync.Mutex\n\n\t\/\/ available sequence range : [currentSeqId, maxSeqId)\n\tcurrentSeqId uint64\n\tmaxSeqId uint64\n\n\tkeysAPI client.KeysAPI\n\tseqFile *os.File\n}\n\nfunc NewEtcdSequencer(etcdUrls string, metaFolder string) (*EtcdSequencer, error) {\n\tfile, err := openSequenceFile(metaFolder + \"\/\" + SequencerFileName)\n\tif nil != err {\n\t\treturn nil, fmt.Errorf(\"open sequence file failed, %v\", err)\n\t}\n\tdefer file.Close()\n\n\tcli, err := client.New(client.Config{\n\t\tEndpoints: strings.Split(etcdUrls, \",\"),\n\t\tUsername: \"\",\n\t\tPassword: \"\",\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkeysApi := client.NewKeysAPI(cli)\n\n\t\/\/ TODO: the current sequence id in local file is not used\n\tmaxValue, _, err := readSequenceFile(file)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"read sequence from file failed, %v\", err)\n\t}\n\tglog.V(4).Infof(\"read sequence from file : %d\", maxValue)\n\n\tnewSeq, err := setMaxSequenceToEtcd(keysApi, 
maxValue)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsequencer := &EtcdSequencer{maxSeqId: newSeq,\n\t\tcurrentSeqId: newSeq,\n\t\tkeysAPI: keysApi,\n\t\tseqFile: file,\n\t}\n\treturn sequencer, nil\n}\n\nfunc (es *EtcdSequencer) NextFileId(count uint64) uint64 {\n\tes.sequenceLock.Lock()\n\tdefer es.sequenceLock.Unlock()\n\n\tif (es.currentSeqId + count) >= es.maxSeqId {\n\t\treqSteps := DefaultEtcdSteps\n\t\tif count > DefaultEtcdSteps {\n\t\t\treqSteps += count\n\t\t}\n\t\tmaxId, err := batchGetSequenceFromEtcd(es.keysAPI, reqSteps)\n\t\tglog.V(4).Infof(\"get max sequence id from etcd, %d\", maxId)\n\t\tif err != nil {\n\t\t\tglog.Error(err)\n\t\t\treturn 0\n\t\t}\n\t\tes.currentSeqId, es.maxSeqId = maxId-reqSteps, maxId\n\t\tglog.V(4).Infof(\"current id : %d, max id : %d\", es.currentSeqId, es.maxSeqId)\n\n\t\tif err := writeSequenceFile(es.seqFile, es.maxSeqId, es.currentSeqId); err != nil {\n\t\t\tglog.Errorf(\"flush sequence to file failed, %v\", err)\n\t\t}\n\t}\n\n\tret := es.currentSeqId\n\tes.currentSeqId += count\n\treturn ret\n}\n\n\/**\ninstead of collecting the max value from volume server,\nthe max value should be saved in local config file and ETCD cluster\n*\/\nfunc (es *EtcdSequencer) SetMax(seenValue uint64) {\n\tes.sequenceLock.Lock()\n\tdefer es.sequenceLock.Unlock()\n\tif seenValue > es.maxSeqId {\n\t\tmaxId, err := setMaxSequenceToEtcd(es.keysAPI, seenValue)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"set Etcd Max sequence failed : %v\", err)\n\t\t\treturn\n\t\t}\n\t\tes.currentSeqId, es.maxSeqId = maxId, maxId\n\n\t\tif err := writeSequenceFile(es.seqFile, maxId, maxId); err != nil {\n\t\t\tglog.Errorf(\"flush sequence to file failed, %v\", err)\n\t\t}\n\t}\n}\n\nfunc (es *EtcdSequencer) GetMax() uint64 {\n\treturn es.maxSeqId\n}\n\nfunc (es *EtcdSequencer) Peek() uint64 {\n\treturn es.currentSeqId\n}\n\nfunc batchGetSequenceFromEtcd(kvApi client.KeysAPI, step uint64) (uint64, error) {\n\tif step <= 0 {\n\t\treturn 0, fmt.Errorf(\"the step must be greater than 0\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), EtcdContextTimeoutSecond)\n\tvar endSeqValue uint64 = 0\n\tdefer cancel()\n\tfor {\n\t\tgetResp, err := kvApi.Get(ctx, EtcdKeySequence, &client.GetOptions{Recursive: false, Quorum: true})\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tif getResp.Node == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tprevValue := getResp.Node.Value\n\t\tprevSeqValue, err := strconv.ParseUint(prevValue, 10, 64)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"get sequence from etcd failed, %v\", err)\n\t\t}\n\t\tendSeqValue = prevSeqValue + step\n\t\tendSeqStr := strconv.FormatUint(endSeqValue, 10)\n\n\t\t_, err = kvApi.Set(ctx, EtcdKeySequence, endSeqStr, &client.SetOptions{PrevValue: prevValue})\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\tglog.Error(err)\n\t}\n\n\treturn endSeqValue, nil\n}\n\n\/**\nupdate the value of the key EtcdKeySequence in ETCD cluster with the parameter of maxSeq,\nwhen the value of the key EtcdKeySequence is equal to or larger than the parameter maxSeq,\nreturn the value of EtcdKeySequence in the ETCD cluster;\nwhen the value of the EtcdKeySequence is less than the parameter maxSeq,\nreturn the value of the parameter maxSeq\n*\/\nfunc setMaxSequenceToEtcd(kvApi client.KeysAPI, maxSeq uint64) (uint64, error) {\n\tmaxSeqStr := strconv.FormatUint(maxSeq, 10)\n\tctx, cancel := context.WithTimeout(context.Background(), EtcdContextTimeoutSecond)\n\tdefer cancel()\n\n\tfor {\n\t\tgetResp, err := kvApi.Get(ctx, EtcdKeySequence, 
&client.GetOptions{Recursive: false, Quorum: true})\n\t\tif err != nil {\n\t\t\tif ce, ok := err.(client.Error); ok && (ce.Code == client.ErrorCodeKeyNotFound) {\n\t\t\t\t_, err := kvApi.Create(ctx, EtcdKeySequence, maxSeqStr)\n\t\t\t\tif err == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif ce, ok = err.(client.Error); ok && (ce.Code == client.ErrorCodeNodeExist) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\treturn 0, err\n\t\t\t} else {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\n\t\tif getResp.Node == nil {\n\t\t\tcontinue\n\t\t}\n\t\tprevSeqStr := getResp.Node.Value\n\t\tprevSeq, err := strconv.ParseUint(prevSeqStr, 10, 64)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tif prevSeq >= maxSeq {\n\t\t\treturn prevSeq, nil\n\t\t}\n\n\t\t_, err = kvApi.Set(ctx, EtcdKeySequence, maxSeqStr, &client.SetOptions{PrevValue: prevSeqStr})\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n}\n\nfunc openSequenceFile(file string) (*os.File, error) {\n\t_, err := os.Stat(file)\n\tif os.IsNotExist(err) {\n\t\tfid, err := os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0644)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := writeSequenceFile(fid, 1, 0); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn fid, nil\n\t} else {\n\t\treturn os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0644)\n\t}\n}\n\n\/*\nread sequence and step from sequence file\n*\/\nfunc readSequenceFile(file *os.File) (uint64, uint64, error) {\n\tsequence := make([]byte, FileMaxSequenceLength)\n\tsize, err := file.ReadAt(sequence, 0)\n\tif (err != nil) && (err != io.EOF) {\n\t\terr := fmt.Errorf(\"cannot read file %s, %v\", file.Name(), err)\n\t\treturn 0, 0, err\n\t}\n\tsequence = sequence[0:size]\n\tseqs := strings.Split(string(sequence), \":\")\n\tmaxId, err := strconv.ParseUint(seqs[0], 10, 64)\n\tif err != nil {\n\t\treturn 0, 0, fmt.Errorf(\"parse sequence from file failed, %v\", err)\n\t}\n\n\tif len(seqs) > 1 {\n\t\tstep, err := strconv.ParseUint(seqs[1], 10, 64)\n\t\tif err != nil {\n\t\t\treturn 0, 0, fmt.Errorf(\"parse sequence from file failed, %v\", err)\n\t\t}\n\t\treturn maxId, step, nil\n\t}\n\n\treturn maxId, 0, nil\n}\n\n\/**\nwrite the sequence and step to sequence file\n*\/\nfunc writeSequenceFile(file *os.File, sequence, step uint64) error {\n\t_ = step\n\tseqStr := fmt.Sprintf(\"%d:%d\", sequence, sequence)\n\tif _, err := file.Seek(0, 0); err != nil {\n\t\terr = fmt.Errorf(\"cannot seek to the beginning of %s: %v\", file.Name(), err)\n\t\treturn err\n\t}\n\tif err := file.Truncate(0); err != nil {\n\t\treturn fmt.Errorf(\"truncate sequence file failed : %v\", err)\n\t}\n\tif _, err := file.WriteString(seqStr); err != nil {\n\t\treturn fmt.Errorf(\"write file %s failed, %v\", file.Name(), err)\n\t}\n\tif err := file.Sync(); err != nil {\n\t\treturn fmt.Errorf(\"flush file %s failed, %v\", file.Name(), err)\n\t}\n\treturn nil\n}\n\n\/\/ the UT helper method\n\/\/ func deleteEtcdKey(kvApi client.KeysAPI, key string) error {\n\/\/ \tctx, cancel := context.WithTimeout(context.Background(), EtcdContextTimeoutSecond)\n\/\/ \tdefer cancel()\n\/\/ \t_, err := kvApi.Delete(ctx, key, &client.DeleteOptions{Dir: false})\n\/\/ \tif err != nil {\n\/\/ \t\treturn err\n\/\/ \t}\n\/\/ \treturn nil\n\/\/ }\n<commit_msg>Revert \"close opened file\"<commit_after>package sequence\n\n\/*\nNote :\n(1) store the sequence in the ETCD cluster, and local file(sequence.dat)\n(2) batch get the sequences from ETCD cluster, and store the max sequence id in the local file\n(3) the sequence range is : [currentSeqId, maxSeqId), when 
the currentSeqId >= maxSeqId, fetch the new maxSeqId.\n*\/\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"go.etcd.io\/etcd\/client\"\n)\n\nconst (\n\t\/\/ EtcdKeyPrefix = \"\/seaweedfs\"\n\tEtcdKeySequence = \"\/master\/sequence\"\n\tEtcdContextTimeoutSecond = 100 * time.Second\n\tDefaultEtcdSteps uint64 = 500 \/\/ internal counter\n\tSequencerFileName = \"sequencer.dat\"\n\tFileMaxSequenceLength = 128\n)\n\ntype EtcdSequencer struct {\n\tsequenceLock sync.Mutex\n\n\t\/\/ available sequence range : [currentSeqId, maxSeqId)\n\tcurrentSeqId uint64\n\tmaxSeqId uint64\n\n\tkeysAPI client.KeysAPI\n\tseqFile *os.File\n}\n\nfunc NewEtcdSequencer(etcdUrls string, metaFolder string) (*EtcdSequencer, error) {\n\tfile, err := openSequenceFile(metaFolder + \"\/\" + SequencerFileName)\n\tif nil != err {\n\t\treturn nil, fmt.Errorf(\"open sequence file failed, %v\", err)\n\t}\n\n\tcli, err := client.New(client.Config{\n\t\tEndpoints: strings.Split(etcdUrls, \",\"),\n\t\tUsername: \"\",\n\t\tPassword: \"\",\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkeysApi := client.NewKeysAPI(cli)\n\n\t\/\/ TODO: the current sequence id in local file is not used\n\tmaxValue, _, err := readSequenceFile(file)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"read sequence from file failed, %v\", err)\n\t}\n\tglog.V(4).Infof(\"read sequence from file : %d\", maxValue)\n\n\tnewSeq, err := setMaxSequenceToEtcd(keysApi, maxValue)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsequencer := &EtcdSequencer{maxSeqId: newSeq,\n\t\tcurrentSeqId: newSeq,\n\t\tkeysAPI: keysApi,\n\t\tseqFile: file,\n\t}\n\treturn sequencer, nil\n}\n\nfunc (es *EtcdSequencer) NextFileId(count uint64) uint64 {\n\tes.sequenceLock.Lock()\n\tdefer es.sequenceLock.Unlock()\n\n\tif (es.currentSeqId + count) >= es.maxSeqId {\n\t\treqSteps := DefaultEtcdSteps\n\t\tif count > DefaultEtcdSteps {\n\t\t\treqSteps += count\n\t\t}\n\t\tmaxId, err := batchGetSequenceFromEtcd(es.keysAPI, reqSteps)\n\t\tglog.V(4).Infof(\"get max sequence id from etcd, %d\", maxId)\n\t\tif err != nil {\n\t\t\tglog.Error(err)\n\t\t\treturn 0\n\t\t}\n\t\tes.currentSeqId, es.maxSeqId = maxId-reqSteps, maxId\n\t\tglog.V(4).Infof(\"current id : %d, max id : %d\", es.currentSeqId, es.maxSeqId)\n\n\t\tif err := writeSequenceFile(es.seqFile, es.maxSeqId, es.currentSeqId); err != nil {\n\t\t\tglog.Errorf(\"flush sequence to file failed, %v\", err)\n\t\t}\n\t}\n\n\tret := es.currentSeqId\n\tes.currentSeqId += count\n\treturn ret\n}\n\n\/**\ninstead of collecting the max value from volume server,\nthe max value should be saved in local config file and ETCD cluster\n*\/\nfunc (es *EtcdSequencer) SetMax(seenValue uint64) {\n\tes.sequenceLock.Lock()\n\tdefer es.sequenceLock.Unlock()\n\tif seenValue > es.maxSeqId {\n\t\tmaxId, err := setMaxSequenceToEtcd(es.keysAPI, seenValue)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"set Etcd Max sequence failed : %v\", err)\n\t\t\treturn\n\t\t}\n\t\tes.currentSeqId, es.maxSeqId = maxId, maxId\n\n\t\tif err := writeSequenceFile(es.seqFile, maxId, maxId); err != nil {\n\t\t\tglog.Errorf(\"flush sequence to file failed, %v\", err)\n\t\t}\n\t}\n}\n\nfunc (es *EtcdSequencer) GetMax() uint64 {\n\treturn es.maxSeqId\n}\n\nfunc (es *EtcdSequencer) Peek() uint64 {\n\treturn es.currentSeqId\n}\n\nfunc batchGetSequenceFromEtcd(kvApi client.KeysAPI, step uint64) (uint64, error) {\n\tif step <= 0 {\n\t\treturn 0, fmt.Errorf(\"the step must 
be greater than 0\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), EtcdContextTimeoutSecond)\n\tvar endSeqValue uint64 = 0\n\tdefer cancel()\n\tfor {\n\t\tgetResp, err := kvApi.Get(ctx, EtcdKeySequence, &client.GetOptions{Recursive: false, Quorum: true})\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tif getResp.Node == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tprevValue := getResp.Node.Value\n\t\tprevSeqValue, err := strconv.ParseUint(prevValue, 10, 64)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"get sequence from etcd failed, %v\", err)\n\t\t}\n\t\tendSeqValue = prevSeqValue + step\n\t\tendSeqStr := strconv.FormatUint(endSeqValue, 10)\n\n\t\t_, err = kvApi.Set(ctx, EtcdKeySequence, endSeqStr, &client.SetOptions{PrevValue: prevValue})\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\tglog.Error(err)\n\t}\n\n\treturn endSeqValue, nil\n}\n\n\/**\nupdate the value of the key EtcdKeySequence in ETCD cluster with the parameter of maxSeq,\nwhen the value of the key EtcdKeySequence is equal to or larger than the parameter maxSeq,\nreturn the value of EtcdKeySequence in the ETCD cluster;\nwhen the value of the EtcdKeySequence is less than the parameter maxSeq,\nreturn the value of the parameter maxSeq\n*\/\nfunc setMaxSequenceToEtcd(kvApi client.KeysAPI, maxSeq uint64) (uint64, error) {\n\tmaxSeqStr := strconv.FormatUint(maxSeq, 10)\n\tctx, cancel := context.WithTimeout(context.Background(), EtcdContextTimeoutSecond)\n\tdefer cancel()\n\n\tfor {\n\t\tgetResp, err := kvApi.Get(ctx, EtcdKeySequence, &client.GetOptions{Recursive: false, Quorum: true})\n\t\tif err != nil {\n\t\t\tif ce, ok := err.(client.Error); ok && (ce.Code == client.ErrorCodeKeyNotFound) {\n\t\t\t\t_, err := kvApi.Create(ctx, EtcdKeySequence, maxSeqStr)\n\t\t\t\tif err == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif ce, ok = err.(client.Error); ok && (ce.Code == client.ErrorCodeNodeExist) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\treturn 0, err\n\t\t\t} else {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\n\t\tif getResp.Node == nil {\n\t\t\tcontinue\n\t\t}\n\t\tprevSeqStr := getResp.Node.Value\n\t\tprevSeq, err := strconv.ParseUint(prevSeqStr, 10, 64)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tif prevSeq >= maxSeq {\n\t\t\treturn prevSeq, nil\n\t\t}\n\n\t\t_, err = kvApi.Set(ctx, EtcdKeySequence, maxSeqStr, &client.SetOptions{PrevValue: prevSeqStr})\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n}\n\nfunc openSequenceFile(file string) (*os.File, error) {\n\t_, err := os.Stat(file)\n\tif os.IsNotExist(err) {\n\t\tfid, err := os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0644)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := writeSequenceFile(fid, 1, 0); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn fid, nil\n\t} else {\n\t\treturn os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0644)\n\t}\n}\n\n\/*\nread sequence and step from sequence file\n*\/\nfunc readSequenceFile(file *os.File) (uint64, uint64, error) {\n\tsequence := make([]byte, FileMaxSequenceLength)\n\tsize, err := file.ReadAt(sequence, 0)\n\tif (err != nil) && (err != io.EOF) {\n\t\terr := fmt.Errorf(\"cannot read file %s, %v\", file.Name(), err)\n\t\treturn 0, 0, err\n\t}\n\tsequence = sequence[0:size]\n\tseqs := strings.Split(string(sequence), \":\")\n\tmaxId, err := strconv.ParseUint(seqs[0], 10, 64)\n\tif err != nil {\n\t\treturn 0, 0, fmt.Errorf(\"parse sequence from file failed, %v\", err)\n\t}\n\n\tif len(seqs) > 1 {\n\t\tstep, err := strconv.ParseUint(seqs[1], 10, 64)\n\t\tif err != nil {\n\t\t\treturn 0, 0, 
fmt.Errorf(\"parse sequence from file failed, %v\", err)\n\t\t}\n\t\treturn maxId, step, nil\n\t}\n\n\treturn maxId, 0, nil\n}\n\n\/**\nwrite the sequence and step to sequence file\n*\/\nfunc writeSequenceFile(file *os.File, sequence, step uint64) error {\n\t_ = step\n\tseqStr := fmt.Sprintf(\"%d:%d\", sequence, sequence)\n\tif _, err := file.Seek(0, 0); err != nil {\n\t\terr = fmt.Errorf(\"cannot seek to the beginning of %s: %v\", file.Name(), err)\n\t\treturn err\n\t}\n\tif err := file.Truncate(0); err != nil {\n\t\treturn fmt.Errorf(\"truncate sequence file faield : %v\", err)\n\t}\n\tif _, err := file.WriteString(seqStr); err != nil {\n\t\treturn fmt.Errorf(\"write file %s failed, %v\", file.Name(), err)\n\t}\n\tif err := file.Sync(); err != nil {\n\t\treturn fmt.Errorf(\"flush file %s failed, %v\", file.Name(), err)\n\t}\n\treturn nil\n}\n\n\/\/ the UT helper method\n\/\/ func deleteEtcdKey(kvApi client.KeysAPI, key string) error {\n\/\/ \tctx, cancel := context.WithTimeout(context.Background(), EtcdContextTimeoutSecond)\n\/\/ \tdefer cancel()\n\/\/ \t_, err := kvApi.Delete(ctx, key, &client.DeleteOptions{Dir: false})\n\/\/ \tif err != nil {\n\/\/ \t\treturn err\n\/\/ \t}\n\/\/ \treturn nil\n\/\/ }\n<|endoftext|>"} {"text":"<commit_before>package cache\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"runtime\"\n\t\"sync\"\n\n\t\"github.com\/grafana\/metrictank\/mdata\/cache\/accnt\"\n\t\"github.com\/grafana\/metrictank\/mdata\/chunk\"\n\t\"github.com\/grafana\/metrictank\/stats\"\n\t\"github.com\/grafana\/metrictank\/tracing\"\n\topentracing \"github.com\/opentracing\/opentracing-go\"\n\t\"github.com\/raintank\/worldping-api\/pkg\/log\"\n\t\"github.com\/rakyll\/globalconf\"\n)\n\nvar (\n\tmaxSize uint64\n\tsearchFwdBug = stats.NewCounter32(\"recovered_errors.cache.metric.searchForwardBug\")\n)\n\nfunc init() {\n\tflags := flag.NewFlagSet(\"chunk-cache\", flag.ExitOnError)\n\t\/\/ (1024 ^ 3) * 4 = 4294967296 = 4G\n\tflags.Uint64Var(&maxSize, \"max-size\", 4294967296, \"Maximum size of chunk cache in bytes\")\n\tglobalconf.Register(\"chunk-cache\", flags)\n}\n\ntype CCache struct {\n\tsync.RWMutex\n\n\t\/\/ one CCacheMetric struct per metric key, indexed by the key\n\tmetricCache map[string]*CCacheMetric\n\n\t\/\/ sets of metric keys, indexed by their raw metric keys\n\tmetricRawKeys map[string]map[string]struct{}\n\n\t\/\/ accounting for the cache. 
keeps track of when data needs to be evicted\n\t\/\/ and what should be evicted\n\taccnt accnt.Accnt\n\n\t\/\/ channel that's only used to signal goroutines to stop\n\tstop chan interface{}\n\n\ttracer opentracing.Tracer\n}\n\nfunc NewCCache() *CCache {\n\tcc := &CCache{\n\t\tmetricCache: make(map[string]*CCacheMetric),\n\t\tmetricRawKeys: make(map[string]map[string]struct{}),\n\t\taccnt: accnt.NewFlatAccnt(maxSize),\n\t\tstop: make(chan interface{}),\n\t\ttracer: opentracing.NoopTracer{},\n\t}\n\tgo cc.evictLoop()\n\treturn cc\n}\n\nfunc (c *CCache) SetTracer(t opentracing.Tracer) {\n\tc.tracer = t\n}\n\nfunc (c *CCache) evictLoop() {\n\tevictQ := c.accnt.GetEvictQ()\n\tfor {\n\t\tselect {\n\t\tcase target := <-evictQ:\n\t\t\tc.evict(target)\n\t\tcase <-c.stop:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ takes a raw key and deletes all archives associated with it from cache\nfunc (c *CCache) DelMetric(rawMetric string) (int, int) {\n\tarchives, series := 0, 0\n\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tmets, ok := c.metricRawKeys[rawMetric]\n\tif !ok {\n\t\treturn series, archives\n\t}\n\n\tfor met := range mets {\n\t\tdelete(c.metricCache, met)\n\t\tc.accnt.DelMetric(met)\n\t\tarchives++\n\t}\n\n\tdelete(c.metricRawKeys, rawMetric)\n\tseries++\n\n\treturn series, archives\n}\n\n\/\/ adds the given chunk to the cache, but only if the metric is sufficiently hot\nfunc (c *CCache) CacheIfHot(metric string, prev uint32, itergen chunk.IterGen) {\n\tc.RLock()\n\n\tvar met *CCacheMetric\n\tvar ok bool\n\n\t\/\/ if this metric is not cached at all it is not hot\n\tif met, ok = c.metricCache[metric]; !ok {\n\t\tc.RUnlock()\n\t\treturn\n\t}\n\n\t\/\/ if the previous chunk is not cached we consider the metric not hot enough to cache this chunk\n\t\/\/ only works reliably if the last chunk of that metric is span aware, otherwise lastTs() will be guessed\n\t\/\/ conservatively which means that the returned value will probably be lower than the real last ts\n\tif met.lastTs() < itergen.Ts {\n\t\tc.RUnlock()\n\t\treturn\n\t}\n\n\taccnt.CacheChunkPushHot.Inc()\n\n\tc.RUnlock()\n\tmet.Add(prev, itergen)\n}\n\nfunc (c *CCache) Add(metric, rawMetric string, prev uint32, itergen chunk.IterGen) {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tccm, ok := c.metricCache[metric]\n\tif !ok {\n\t\tccm = NewCCacheMetric()\n\t\tccm.Init(rawMetric, prev, itergen)\n\t\tc.metricCache[metric] = ccm\n\n\t\t\/\/ if we do not have this raw key yet, create an entry for it\n\t\tccms, ok := c.metricRawKeys[rawMetric]\n\t\tif !ok {\n\t\t\tccms = make(map[string]struct{})\n\t\t\tc.metricRawKeys[rawMetric] = ccms\n\t\t}\n\n\t\t\/\/ if we don't have it yet, associate the metric with this raw key\n\t\tif _, ok = ccms[metric]; !ok {\n\t\t\tccms[metric] = struct{}{}\n\t\t}\n\t} else {\n\t\tccm.Add(prev, itergen)\n\t}\n\n\tc.accnt.AddChunk(metric, itergen.Ts, itergen.Size())\n}\n\nfunc (cc *CCache) Reset() (int, int) {\n\tcc.Lock()\n\tcc.accnt.Reset()\n\tseries := len(cc.metricRawKeys)\n\tarchives := len(cc.metricCache)\n\tcc.metricCache = make(map[string]*CCacheMetric)\n\tcc.metricRawKeys = make(map[string]map[string]struct{})\n\tcc.Unlock()\n\treturn series, archives\n}\n\nfunc (c *CCache) Stop() {\n\tc.accnt.Stop()\n\tc.stop <- nil\n}\n\nfunc (c *CCache) evict(target *accnt.EvictTarget) {\n\tc.Lock()\n\t\/\/ evict() might get called many times in a loop, but we don't want it to block\n\t\/\/ cache reads with the write lock, so we yield right after unlocking to allow\n\t\/\/ reads to go first.\n\tdefer runtime.Gosched()\n\tdefer c.Unlock()\n\n\tccm, 
ok := c.metricCache[target.Metric]\n\tif !ok {\n\t\treturn\n\t}\n\n\tlog.Debug(\"CCache evict: evicting chunk %d on metric %s\\n\", target.Ts, target.Metric)\n\tlength := c.metricCache[target.Metric].Del(target.Ts)\n\tif length == 0 {\n\t\tdelete(c.metricCache, target.Metric)\n\n\t\t\/\/ this key should always be present; if not, there is a corruption of the state\n\t\tdelete(c.metricRawKeys[ccm.RawMetric], target.Metric)\n\t\tif len(c.metricRawKeys[ccm.RawMetric]) == 0 {\n\t\t\tdelete(c.metricRawKeys, ccm.RawMetric)\n\t\t}\n\t}\n}\n\nfunc (c *CCache) Search(ctx context.Context, metric string, from, until uint32) *CCSearchResult {\n\tctx, span := tracing.NewSpan(ctx, c.tracer, \"CCache.Search\")\n\tdefer span.Finish()\n\tvar hit chunk.IterGen\n\tvar cm *CCacheMetric\n\tvar ok bool\n\tres := &CCSearchResult{\n\t\tFrom: from,\n\t\tUntil: until,\n\t}\n\n\tif from == until {\n\t\treturn res\n\t}\n\n\tc.RLock()\n\tdefer c.RUnlock()\n\n\tif cm, ok = c.metricCache[metric]; !ok {\n\t\tspan.SetTag(\"cache\", \"miss\")\n\t\taccnt.CacheMetricMiss.Inc()\n\t\treturn res\n\t}\n\n\tcm.Search(ctx, metric, res, from, until)\n\tif len(res.Start) == 0 && len(res.End) == 0 {\n\t\tspan.SetTag(\"cache\", \"miss\")\n\t\taccnt.CacheMetricMiss.Inc()\n\t} else {\n\n\t\taccnt.CacheChunkHit.Add(len(res.Start) + len(res.End))\n\t\tgo func() {\n\t\t\tfor _, hit = range res.Start {\n\t\t\t\tc.accnt.HitChunk(metric, hit.Ts)\n\t\t\t}\n\t\t\tfor _, hit = range res.End {\n\t\t\t\tc.accnt.HitChunk(metric, hit.Ts)\n\t\t\t}\n\t\t}()\n\n\t\tif res.Complete {\n\t\t\tspan.SetTag(\"cache\", \"hit-full\")\n\t\t\taccnt.CacheMetricHitFull.Inc()\n\t\t} else {\n\t\t\tspan.SetTag(\"cache\", \"hit-partial\")\n\t\t\taccnt.CacheMetricHitPartial.Inc()\n\t\t}\n\t}\n\n\treturn res\n}\n<commit_msg>small CCache.Add optimization<commit_after>package cache\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"runtime\"\n\t\"sync\"\n\n\t\"github.com\/grafana\/metrictank\/mdata\/cache\/accnt\"\n\t\"github.com\/grafana\/metrictank\/mdata\/chunk\"\n\t\"github.com\/grafana\/metrictank\/stats\"\n\t\"github.com\/grafana\/metrictank\/tracing\"\n\topentracing \"github.com\/opentracing\/opentracing-go\"\n\t\"github.com\/raintank\/worldping-api\/pkg\/log\"\n\t\"github.com\/rakyll\/globalconf\"\n)\n\nvar (\n\tmaxSize uint64\n\tsearchFwdBug = stats.NewCounter32(\"recovered_errors.cache.metric.searchForwardBug\")\n)\n\nfunc init() {\n\tflags := flag.NewFlagSet(\"chunk-cache\", flag.ExitOnError)\n\t\/\/ (1024 ^ 3) * 4 = 4294967296 = 4G\n\tflags.Uint64Var(&maxSize, \"max-size\", 4294967296, \"Maximum size of chunk cache in bytes\")\n\tglobalconf.Register(\"chunk-cache\", flags)\n}\n\ntype CCache struct {\n\tsync.RWMutex\n\n\t\/\/ one CCacheMetric struct per metric key, indexed by the key\n\tmetricCache map[string]*CCacheMetric\n\n\t\/\/ sets of metric keys, indexed by their raw metric keys\n\tmetricRawKeys map[string]map[string]struct{}\n\n\t\/\/ accounting for the cache. 
keeps track of when data needs to be evicted\n\t\/\/ and what should be evicted\n\taccnt accnt.Accnt\n\n\t\/\/ channel that's only used to signal goroutines to stop\n\tstop chan interface{}\n\n\ttracer opentracing.Tracer\n}\n\nfunc NewCCache() *CCache {\n\tcc := &CCache{\n\t\tmetricCache: make(map[string]*CCacheMetric),\n\t\tmetricRawKeys: make(map[string]map[string]struct{}),\n\t\taccnt: accnt.NewFlatAccnt(maxSize),\n\t\tstop: make(chan interface{}),\n\t\ttracer: opentracing.NoopTracer{},\n\t}\n\tgo cc.evictLoop()\n\treturn cc\n}\n\nfunc (c *CCache) SetTracer(t opentracing.Tracer) {\n\tc.tracer = t\n}\n\nfunc (c *CCache) evictLoop() {\n\tevictQ := c.accnt.GetEvictQ()\n\tfor {\n\t\tselect {\n\t\tcase target := <-evictQ:\n\t\t\tc.evict(target)\n\t\tcase <-c.stop:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ takes a raw key and deletes all archives associated with it from cache\nfunc (c *CCache) DelMetric(rawMetric string) (int, int) {\n\tarchives, series := 0, 0\n\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tmets, ok := c.metricRawKeys[rawMetric]\n\tif !ok {\n\t\treturn series, archives\n\t}\n\n\tfor met := range mets {\n\t\tdelete(c.metricCache, met)\n\t\tc.accnt.DelMetric(met)\n\t\tarchives++\n\t}\n\n\tdelete(c.metricRawKeys, rawMetric)\n\tseries++\n\n\treturn series, archives\n}\n\n\/\/ adds the given chunk to the cache, but only if the metric is sufficiently hot\nfunc (c *CCache) CacheIfHot(metric string, prev uint32, itergen chunk.IterGen) {\n\tc.RLock()\n\n\tvar met *CCacheMetric\n\tvar ok bool\n\n\t\/\/ if this metric is not cached at all it is not hot\n\tif met, ok = c.metricCache[metric]; !ok {\n\t\tc.RUnlock()\n\t\treturn\n\t}\n\n\t\/\/ if the previous chunk is not cached we consider the metric not hot enough to cache this chunk\n\t\/\/ only works reliably if the last chunk of that metric is span aware, otherwise lastTs() will be guessed\n\t\/\/ conservatively which means that the returned value will probably be lower than the real last ts\n\tif met.lastTs() < itergen.Ts {\n\t\tc.RUnlock()\n\t\treturn\n\t}\n\n\taccnt.CacheChunkPushHot.Inc()\n\n\tc.RUnlock()\n\tmet.Add(prev, itergen)\n}\n\nfunc (c *CCache) Add(metric, rawMetric string, prev uint32, itergen chunk.IterGen) {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tccm, ok := c.metricCache[metric]\n\tif !ok {\n\t\tccm = NewCCacheMetric()\n\t\tccm.Init(rawMetric, prev, itergen)\n\t\tc.metricCache[metric] = ccm\n\n\t\t\/\/ if we do not have this raw key yet, create the entry with the association\n\t\tccms, ok := c.metricRawKeys[rawMetric]\n\t\tif !ok {\n\t\t\tc.metricRawKeys[rawMetric] = map[string]struct{}{\n\t\t\t\tmetric: {},\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ otherwise, make sure the association exists\n\t\t\tccms[metric] = struct{}{}\n\t\t}\n\t} else {\n\t\tccm.Add(prev, itergen)\n\t}\n\n\tc.accnt.AddChunk(metric, itergen.Ts, itergen.Size())\n}\n\nfunc (cc *CCache) Reset() (int, int) {\n\tcc.Lock()\n\tcc.accnt.Reset()\n\tseries := len(cc.metricRawKeys)\n\tarchives := len(cc.metricCache)\n\tcc.metricCache = make(map[string]*CCacheMetric)\n\tcc.metricRawKeys = make(map[string]map[string]struct{})\n\tcc.Unlock()\n\treturn series, archives\n}\n\nfunc (c *CCache) Stop() {\n\tc.accnt.Stop()\n\tc.stop <- nil\n}\n\nfunc (c *CCache) evict(target *accnt.EvictTarget) {\n\tc.Lock()\n\t\/\/ evict() might get called many times in a loop, but we don't want it to block\n\t\/\/ cache reads with the write lock, so we yield right after unlocking to allow\n\t\/\/ reads to go first.\n\tdefer runtime.Gosched()\n\tdefer c.Unlock()\n\n\tccm, ok := 
c.metricCache[target.Metric]\n\tif !ok {\n\t\treturn\n\t}\n\n\tlog.Debug(\"CCache evict: evicting chunk %d on metric %s\\n\", target.Ts, target.Metric)\n\tlength := c.metricCache[target.Metric].Del(target.Ts)\n\tif length == 0 {\n\t\tdelete(c.metricCache, target.Metric)\n\n\t\t\/\/ this key should always be present; if not, there is a corruption of the state\n\t\tdelete(c.metricRawKeys[ccm.RawMetric], target.Metric)\n\t\tif len(c.metricRawKeys[ccm.RawMetric]) == 0 {\n\t\t\tdelete(c.metricRawKeys, ccm.RawMetric)\n\t\t}\n\t}\n}\n\nfunc (c *CCache) Search(ctx context.Context, metric string, from, until uint32) *CCSearchResult {\n\tctx, span := tracing.NewSpan(ctx, c.tracer, \"CCache.Search\")\n\tdefer span.Finish()\n\tvar hit chunk.IterGen\n\tvar cm *CCacheMetric\n\tvar ok bool\n\tres := &CCSearchResult{\n\t\tFrom: from,\n\t\tUntil: until,\n\t}\n\n\tif from == until {\n\t\treturn res\n\t}\n\n\tc.RLock()\n\tdefer c.RUnlock()\n\n\tif cm, ok = c.metricCache[metric]; !ok {\n\t\tspan.SetTag(\"cache\", \"miss\")\n\t\taccnt.CacheMetricMiss.Inc()\n\t\treturn res\n\t}\n\n\tcm.Search(ctx, metric, res, from, until)\n\tif len(res.Start) == 0 && len(res.End) == 0 {\n\t\tspan.SetTag(\"cache\", \"miss\")\n\t\taccnt.CacheMetricMiss.Inc()\n\t} else {\n\n\t\taccnt.CacheChunkHit.Add(len(res.Start) + len(res.End))\n\t\tgo func() {\n\t\t\tfor _, hit = range res.Start {\n\t\t\t\tc.accnt.HitChunk(metric, hit.Ts)\n\t\t\t}\n\t\t\tfor _, hit = range res.End {\n\t\t\t\tc.accnt.HitChunk(metric, hit.Ts)\n\t\t\t}\n\t\t}()\n\n\t\tif res.Complete {\n\t\t\tspan.SetTag(\"cache\", \"hit-full\")\n\t\t\taccnt.CacheMetricHitFull.Inc()\n\t\t} else {\n\t\t\tspan.SetTag(\"cache\", \"hit-partial\")\n\t\t\taccnt.CacheMetricHitPartial.Inc()\n\t\t}\n\t}\n\n\treturn res\n}\n<|endoftext|>"}
{"text":"<commit_before>\/\/ Package types is where exportable API structures go. This is so we can share\n\/\/ the XML marshalling and unmarshalling with the services.\npackage types\n\nimport (\n\t\"encoding\/xml\"\n\t\"time\"\n)\n\ntype Version struct {\n\tXMLName xml.Name `xml:\"version\"`\n\tApp *App `xml:\"app\"`\n\tPackage *Package `xml:\"package\"`\n}\n\ntype App struct {\n\tXMLName xml.Name `xml:\"app\"`\n\tId string `xml:\"id,attr\"`\n\tVersion string `xml:\"version,attr\"`\n\tTrack string `xml:\"track,attr\"`\n\tDate time.Time `xml:\"-\"`\n\tIsActive bool `xml:\"-\"`\n}\n\ntype Package struct {\n\tXMLName xml.Name `xml:\"package\"`\n\tName string `xml:\"name,attr\"` \/\/ Package filename\n\tSize string `xml:\"size,attr\"` \/\/ Size of the file (in bytes)\n\tPath string `xml:\"path,attr\"` \/\/ Path from the root to the file\n\tSha1Sum string `xml:\"sha1sum,attr\"` \/\/ SHA-1 hash of the file\n\tSha256Sum string `xml:\"sha256sum,attr\"` \/\/ Sha-256 hash of the file (extension)\n\tRequired bool `xml:\"required,attr\"`\n\tMetadataSignatureRsa string `xml:\"MetadataSignatureRsa,attr,omitempty\" json:\"metadata_signature_rsa\"`\n\tMetadataSize string `xml:\"MetadataSize,attr,omitempty\" json:\"metadata_size\"`\n}\n<commit_msg>feat(update\/types): nil datastore tag on XMLName<commit_after>\/\/ Package types is where exportable API structures go. 
This is so we can share\n\/\/ the XML marshalling and unmarshalling with the services.\npackage types\n\nimport (\n\t\"encoding\/xml\"\n\t\"time\"\n)\n\ntype Version struct {\n\tXMLName xml.Name `xml:\"version\" datastore:\"-\"`\n\tApp *App `xml:\"app\"`\n\tPackage *Package `xml:\"package\"`\n}\n\ntype App struct {\n\tXMLName xml.Name `xml:\"app\" datastore:\"-\"`\n\tId string `xml:\"id,attr\"`\n\tVersion string `xml:\"version,attr\"`\n\tTrack string `xml:\"track,attr\"`\n\tDate time.Time `xml:\"-\"`\n\tIsActive bool `xml:\"-\"`\n}\n\ntype Package struct {\n\tXMLName xml.Name `xml:\"package\" datastore:\"-\"`\n\tName string `xml:\"name,attr\"` \/\/ Package filename\n\tSize string `xml:\"size,attr\"` \/\/ Size of the file (in bytes)\n\tPath string `xml:\"path,attr\"` \/\/ Path from the root to the file\n\tSha1Sum string `xml:\"sha1sum,attr\"` \/\/ SHA-1 hash of the file\n\tSha256Sum string `xml:\"sha256sum,attr\"` \/\/ Sha-256 hash of the file (extension)\n\tRequired bool `xml:\"required,attr\"`\n\tMetadataSignatureRsa string `xml:\"MetadataSignatureRsa,attr,omitempty\" json:\"metadata_signature_rsa\"`\n\tMetadataSize string `xml:\"MetadataSize,attr,omitempty\" json:\"metadata_size\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package util\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestCompressString(t *testing.T) {\n\texpected := \"This is the test string\"\n\tcompressed := CompressString(expected)\n\tdecompressed := DecompressString(compressed)\n\tassert.Equal(t, expected, decompressed)\n\tassert.True(t, len(compressed) > len(decompressed))\n}\n\nfunc TestCompress(t *testing.T) {\n\texpected := []byte(\"This is the test string\")\n\tcompressed := Compress(expected)\n\tdecompressed := Decompress(compressed)\n\tassert.Equal(t, expected, decompressed)\n\tassert.True(t, len(compressed) > len(decompressed))\n}\n<commit_msg>Converted compress tests to test subpackage<commit_after>package util_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/RobotsAndPencils\/go-saml\/util\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestCompressString(t *testing.T) {\n\texpected := \"This is the test string\"\n\tcompressed := util.CompressString(expected)\n\tdecompressed := util.DecompressString(compressed)\n\tassert.Equal(t, expected, decompressed)\n\tassert.True(t, len(compressed) > len(decompressed))\n}\n\nfunc TestCompress(t *testing.T) {\n\texpected := []byte(\"This is the test string\")\n\tcompressed := util.Compress(expected)\n\tdecompressed := util.Decompress(compressed)\n\tassert.Equal(t, expected, decompressed)\n\tassert.True(t, len(compressed) > len(decompressed))\n}\n<|endoftext|>"} {"text":"<commit_before>package utils\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/techjanitor\/pram-libs\/config\"\n)\n\nvar Services *Capabilities\n\n\/\/ controls capabilities for internal utils\ntype Capabilities struct {\n\n\t\/\/ utils\n\tUtils struct {\n\t\tAkismet bool\n\t}\n\n\t\/\/ storage capabilities\n\tStorage struct {\n\t\tAmazon bool\n\t\tGoogle bool\n\t}\n}\n\nfunc CheckServices() {\n\n\tServices = &Capabilities{}\n\n\tif config.Settings.Amazon.Key != \"\" {\n\t\tServices.Storage.Amazon = true\n\t}\n\n\tif config.Settings.Google.Key != \"\" {\n\t\tServices.Storage.Google = true\n\t}\n\n\tif config.Settings.Akismet.Key != \"\" {\n\t\tServices.Utils.Akismet = true\n\t}\n\n}\n\nfunc (c Capabilities) Print() {\n\n\t\/\/ Marshal the structs into JSON\n\toutput, err := json.MarshalIndent(c, \"\", \" \")\n\tif err != nil 
{\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Printf(\"%s\\n\", output)\n\n}\n<commit_msg>add local config<commit_after>package utils\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/techjanitor\/pram-libs\/config\"\n)\n\nvar Services *Capabilities\n\n\/\/ controls capabilities for internal utils\ntype Capabilities struct {\n\n\t\/\/ utils\n\tUtils struct {\n\t\tAkismet bool\n\t}\n\n\t\/\/ storage capabilities\n\tStorage struct {\n\t\tAmazon bool\n\t\tGoogle bool\n\t}\n}\n\nfunc init() {\n\tServices = &Capabilities{}\n}\n\nfunc CheckServices() {\n\n\tif config.Settings.Amazon.Key != \"\" {\n\t\tServices.Storage.Amazon = true\n\t}\n\n\tif config.Settings.Google.Key != \"\" {\n\t\tServices.Storage.Google = true\n\t}\n\n\tif config.Settings.Akismet.Key != \"\" {\n\t\tServices.Utils.Akismet = true\n\t}\n\n}\n\nfunc (c Capabilities) Print() {\n\n\t\/\/ Marshal the structs into JSON\n\toutput, err := json.MarshalIndent(c, \"\", \" \")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Printf(\"%s\\n\", output)\n\n}\n<|endoftext|>"}
{"text":"<commit_before>package utils\n\nimport (\n\t\"fmt\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"strconv\"\n\n\te \"github.com\/techjanitor\/pram-libs\/errors\"\n\tr \"github.com\/techjanitor\/pram-libs\/redis\"\n)\n\nvar (\n\tmaxLogins int = 5\n\tlimitSeconds uint = 300\n)\n\n\/\/ will increment a counter in redis to limit login attempts\nfunc LoginCounter(userid uint) (err error) {\n\n\t\/\/ convert userid to string\n\tuid := strconv.Itoa(int(userid))\n\n\t\/\/ Initialize cache handle\n\tcache := r.RedisCache\n\n\t\/\/ key is like login:21\n\tkey := fmt.Sprintf(\"login:%s\", uid)\n\n\t\/\/ increment login key\n\tresult, err := cache.Incr(key)\n\tif err != nil {\n\t\treturn e.ErrInternalError\n\t}\n\n\t\/\/ set expiry on the login key\n\terr = cache.Expire(key, limitSeconds)\n\tif err != nil {\n\t\treturn e.ErrInternalError\n\t}\n\n\tif result >= maxLogins {\n\t\treturn e.ErrMaxLogins\n\t}\n\n\treturn\n\n}\n<commit_msg>add redis and audit libs<commit_after>package utils\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\n\te \"github.com\/techjanitor\/pram-libs\/errors\"\n\t\"github.com\/techjanitor\/pram-libs\/redis\"\n)\n\nvar (\n\tmaxLogins int = 5\n\tlimitSeconds uint = 300\n)\n\n\/\/ will increment a counter in redis to limit login attempts\nfunc LoginCounter(userid uint) (err error) {\n\n\t\/\/ convert userid to string\n\tuid := strconv.Itoa(int(userid))\n\n\t\/\/ Initialize cache handle\n\tcache := redis.RedisCache\n\n\t\/\/ key is like login:21\n\tkey := fmt.Sprintf(\"login:%s\", uid)\n\n\t\/\/ increment login key\n\tresult, err := cache.Incr(key)\n\tif err != nil {\n\t\treturn e.ErrInternalError\n\t}\n\n\t\/\/ set expiry on the login key\n\terr = cache.Expire(key, limitSeconds)\n\tif err != nil {\n\t\treturn e.ErrInternalError\n\t}\n\n\tif result >= maxLogins {\n\t\treturn e.ErrMaxLogins\n\t}\n\n\treturn\n\n}\n<|endoftext|>"}
{"text":"<commit_before>package vault\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/armon\/go-metrics\"\n\t\"github.com\/hashicorp\/errwrap\"\n\t\"github.com\/hashicorp\/golang-lru\"\n\t\"github.com\/hashicorp\/vault\/logical\"\n)\n\nconst (\n\t\/\/ policySubPath is the sub-path used for the policy store\n\t\/\/ view. 
This is nested under the system view.\n\tpolicySubPath = \"policy\/\"\n\n\t\/\/ policyCacheSize is the number of policies that are kept cached\n\tpolicyCacheSize = 1024\n)\n\n\/\/ PolicyStore is used to provide durable storage of policy, and to\n\/\/ manage ACLs associated with them.\ntype PolicyStore struct {\n\tview *BarrierView\n\tlru *lru.TwoQueueCache\n}\n\n\/\/ PolicyEntry is used to store a policy by name\ntype PolicyEntry struct {\n\tVersion int\n\tRaw string\n}\n\n\/\/ NewPolicyStore creates a new PolicyStore that is backed\n\/\/ using a given view. It is used to durably store and manage named policies.\nfunc NewPolicyStore(view *BarrierView) *PolicyStore {\n\tcache, _ := lru.New2Q(policyCacheSize)\n\tp := &PolicyStore{\n\t\tview: view,\n\t\tlru: cache,\n\t}\n\treturn p\n}\n\n\/\/ setupPolicyStore is used to initialize the policy store\n\/\/ when the vault is being unsealed.\nfunc (c *Core) setupPolicyStore() error {\n\t\/\/ Create a sub-view\n\tview := c.systemBarrierView.SubView(policySubPath)\n\n\t\/\/ Create the policy store\n\tc.policyStore = NewPolicyStore(view)\n\n\t\/\/ Ensure that the default policy exists, and if not, create it\n\tpolicy, err := c.policyStore.GetPolicy(\"default\")\n\tif err != nil {\n\t\treturn errwrap.Wrapf(\"error fetching default policy from store: {{err}}\", err)\n\t}\n\tif policy == nil {\n\t\terr := c.policyStore.createDefaultPolicy()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ teardownPolicyStore is used to reverse setupPolicyStore\n\/\/ when the vault is being sealed.\nfunc (c *Core) teardownPolicyStore() error {\n\tc.policyStore = nil\n\treturn nil\n}\n\n\/\/ SetPolicy is used to create or update the given policy\nfunc (ps *PolicyStore) SetPolicy(p *Policy) error {\n\tdefer metrics.MeasureSince([]string{\"policy\", \"set_policy\"}, time.Now())\n\tif p.Name == \"root\" {\n\t\treturn fmt.Errorf(\"cannot update root policy\")\n\t}\n\tif p.Name == \"\" {\n\t\treturn fmt.Errorf(\"policy name missing\")\n\t}\n\n\t\/\/ Create the entry\n\tentry, err := logical.StorageEntryJSON(p.Name, &PolicyEntry{\n\t\tVersion: 2,\n\t\tRaw: p.Raw,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create entry: %v\", err)\n\t}\n\tif err := ps.view.Put(entry); err != nil {\n\t\treturn fmt.Errorf(\"failed to persist policy: %v\", err)\n\t}\n\n\t\/\/ Update the LRU cache\n\tps.lru.Add(p.Name, p)\n\treturn nil\n}\n\n\/\/ GetPolicy is used to fetch the named policy\nfunc (ps *PolicyStore) GetPolicy(name string) (*Policy, error) {\n\tdefer metrics.MeasureSince([]string{\"policy\", \"get_policy\"}, time.Now())\n\t\/\/ Check for cached policy\n\tif raw, ok := ps.lru.Get(name); ok {\n\t\treturn raw.(*Policy), nil\n\t}\n\n\t\/\/ Special case the root policy\n\tif name == \"root\" {\n\t\tp := &Policy{Name: \"root\"}\n\t\tps.lru.Add(p.Name, p)\n\t\treturn p, nil\n\t}\n\n\t\/\/ Load the policy in\n\tout, err := ps.view.Get(name)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read policy: %v\", err)\n\t}\n\tif out == nil {\n\t\treturn nil, nil\n\t}\n\n\t\/\/ In Vault 0.1.X we stored the raw policy, but in\n\t\/\/ Vault 0.2 we switch to the PolicyEntry\n\tpolicyEntry := new(PolicyEntry)\n\tvar policy *Policy\n\tif err := out.DecodeJSON(policyEntry); err == nil {\n\t\t\/\/ Parse normally\n\t\tp, err := Parse(policyEntry.Raw)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse policy: %v\", err)\n\t\t}\n\t\tp.Name = name\n\t\tpolicy = p\n\n\t} else {\n\t\t\/\/ On error, attempt to use V1 parsing\n\t\tp, err := 
Parse(string(out.Value))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse policy: %v\", err)\n\t\t}\n\t\tp.Name = name\n\n\t\t\/\/ V1 used implicit glob, we need to do a fix-up\n\t\tfor _, pp := range p.Paths {\n\t\t\tpp.Glob = true\n\t\t}\n\t\tpolicy = p\n\t}\n\n\t\/\/ Update the LRU cache\n\tps.lru.Add(name, policy)\n\treturn policy, nil\n}\n\n\/\/ ListPolicies is used to list the available policies\nfunc (ps *PolicyStore) ListPolicies() ([]string, error) {\n\tdefer metrics.MeasureSince([]string{\"policy\", \"list_policies\"}, time.Now())\n\t\/\/ Scan the view, since the policy names are the same as the\n\t\/\/ key names.\n\treturn CollectKeys(ps.view)\n}\n\n\/\/ DeletePolicy is used to delete the named policy\nfunc (ps *PolicyStore) DeletePolicy(name string) error {\n\tdefer metrics.MeasureSince([]string{\"policy\", \"delete_policy\"}, time.Now())\n\tif name == \"root\" {\n\t\treturn fmt.Errorf(\"cannot delete root policy\")\n\t}\n\tif name == \"default\" {\n\t\treturn fmt.Errorf(\"cannot delete default policy\")\n\t}\n\tif err := ps.view.Delete(name); err != nil {\n\t\treturn fmt.Errorf(\"failed to delete policy: %v\", err)\n\t}\n\n\t\/\/ Clear the cache\n\tps.lru.Remove(name)\n\treturn nil\n}\n\n\/\/ ACL is used to return an ACL which is built using the\n\/\/ named policies.\nfunc (ps *PolicyStore) ACL(names ...string) (*ACL, error) {\n\t\/\/ Fetch the policies\n\tvar policy []*Policy\n\tfor _, name := range names {\n\t\tp, err := ps.GetPolicy(name)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to get policy '%s': %v\", name, err)\n\t\t}\n\t\tpolicy = append(policy, p)\n\t}\n\n\t\/\/ Construct the ACL\n\tacl, err := NewACL(policy)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to construct ACL: %v\", err)\n\t}\n\treturn acl, nil\n}\n\nfunc (ps *PolicyStore) createDefaultPolicy() error {\n\tpolicy, err := Parse(`\npath \"auth\/token\/lookup-self\" {\n capabilities = [\"read\"]\n}\n\npath \"auth\/token\/renew-self\" {\n capabilities = [\"update\"]\n}\n\npath \"auth\/token\/revoke-self\" {\n capabilities = [\"update\"]\n}\n\npath \"cubbyhole\/*\" {\n capabilities = [\"create\", \"read\", \"update\", \"delete\", \"list\"]\n}\n`)\n\tif err != nil {\n\t\treturn errwrap.Wrapf(\"error parsing default policy: {{err}}\", err)\n\t}\n\n\tif policy == nil {\n\t\treturn fmt.Errorf(\"parsing default policy resulted in nil policy\")\n\t}\n\n\tpolicy.Name = \"default\"\n\treturn ps.SetPolicy(policy)\n}\n<commit_msg>Add listing of cubbyhole's root to the default policy.<commit_after>package vault\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/armon\/go-metrics\"\n\t\"github.com\/hashicorp\/errwrap\"\n\t\"github.com\/hashicorp\/golang-lru\"\n\t\"github.com\/hashicorp\/vault\/logical\"\n)\n\nconst (\n\t\/\/ policySubPath is the sub-path used for the policy store\n\t\/\/ view. This is nested under the system view.\n\tpolicySubPath = \"policy\/\"\n\n\t\/\/ policyCacheSize is the number of policies that are kept cached\n\tpolicyCacheSize = 1024\n)\n\n\/\/ PolicyStore is used to provide durable storage of policy, and to\n\/\/ manage ACLs associated with them.\ntype PolicyStore struct {\n\tview *BarrierView\n\tlru *lru.TwoQueueCache\n}\n\n\/\/ PolicyEntry is used to store a policy by name\ntype PolicyEntry struct {\n\tVersion int\n\tRaw string\n}\n\n\/\/ NewPolicyStore creates a new PolicyStore that is backed\n\/\/ using a given view. 
It is used to durably store and manage named policies.\nfunc NewPolicyStore(view *BarrierView) *PolicyStore {\n\tcache, _ := lru.New2Q(policyCacheSize)\n\tp := &PolicyStore{\n\t\tview: view,\n\t\tlru: cache,\n\t}\n\treturn p\n}\n\n\/\/ setupPolicyStore is used to initialize the policy store\n\/\/ when the vault is being unsealed.\nfunc (c *Core) setupPolicyStore() error {\n\t\/\/ Create a sub-view\n\tview := c.systemBarrierView.SubView(policySubPath)\n\n\t\/\/ Create the policy store\n\tc.policyStore = NewPolicyStore(view)\n\n\t\/\/ Ensure that the default policy exists, and if not, create it\n\tpolicy, err := c.policyStore.GetPolicy(\"default\")\n\tif err != nil {\n\t\treturn errwrap.Wrapf(\"error fetching default policy from store: {{err}}\", err)\n\t}\n\tif policy == nil {\n\t\terr := c.policyStore.createDefaultPolicy()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ teardownPolicyStore is used to reverse setupPolicyStore\n\/\/ when the vault is being sealed.\nfunc (c *Core) teardownPolicyStore() error {\n\tc.policyStore = nil\n\treturn nil\n}\n\n\/\/ SetPolicy is used to create or update the given policy\nfunc (ps *PolicyStore) SetPolicy(p *Policy) error {\n\tdefer metrics.MeasureSince([]string{\"policy\", \"set_policy\"}, time.Now())\n\tif p.Name == \"root\" {\n\t\treturn fmt.Errorf(\"cannot update root policy\")\n\t}\n\tif p.Name == \"\" {\n\t\treturn fmt.Errorf(\"policy name missing\")\n\t}\n\n\t\/\/ Create the entry\n\tentry, err := logical.StorageEntryJSON(p.Name, &PolicyEntry{\n\t\tVersion: 2,\n\t\tRaw: p.Raw,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create entry: %v\", err)\n\t}\n\tif err := ps.view.Put(entry); err != nil {\n\t\treturn fmt.Errorf(\"failed to persist policy: %v\", err)\n\t}\n\n\t\/\/ Update the LRU cache\n\tps.lru.Add(p.Name, p)\n\treturn nil\n}\n\n\/\/ GetPolicy is used to fetch the named policy\nfunc (ps *PolicyStore) GetPolicy(name string) (*Policy, error) {\n\tdefer metrics.MeasureSince([]string{\"policy\", \"get_policy\"}, time.Now())\n\t\/\/ Check for cached policy\n\tif raw, ok := ps.lru.Get(name); ok {\n\t\treturn raw.(*Policy), nil\n\t}\n\n\t\/\/ Special case the root policy\n\tif name == \"root\" {\n\t\tp := &Policy{Name: \"root\"}\n\t\tps.lru.Add(p.Name, p)\n\t\treturn p, nil\n\t}\n\n\t\/\/ Load the policy in\n\tout, err := ps.view.Get(name)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read policy: %v\", err)\n\t}\n\tif out == nil {\n\t\treturn nil, nil\n\t}\n\n\t\/\/ In Vault 0.1.X we stored the raw policy, but in\n\t\/\/ Vault 0.2 we switch to the PolicyEntry\n\tpolicyEntry := new(PolicyEntry)\n\tvar policy *Policy\n\tif err := out.DecodeJSON(policyEntry); err == nil {\n\t\t\/\/ Parse normally\n\t\tp, err := Parse(policyEntry.Raw)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse policy: %v\", err)\n\t\t}\n\t\tp.Name = name\n\t\tpolicy = p\n\n\t} else {\n\t\t\/\/ On error, attempt to use V1 parsing\n\t\tp, err := Parse(string(out.Value))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse policy: %v\", err)\n\t\t}\n\t\tp.Name = name\n\n\t\t\/\/ V1 used implicit glob, we need to do a fix-up\n\t\tfor _, pp := range p.Paths {\n\t\t\tpp.Glob = true\n\t\t}\n\t\tpolicy = p\n\t}\n\n\t\/\/ Update the LRU cache\n\tps.lru.Add(name, policy)\n\treturn policy, nil\n}\n\n\/\/ ListPolicies is used to list the available policies\nfunc (ps *PolicyStore) ListPolicies() ([]string, error) {\n\tdefer metrics.MeasureSince([]string{\"policy\", \"list_policies\"}, 
time.Now())\n\t\/\/ Scan the view, since the policy names are the same as the\n\t\/\/ key names.\n\treturn CollectKeys(ps.view)\n}\n\n\/\/ DeletePolicy is used to delete the named policy\nfunc (ps *PolicyStore) DeletePolicy(name string) error {\n\tdefer metrics.MeasureSince([]string{\"policy\", \"delete_policy\"}, time.Now())\n\tif name == \"root\" {\n\t\treturn fmt.Errorf(\"cannot delete root policy\")\n\t}\n\tif name == \"default\" {\n\t\treturn fmt.Errorf(\"cannot delete default policy\")\n\t}\n\tif err := ps.view.Delete(name); err != nil {\n\t\treturn fmt.Errorf(\"failed to delete policy: %v\", err)\n\t}\n\n\t\/\/ Clear the cache\n\tps.lru.Remove(name)\n\treturn nil\n}\n\n\/\/ ACL is used to return an ACL which is built using the\n\/\/ named policies.\nfunc (ps *PolicyStore) ACL(names ...string) (*ACL, error) {\n\t\/\/ Fetch the policies\n\tvar policy []*Policy\n\tfor _, name := range names {\n\t\tp, err := ps.GetPolicy(name)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to get policy '%s': %v\", name, err)\n\t\t}\n\t\tpolicy = append(policy, p)\n\t}\n\n\t\/\/ Construct the ACL\n\tacl, err := NewACL(policy)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to construct ACL: %v\", err)\n\t}\n\treturn acl, nil\n}\n\nfunc (ps *PolicyStore) createDefaultPolicy() error {\n\tpolicy, err := Parse(`\npath \"auth\/token\/lookup-self\" {\n capabilities = [\"read\"]\n}\n\npath \"auth\/token\/renew-self\" {\n capabilities = [\"update\"]\n}\n\npath \"auth\/token\/revoke-self\" {\n capabilities = [\"update\"]\n}\n\npath \"cubbyhole\/*\" {\n capabilities = [\"create\", \"read\", \"update\", \"delete\", \"list\"]\n}\n\npath \"cubbyhole\" {\n capabilities = [\"list\"]\n}\n`)\n\tif err != nil {\n\t\treturn errwrap.Wrapf(\"error parsing default policy: {{err}}\", err)\n\t}\n\n\tif policy == nil {\n\t\treturn fmt.Errorf(\"parsing default policy resulted in nil policy\")\n\t}\n\n\tpolicy.Name = \"default\"\n\treturn ps.SetPolicy(policy)\n}\n<|endoftext|>"} {"text":"<commit_before>package notifiers\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/grafana\/grafana\/pkg\/bus\"\n\t\"github.com\/grafana\/grafana\/pkg\/components\/simplejson\"\n\t\"github.com\/grafana\/grafana\/pkg\/log\"\n\tm \"github.com\/grafana\/grafana\/pkg\/models\"\n\t\"github.com\/grafana\/grafana\/pkg\/services\/alerting\"\n)\n\nconst DefaultDingdingMsgType = \"link\"\nconst DingdingOptionsTemplate = `\n <h3 class=\"page-heading\">DingDing settings<\/h3>\n <div class=\"gf-form\">\n <span class=\"gf-form-label width-10\">Url<\/span>\n <input type=\"text\" required class=\"gf-form-input max-width-26\" ng-model=\"ctrl.model.settings.url\" placeholder=\"https:\/\/oapi.dingtalk.com\/robot\/send?access_token=xxxxxxxxx\"><\/input>\n <\/div>\n <div class=\"gf-form\">\n <span class=\"gf-form-label width-10\">MessageType<\/span>\n <select class=\"gf-form-input max-width-14\" ng-model=\"ctrl.model.settings.msgType\" ng-options=\"s for s in ['link','actionCard']\" ng-init=\"ctrl.model.settings.msgType=ctrl.model.settings.msgType || '` + DefaultDingdingMsgType + `'\"><\/select>\n <\/div>\n`\n\nfunc init() {\n\talerting.RegisterNotifier(&alerting.NotifierPlugin{\n\t\tType: \"dingding\",\n\t\tName: \"DingDing\",\n\t\tDescription: \"Sends HTTP POST request to DingDing\",\n\t\tFactory: NewDingDingNotifier,\n\t\tOptionsTemplate: DingdingOptionsTemplate,\n\t})\n\n}\n\nfunc NewDingDingNotifier(model *m.AlertNotification) (alerting.Notifier, error) {\n\turl := model.Settings.Get(\"url\").MustString()\n\tif url == \"\" {\n\t\treturn nil, 
alerting.ValidationError{Reason: \"Could not find url property in settings\"}\n\t}\n\n\tmsgType := model.Settings.Get(\"msgType\").MustString(DefaultDingdingMsgType)\n\n\treturn &DingDingNotifier{\n\t\tNotifierBase: NewNotifierBase(model),\n\t\tMsgType: msgType,\n\t\tUrl: url,\n\t\tlog: log.New(\"alerting.notifier.dingding\"),\n\t}, nil\n}\n\ntype DingDingNotifier struct {\n\tNotifierBase\n\tMsgType string\n\tUrl string\n\tlog log.Logger\n}\n\nfunc (this *DingDingNotifier) Notify(evalContext *alerting.EvalContext) error {\n\tthis.log.Info(\"Sending dingding\")\n\n\tmessageUrl, err := evalContext.GetRuleUrl()\n\tif err != nil {\n\t\tthis.log.Error(\"Failed to get messageUrl\", \"error\", err, \"dingding\", this.Name)\n\t\tmessageUrl = \"\"\n\t}\n\tthis.log.Info(\"messageUrl:\" + messageUrl)\n\n\tmessage := evalContext.Rule.Message\n\tpicUrl := evalContext.ImagePublicUrl\n\ttitle := evalContext.GetNotificationTitle()\n\tif message == \"\" {\n\t\tmessage = title\n\t}\n\n\tfor i, match := range evalContext.EvalMatches {\n\t\tmessage += fmt.Sprintf(\"\\\\n%2d. %s value %s\", i+1, match.Metric, match.Value)\n\t}\n\n\tvar bodyStr string\n\tif this.MsgType == \"actionCard\" {\n\t\tbodyStr = `{\n\t\t\t\"msgtype\": \"actionCard\",\n\t\t\t\"actionCard\": {\n\t\t\t\t\"text\": \"` + message + `\",\n\t\t\t\t\"title\": \"` + title + `\",\n\t\t\t\t\"singleTitle\": \"More\",\n\t\t\t\t\"singleURL\": \"` + messageUrl + `\"\n\t\t\t}\n\t\t}`\n\t} else {\n\t\tbodyStr = `{\n\t\t\t\"msgtype\": \"link\",\n\t\t\t\"link\": {\n\t\t\t\t\"text\": \"` + message + `\",\n\t\t\t\t\"title\": \"` + title + `\",\n\t\t\t\t\"picUrl\": \"` + picUrl + `\",\n\t\t\t\t\"messageUrl\": \"` + messageUrl + `\"\n\t\t\t}\n\t\t}`\n\t}\n\n\tbodyJSON, err := simplejson.NewJson([]byte(bodyStr))\n\n\tif err != nil {\n\t\tthis.log.Error(\"Failed to create Json data\", \"error\", err, \"dingding\", this.Name)\n\t}\n\n\tbody, err := bodyJSON.MarshalJSON()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd := &m.SendWebhookSync{\n\t\tUrl: this.Url,\n\t\tBody: string(body),\n\t}\n\n\tif err := bus.DispatchCtx(evalContext.Ctx, cmd); err != nil {\n\t\tthis.log.Error(\"Failed to send DingDing\", \"error\", err, \"dingding\", this.Name)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>Optimize the Dingding match values format<commit_after>package notifiers\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/grafana\/grafana\/pkg\/bus\"\n\t\"github.com\/grafana\/grafana\/pkg\/components\/simplejson\"\n\t\"github.com\/grafana\/grafana\/pkg\/log\"\n\tm \"github.com\/grafana\/grafana\/pkg\/models\"\n\t\"github.com\/grafana\/grafana\/pkg\/services\/alerting\"\n)\n\nconst DefaultDingdingMsgType = \"link\"\nconst DingdingOptionsTemplate = `\n <h3 class=\"page-heading\">DingDing settings<\/h3>\n <div class=\"gf-form\">\n <span class=\"gf-form-label width-10\">Url<\/span>\n <input type=\"text\" required class=\"gf-form-input max-width-26\" ng-model=\"ctrl.model.settings.url\" placeholder=\"https:\/\/oapi.dingtalk.com\/robot\/send?access_token=xxxxxxxxx\"><\/input>\n <\/div>\n <div class=\"gf-form\">\n <span class=\"gf-form-label width-10\">MessageType<\/span>\n <select class=\"gf-form-input max-width-14\" ng-model=\"ctrl.model.settings.msgType\" ng-options=\"s for s in ['link','actionCard']\" ng-init=\"ctrl.model.settings.msgType=ctrl.model.settings.msgType || '` + DefaultDingdingMsgType + `'\"><\/select>\n <\/div>\n`\n\nfunc init() {\n\talerting.RegisterNotifier(&alerting.NotifierPlugin{\n\t\tType: \"dingding\",\n\t\tName: \"DingDing\",\n\t\tDescription: \"Sends HTTP 
POST request to DingDing\",\n\t\tFactory: NewDingDingNotifier,\n\t\tOptionsTemplate: DingdingOptionsTemplate,\n\t})\n\n}\n\nfunc NewDingDingNotifier(model *m.AlertNotification) (alerting.Notifier, error) {\n\turl := model.Settings.Get(\"url\").MustString()\n\tif url == \"\" {\n\t\treturn nil, alerting.ValidationError{Reason: \"Could not find url property in settings\"}\n\t}\n\n\tmsgType := model.Settings.Get(\"msgType\").MustString(DefaultDingdingMsgType)\n\n\treturn &DingDingNotifier{\n\t\tNotifierBase: NewNotifierBase(model),\n\t\tMsgType: msgType,\n\t\tUrl: url,\n\t\tlog: log.New(\"alerting.notifier.dingding\"),\n\t}, nil\n}\n\ntype DingDingNotifier struct {\n\tNotifierBase\n\tMsgType string\n\tUrl string\n\tlog log.Logger\n}\n\nfunc (this *DingDingNotifier) Notify(evalContext *alerting.EvalContext) error {\n\tthis.log.Info(\"Sending dingding\")\n\n\tmessageUrl, err := evalContext.GetRuleUrl()\n\tif err != nil {\n\t\tthis.log.Error(\"Failed to get messageUrl\", \"error\", err, \"dingding\", this.Name)\n\t\tmessageUrl = \"\"\n\t}\n\tthis.log.Info(\"messageUrl:\" + messageUrl)\n\n\tmessage := evalContext.Rule.Message\n\tpicUrl := evalContext.ImagePublicUrl\n\ttitle := evalContext.GetNotificationTitle()\n\tif message == \"\" {\n\t\tmessage = title\n\t}\n\n\tfor i, match := range evalContext.EvalMatches {\n\t\tmessage += fmt.Sprintf(\"\\\\n%2d. %s: %s\", i+1, match.Metric, match.Value)\n\t}\n\n\tvar bodyStr string\n\tif this.MsgType == \"actionCard\" {\n\t\tbodyStr = `{\n\t\t\t\"msgtype\": \"actionCard\",\n\t\t\t\"actionCard\": {\n\t\t\t\t\"text\": \"` + message + `\",\n\t\t\t\t\"title\": \"` + title + `\",\n\t\t\t\t\"singleTitle\": \"More\",\n\t\t\t\t\"singleURL\": \"` + messageUrl + `\"\n\t\t\t}\n\t\t}`\n\t} else {\n\t\tbodyStr = `{\n\t\t\t\"msgtype\": \"link\",\n\t\t\t\"link\": {\n\t\t\t\t\"text\": \"` + message + `\",\n\t\t\t\t\"title\": \"` + title + `\",\n\t\t\t\t\"picUrl\": \"` + picUrl + `\",\n\t\t\t\t\"messageUrl\": \"` + messageUrl + `\"\n\t\t\t}\n\t\t}`\n\t}\n\n\tbodyJSON, err := simplejson.NewJson([]byte(bodyStr))\n\n\tif err != nil {\n\t\tthis.log.Error(\"Failed to create Json data\", \"error\", err, \"dingding\", this.Name)\n\t}\n\n\tbody, err := bodyJSON.MarshalJSON()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd := &m.SendWebhookSync{\n\t\tUrl: this.Url,\n\t\tBody: string(body),\n\t}\n\n\tif err := bus.DispatchCtx(evalContext.Ctx, cmd); err != nil {\n\t\tthis.log.Error(\"Failed to send DingDing\", \"error\", err, \"dingding\", this.Name)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package notifiers\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\n\t\"github.com\/grafana\/grafana\/pkg\/bus\"\n\t\"github.com\/grafana\/grafana\/pkg\/components\/simplejson\"\n\t\"github.com\/grafana\/grafana\/pkg\/log\"\n\tm \"github.com\/grafana\/grafana\/pkg\/models\"\n\t\"github.com\/grafana\/grafana\/pkg\/services\/alerting\"\n)\n\nconst DefaultDingdingMsgType = \"link\"\nconst DingdingOptionsTemplate = `\n <h3 class=\"page-heading\">DingDing settings<\/h3>\n <div class=\"gf-form\">\n <span class=\"gf-form-label width-10\">Url<\/span>\n <input type=\"text\" required class=\"gf-form-input max-width-70\" ng-model=\"ctrl.model.settings.url\" placeholder=\"https:\/\/oapi.dingtalk.com\/robot\/send?access_token=xxxxxxxxx\"><\/input>\n <\/div>\n <div class=\"gf-form\">\n <span class=\"gf-form-label width-10\">MessageType<\/span>\n <select class=\"gf-form-input max-width-14\" ng-model=\"ctrl.model.settings.msgType\" ng-options=\"s for s in ['link','actionCard']\" 
ng-init=\"ctrl.model.settings.msgType=ctrl.model.settings.msgType || '` + DefaultDingdingMsgType + `'\"><\/select>\n <\/div>\n <div class=\"gf-form\">\n <span class=\"gf-form-label width-10\">OpenInBrowser<\/span>\n <gf-form-switch class=\"gf-form\" checked=\"ctrl.model.settings.openInBrowser\"><\/gf-form-switch>\n <info-popover mode=\"right-normal\">Open the message url in browser instead of inside of Dingding<\/info-popover>\n <\/div>\n`\n\nfunc init() {\n\talerting.RegisterNotifier(&alerting.NotifierPlugin{\n\t\tType: \"dingding\",\n\t\tName: \"DingDing\",\n\t\tDescription: \"Sends HTTP POST request to DingDing\",\n\t\tFactory: NewDingDingNotifier,\n\t\tOptionsTemplate: DingdingOptionsTemplate,\n\t})\n\n}\n\nfunc NewDingDingNotifier(model *m.AlertNotification) (alerting.Notifier, error) {\n\turl := model.Settings.Get(\"url\").MustString()\n\tif url == \"\" {\n\t\treturn nil, alerting.ValidationError{Reason: \"Could not find url property in settings\"}\n\t}\n\n\tmsgType := model.Settings.Get(\"msgType\").MustString(DefaultDingdingMsgType)\n\topenInBrowser := model.Settings.Get(\"openInBrowser\").MustBool(true)\n\n\treturn &DingDingNotifier{\n\t\tNotifierBase: NewNotifierBase(model),\n\t\tOpenInBrowser: openInBrowser,\n\t\tMsgType: msgType,\n\t\tUrl: url,\n\t\tlog: log.New(\"alerting.notifier.dingding\"),\n\t}, nil\n}\n\ntype DingDingNotifier struct {\n\tNotifierBase\n\tMsgType string\n\tOpenInBrowser bool \/\/Set whether the message url will open outside of Dingding\n\tUrl string\n\tlog log.Logger\n}\n\nfunc (this *DingDingNotifier) Notify(evalContext *alerting.EvalContext) error {\n\tthis.log.Info(\"Sending dingding\")\n\n\tmessageUrl, err := evalContext.GetRuleUrl()\n\tif err != nil {\n\t\tthis.log.Error(\"Failed to get messageUrl\", \"error\", err, \"dingding\", this.Name)\n\t\tmessageUrl = \"\"\n\t}\n\n\tif this.OpenInBrowser {\n\t\tq := url.Values{\n\t\t\t\"pc_slide\": {\"false\"},\n\t\t\t\"url\": {messageUrl},\n\t\t}\n\n\t\t\/\/ Use special link to auto open the message url outside of Dingding\n\t\t\/\/ Refer: https:\/\/open-doc.dingtalk.com\/docs\/doc.htm?treeId=385&articleId=104972&docType=1#s9\n\t\tmessageUrl = \"dingtalk:\/\/dingtalkclient\/page\/link?\" + q.Encode()\n\t}\n\n\tthis.log.Info(\"messageUrl:\" + messageUrl)\n\n\tmessage := evalContext.Rule.Message\n\tpicUrl := evalContext.ImagePublicUrl\n\ttitle := evalContext.GetNotificationTitle()\n\tif message == \"\" {\n\t\tmessage = title\n\t}\n\n\tfor i, match := range evalContext.EvalMatches {\n\t\tmessage += fmt.Sprintf(\"\\\\n%2d. 
%s: %s\", i+1, match.Metric, match.Value)\n\t}\n\n\tvar bodyStr string\n\tif this.MsgType == \"actionCard\" {\n\t\t\/\/ Embed the pic into the markdown directly because actionCard doesn't have a picUrl field\n\t\tif picUrl != \"\" {\n\t\t\tmessage = \"![](\" + picUrl + \")\\\\n\\\\n\" + message\n\t\t}\n\n\t\tbodyStr = `{\n\t\t\t\"msgtype\": \"actionCard\",\n\t\t\t\"actionCard\": {\n\t\t\t\t\"text\": \"` + message + `\",\n\t\t\t\t\"title\": \"` + title + `\",\n\t\t\t\t\"singleTitle\": \"More\",\n\t\t\t\t\"singleURL\": \"` + messageUrl + `\"\n\t\t\t}\n\t\t}`\n\t} else {\n\t\tbodyStr = `{\n\t\t\t\"msgtype\": \"link\",\n\t\t\t\"link\": {\n\t\t\t\t\"text\": \"` + message + `\",\n\t\t\t\t\"title\": \"` + title + `\",\n\t\t\t\t\"picUrl\": \"` + picUrl + `\",\n\t\t\t\t\"messageUrl\": \"` + messageUrl + `\"\n\t\t\t}\n\t\t}`\n\t}\n\n\tbodyJSON, err := simplejson.NewJson([]byte(bodyStr))\n\n\tif err != nil {\n\t\tthis.log.Error(\"Failed to create Json data\", \"error\", err, \"dingding\", this.Name)\n\t}\n\n\tbody, err := bodyJSON.MarshalJSON()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd := &m.SendWebhookSync{\n\t\tUrl: this.Url,\n\t\tBody: string(body),\n\t}\n\n\tif err := bus.DispatchCtx(evalContext.Ctx, cmd); err != nil {\n\t\tthis.log.Error(\"Failed to send DingDing\", \"error\", err, \"dingding\", this.Name)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>Remove option used to control within browser<commit_after>package notifiers\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\n\t\"github.com\/grafana\/grafana\/pkg\/bus\"\n\t\"github.com\/grafana\/grafana\/pkg\/components\/simplejson\"\n\t\"github.com\/grafana\/grafana\/pkg\/log\"\n\tm \"github.com\/grafana\/grafana\/pkg\/models\"\n\t\"github.com\/grafana\/grafana\/pkg\/services\/alerting\"\n)\n\nconst DefaultDingdingMsgType = \"link\"\nconst DingdingOptionsTemplate = `\n <h3 class=\"page-heading\">DingDing settings<\/h3>\n <div class=\"gf-form\">\n <span class=\"gf-form-label width-10\">Url<\/span>\n <input type=\"text\" required class=\"gf-form-input max-width-70\" ng-model=\"ctrl.model.settings.url\" placeholder=\"https:\/\/oapi.dingtalk.com\/robot\/send?access_token=xxxxxxxxx\"><\/input>\n <\/div>\n <div class=\"gf-form\">\n <span class=\"gf-form-label width-10\">MessageType<\/span>\n <select class=\"gf-form-input max-width-14\" ng-model=\"ctrl.model.settings.msgType\" ng-options=\"s for s in ['link','actionCard']\" ng-init=\"ctrl.model.settings.msgType=ctrl.model.settings.msgType || '` + DefaultDingdingMsgType + `'\"><\/select>\n <\/div>\n`\n\nfunc init() {\n\talerting.RegisterNotifier(&alerting.NotifierPlugin{\n\t\tType: \"dingding\",\n\t\tName: \"DingDing\",\n\t\tDescription: \"Sends HTTP POST request to DingDing\",\n\t\tFactory: NewDingDingNotifier,\n\t\tOptionsTemplate: DingdingOptionsTemplate,\n\t})\n\n}\n\nfunc NewDingDingNotifier(model *m.AlertNotification) (alerting.Notifier, error) {\n\turl := model.Settings.Get(\"url\").MustString()\n\tif url == \"\" {\n\t\treturn nil, alerting.ValidationError{Reason: \"Could not find url property in settings\"}\n\t}\n\n\tmsgType := model.Settings.Get(\"msgType\").MustString(DefaultDingdingMsgType)\n\n\treturn &DingDingNotifier{\n\t\tNotifierBase: NewNotifierBase(model),\n\t\tMsgType: msgType,\n\t\tUrl: url,\n\t\tlog: log.New(\"alerting.notifier.dingding\"),\n\t}, nil\n}\n\ntype DingDingNotifier struct {\n\tNotifierBase\n\tMsgType string\n\tUrl string\n\tlog log.Logger\n}\n\nfunc (this *DingDingNotifier) Notify(evalContext *alerting.EvalContext) error {\n\tthis.log.Info(\"Sending 
dingding\")\n\n\tmessageUrl, err := evalContext.GetRuleUrl()\n\tif err != nil {\n\t\tthis.log.Error(\"Failed to get messageUrl\", \"error\", err, \"dingding\", this.Name)\n\t\tmessageUrl = \"\"\n\t}\n\n\tq := url.Values{\n\t\t\"pc_slide\": {\"false\"},\n\t\t\"url\": {messageUrl},\n\t}\n\n\t\/\/ Use special link to auto open the message url outside of Dingding\n\t\/\/ Refer: https:\/\/open-doc.dingtalk.com\/docs\/doc.htm?treeId=385&articleId=104972&docType=1#s9\n\tmessageUrl = \"dingtalk:\/\/dingtalkclient\/page\/link?\" + q.Encode()\n\n\tthis.log.Info(\"messageUrl:\" + messageUrl)\n\n\tmessage := evalContext.Rule.Message\n\tpicUrl := evalContext.ImagePublicUrl\n\ttitle := evalContext.GetNotificationTitle()\n\tif message == \"\" {\n\t\tmessage = title\n\t}\n\n\tfor i, match := range evalContext.EvalMatches {\n\t\tmessage += fmt.Sprintf(\"\\\\n%2d. %s: %s\", i+1, match.Metric, match.Value)\n\t}\n\n\tvar bodyStr string\n\tif this.MsgType == \"actionCard\" {\n\t\t\/\/ Embed the pic into the markdown directly because actionCard doesn't have a picUrl field\n\t\tif picUrl != \"\" {\n\t\t\tmessage = \"![](\" + picUrl + \")\\\\n\\\\n\" + message\n\t\t}\n\n\t\tbodyStr = `{\n\t\t\t\"msgtype\": \"actionCard\",\n\t\t\t\"actionCard\": {\n\t\t\t\t\"text\": \"` + message + `\",\n\t\t\t\t\"title\": \"` + title + `\",\n\t\t\t\t\"singleTitle\": \"More\",\n\t\t\t\t\"singleURL\": \"` + messageUrl + `\"\n\t\t\t}\n\t\t}`\n\t} else {\n\t\tbodyStr = `{\n\t\t\t\"msgtype\": \"link\",\n\t\t\t\"link\": {\n\t\t\t\t\"text\": \"` + message + `\",\n\t\t\t\t\"title\": \"` + title + `\",\n\t\t\t\t\"picUrl\": \"` + picUrl + `\",\n\t\t\t\t\"messageUrl\": \"` + messageUrl + `\"\n\t\t\t}\n\t\t}`\n\t}\n\n\tbodyJSON, err := simplejson.NewJson([]byte(bodyStr))\n\n\tif err != nil {\n\t\tthis.log.Error(\"Failed to create Json data\", \"error\", err, \"dingding\", this.Name)\n\t}\n\n\tbody, err := bodyJSON.MarshalJSON()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd := &m.SendWebhookSync{\n\t\tUrl: this.Url,\n\t\tBody: string(body),\n\t}\n\n\tif err := bus.DispatchCtx(evalContext.Ctx, cmd); err != nil {\n\t\tthis.log.Error(\"Failed to send DingDing\", \"error\", err, \"dingding\", this.Name)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package sysregistriesv2\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/containers\/image\/types\"\n)\n\n\/\/ systemRegistriesConfPath is the path to the system-wide registry\n\/\/ configuration file and is used to add\/subtract potential registries for\n\/\/ obtaining images. You can override this at build time with\n\/\/ -ldflags '-X github.com\/containers\/image\/sysregistries.systemRegistriesConfPath=$your_path'\nvar systemRegistriesConfPath = builtinRegistriesConfPath\n\n\/\/ builtinRegistriesConfPath is the path to the registry configuration file.\n\/\/ DO NOT change this, instead see systemRegistriesConfPath above.\nconst builtinRegistriesConfPath = \"\/etc\/containers\/registries.conf\"\n\n\/\/ Mirror represents a mirror. 
Mirrors can be used as pull-through caches for\n\/\/ registries.\ntype Mirror struct {\n\t\/\/ The mirror's URL.\n\tURL string `toml:\"url\"`\n\t\/\/ If true, certs verification will be skipped and HTTP (non-TLS)\n\t\/\/ connections will be allowed.\n\tInsecure bool `toml:\"insecure\"`\n}\n\n\/\/ Registry represents a registry.\ntype Registry struct {\n\t\/\/ Serializable registry URL.\n\tURL string `toml:\"url\"`\n\t\/\/ The registry's mirrors.\n\tMirrors []Mirror `toml:\"mirror\"`\n\t\/\/ If true, pulling from the registry will be blocked.\n\tBlocked bool `toml:\"blocked\"`\n\t\/\/ If true, certs verification will be skipped and HTTP (non-TLS)\n\t\/\/ connections will be allowed.\n\tInsecure bool `toml:\"insecure\"`\n\t\/\/ If true, the registry can be used when pulling an unqualified image.\n\tSearch bool `toml:\"unqualified-search\"`\n\t\/\/ Prefix is used for matching images, and to translate one namespace to\n\t\/\/ another. If `Prefix=\"example.com\/bar\"`, `URL=\"example.com\/foo\/bar\"`\n\t\/\/ and we pull from \"example.com\/bar\/myimage:latest\", the image will\n\t\/\/ effectively be pulled from \"example.com\/foo\/bar\/myimage:latest\".\n\t\/\/ If no Prefix is specified, it defaults to the specified URL.\n\tPrefix string `toml:\"prefix\"`\n}\n\n\/\/ backwards compatibility to sysregistries v1\ntype v1TOMLregistries struct {\n\tRegistries []string `toml:\"registries\"`\n}\n\n\/\/ tomlConfig is the data type used to unmarshal the toml config.\ntype tomlConfig struct {\n\tRegistries []Registry `toml:\"registry\"`\n\t\/\/ backwards compatibility to sysregistries v1\n\tV1Registries struct {\n\t\tSearch v1TOMLregistries `toml:\"search\"`\n\t\tInsecure v1TOMLregistries `toml:\"insecure\"`\n\t\tBlock v1TOMLregistries `toml:\"block\"`\n\t} `toml:\"registries\"`\n}\n\n\/\/ InvalidRegistries represents an invalid registry configuration. An example\n\/\/ is when \"registry.com\" is defined multiple times in the configuration but\n\/\/ with conflicting security settings.\ntype InvalidRegistries struct {\n\ts string\n}\n\n\/\/ Error returns the error string.\nfunc (e *InvalidRegistries) Error() string {\n\treturn e.s\n}\n\n\/\/ parseURL parses the input string, performs some sanity checks and returns\n\/\/ the sanitized input string. 
An error is returned in case parsing fails\n\/\/ or if URI scheme or user is set.\nfunc parseURL(input string) (string, error) {\n\ttrimmed := strings.TrimRight(input, \"\/\")\n\n\tif trimmed == \"\" {\n\t\treturn \"\", &InvalidRegistries{s: \"invalid URL: cannot be empty\"}\n\t}\n\n\turi, err := url.Parse(trimmed)\n\tif err != nil {\n\t\treturn \"\", &InvalidRegistries{s: fmt.Sprintf(\"invalid URL '%s': %v\", input, err)}\n\t}\n\n\t\/\/ Check if a URI SCheme is set.\n\t\/\/ Note that URLs that do not start with a slash after the scheme are\n\t\/\/ interpreted as `scheme:opaque[?query][#fragment]`.\n\tif uri.Scheme != \"\" && uri.Opaque == \"\" {\n\t\tmsg := fmt.Sprintf(\"invalid URL '%s': URI schemes are not supported\", input)\n\t\treturn \"\", &InvalidRegistries{s: msg}\n\t}\n\n\turi, err = url.Parse(\"http:\/\/\" + trimmed)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"invalid URL '%s': sanitized URL did not parse: %v\", input, err)\n\t\treturn \"\", &InvalidRegistries{s: msg}\n\t}\n\n\tif uri.User != nil {\n\t\tmsg := fmt.Sprintf(\"invalid URL '%s': user\/password are not supported\", trimmed)\n\t\treturn \"\", &InvalidRegistries{s: msg}\n\t}\n\n\treturn trimmed, nil\n}\n\n\/\/ getV1Registries transforms v1 registries in the config into an array of v2\n\/\/ registries of type Registry.\nfunc getV1Registries(config *tomlConfig) ([]Registry, error) {\n\tregMap := make(map[string]*Registry)\n\n\tgetRegistry := func(url string) (*Registry, error) { \/\/ Note: _pointer_ to a long-lived object\n\t\tvar err error\n\t\turl, err = parseURL(url)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treg, exists := regMap[url]\n\t\tif !exists {\n\t\t\treg = &Registry{\n\t\t\t\tURL: url,\n\t\t\t\tMirrors: []Mirror{},\n\t\t\t\tPrefix: url,\n\t\t\t}\n\t\t\tregMap[url] = reg\n\t\t}\n\t\treturn reg, nil\n\t}\n\n\tfor _, search := range config.V1Registries.Search.Registries {\n\t\treg, err := getRegistry(search)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treg.Search = true\n\t}\n\tfor _, blocked := range config.V1Registries.Block.Registries {\n\t\treg, err := getRegistry(blocked)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treg.Blocked = true\n\t}\n\tfor _, insecure := range config.V1Registries.Insecure.Registries {\n\t\treg, err := getRegistry(insecure)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treg.Insecure = true\n\t}\n\n\tregistries := []Registry{}\n\tfor _, reg := range regMap {\n\t\tregistries = append(registries, *reg)\n\t}\n\treturn registries, nil\n}\n\n\/\/ postProcessRegistries checks the consistency of all registries (e.g., set\n\/\/ the Prefix to URL if not set) and applies conflict checks. 
It returns an\n\/\/ array of cleaned registries and error in case of conflicts.\nfunc postProcessRegistries(regs []Registry) ([]Registry, error) {\n\tvar registries []Registry\n\tregMap := make(map[string][]Registry)\n\n\tfor _, reg := range regs {\n\t\tvar err error\n\n\t\t\/\/ make sure URL and Prefix are valid\n\t\treg.URL, err = parseURL(reg.URL)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif reg.Prefix == \"\" {\n\t\t\treg.Prefix = reg.URL\n\t\t} else {\n\t\t\treg.Prefix, err = parseURL(reg.Prefix)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\t\/\/ make sure mirrors are valid\n\t\tfor _, mir := range reg.Mirrors {\n\t\t\tmir.URL, err = parseURL(mir.URL)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tregistries = append(registries, reg)\n\t\tregMap[reg.URL] = append(regMap[reg.URL], reg)\n\t}\n\n\t\/\/ Given a registry can be mentioned multiple times (e.g., to have\n\t\/\/ multiple prefixes backed by different mirrors), we need to make sure\n\t\/\/ there are no conflicts among them.\n\t\/\/\n\t\/\/ Note: we need to iterate over the registries array to ensure a\n\t\/\/ deterministic behavior which is not guaranteed by maps.\n\tfor _, reg := range registries {\n\t\tothers, _ := regMap[reg.URL]\n\t\tfor _, other := range others {\n\t\t\tif reg.Insecure != other.Insecure {\n\t\t\t\tmsg := fmt.Sprintf(\"registry '%s' is defined multiple times with conflicting 'insecure' setting\", reg.URL)\n\n\t\t\t\treturn nil, &InvalidRegistries{s: msg}\n\t\t\t}\n\t\t\tif reg.Blocked != other.Blocked {\n\t\t\t\tmsg := fmt.Sprintf(\"registry '%s' is defined multiple times with conflicting 'blocked' setting\", reg.URL)\n\t\t\t\treturn nil, &InvalidRegistries{s: msg}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn registries, nil\n}\n\n\/\/ GetRegistries loads and returns the registries specified in the config.\nfunc GetRegistries(ctx *types.SystemContext) ([]Registry, error) {\n\tconfig, err := loadRegistryConf(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tregistries := config.Registries\n\n\t\/\/ backwards compatibility for v1 configs\n\tv1Registries, err := getV1Registries(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(v1Registries) > 0 {\n\t\tif len(registries) > 0 {\n\t\t\treturn nil, &InvalidRegistries{s: \"mixing sysregistry v1\/v2 is not supported\"}\n\t\t}\n\t\tregistries = v1Registries\n\t}\n\n\treturn postProcessRegistries(registries)\n}\n\n\/\/ FindUnqualifiedSearchRegistries returns all registries that are configured\n\/\/ for unqualified image search (i.e., with Registry.Search == true).\nfunc FindUnqualifiedSearchRegistries(registries []Registry) []Registry {\n\tunqualified := []Registry{}\n\tfor _, reg := range registries {\n\t\tif reg.Search {\n\t\t\tunqualified = append(unqualified, reg)\n\t\t}\n\t}\n\treturn unqualified\n}\n\n\/\/ FindRegistry returns the Registry with the longest prefix for ref. If no\n\/\/ Registry prefixes the image, nil is returned.\nfunc FindRegistry(ref string, registries []Registry) *Registry {\n\treg := Registry{}\n\tprefixLen := 0\n\tfor _, r := range registries {\n\t\tif strings.HasPrefix(ref, r.Prefix) {\n\t\t\tlength := len(r.Prefix)\n\t\t\tif length > prefixLen {\n\t\t\t\treg = r\n\t\t\t\tprefixLen = length\n\t\t\t}\n\t\t}\n\t}\n\tif prefixLen != 0 {\n\t\treturn ®\n\t}\n\treturn nil\n}\n\n\/\/ Reads the global registry file from the filesystem. 
Returns a byte array.\nfunc readRegistryConf(ctx *types.SystemContext) ([]byte, error) {\n\tdirPath := systemRegistriesConfPath\n\tif ctx != nil {\n\t\tif ctx.SystemRegistriesConfPath != \"\" {\n\t\t\tdirPath = ctx.SystemRegistriesConfPath\n\t\t} else if ctx.RootForImplicitAbsolutePaths != \"\" {\n\t\t\tdirPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)\n\t\t}\n\t}\n\tconfigBytes, err := ioutil.ReadFile(dirPath)\n\treturn configBytes, err\n}\n\n\/\/ Used in unittests to parse custom configs without a types.SystemContext.\nvar readConf = readRegistryConf\n\n\/\/ Loads the registry configuration file from the filesystem and then unmarshals\n\/\/ it. Returns the unmarshalled object.\nfunc loadRegistryConf(ctx *types.SystemContext) (*tomlConfig, error) {\n\tconfig := &tomlConfig{}\n\n\tconfigBytes, err := readConf(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = toml.Unmarshal(configBytes, &config)\n\treturn config, err\n}\n<commit_msg>Document the subtleties of using url.Parse for validating registry prefixes.<commit_after>package sysregistriesv2\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/containers\/image\/types\"\n)\n\n\/\/ systemRegistriesConfPath is the path to the system-wide registry\n\/\/ configuration file and is used to add\/subtract potential registries for\n\/\/ obtaining images. You can override this at build time with\n\/\/ -ldflags '-X github.com\/containers\/image\/sysregistries.systemRegistriesConfPath=$your_path'\nvar systemRegistriesConfPath = builtinRegistriesConfPath\n\n\/\/ builtinRegistriesConfPath is the path to the registry configuration file.\n\/\/ DO NOT change this, instead see systemRegistriesConfPath above.\nconst builtinRegistriesConfPath = \"\/etc\/containers\/registries.conf\"\n\n\/\/ Mirror represents a mirror. Mirrors can be used as pull-through caches for\n\/\/ registries.\ntype Mirror struct {\n\t\/\/ The mirror's URL.\n\tURL string `toml:\"url\"`\n\t\/\/ If true, certs verification will be skipped and HTTP (non-TLS)\n\t\/\/ connections will be allowed.\n\tInsecure bool `toml:\"insecure\"`\n}\n\n\/\/ Registry represents a registry.\ntype Registry struct {\n\t\/\/ Serializable registry URL.\n\tURL string `toml:\"url\"`\n\t\/\/ The registry's mirrors.\n\tMirrors []Mirror `toml:\"mirror\"`\n\t\/\/ If true, pulling from the registry will be blocked.\n\tBlocked bool `toml:\"blocked\"`\n\t\/\/ If true, certs verification will be skipped and HTTP (non-TLS)\n\t\/\/ connections will be allowed.\n\tInsecure bool `toml:\"insecure\"`\n\t\/\/ If true, the registry can be used when pulling an unqualified image.\n\tSearch bool `toml:\"unqualified-search\"`\n\t\/\/ Prefix is used for matching images, and to translate one namespace to\n\t\/\/ another. 
If `Prefix=\"example.com\/bar\"`, `URL=\"example.com\/foo\/bar\"`\n\t\/\/ and we pull from \"example.com\/bar\/myimage:latest\", the image will\n\t\/\/ effectively be pulled from \"example.com\/foo\/bar\/myimage:latest\".\n\t\/\/ If no Prefix is specified, it defaults to the specified URL.\n\tPrefix string `toml:\"prefix\"`\n}\n\n\/\/ backwards compatibility to sysregistries v1\ntype v1TOMLregistries struct {\n\tRegistries []string `toml:\"registries\"`\n}\n\n\/\/ tomlConfig is the data type used to unmarshal the toml config.\ntype tomlConfig struct {\n\tRegistries []Registry `toml:\"registry\"`\n\t\/\/ backwards compatibility to sysregistries v1\n\tV1Registries struct {\n\t\tSearch v1TOMLregistries `toml:\"search\"`\n\t\tInsecure v1TOMLregistries `toml:\"insecure\"`\n\t\tBlock v1TOMLregistries `toml:\"block\"`\n\t} `toml:\"registries\"`\n}\n\n\/\/ InvalidRegistries represents an invalid registry configuration. An example\n\/\/ is when \"registry.com\" is defined multiple times in the configuration but\n\/\/ with conflicting security settings.\ntype InvalidRegistries struct {\n\ts string\n}\n\n\/\/ Error returns the error string.\nfunc (e *InvalidRegistries) Error() string {\n\treturn e.s\n}\n\n\/\/ parseURL parses the input string, performs some sanity checks and returns\n\/\/ the sanitized input string. An error is returned in case parsing fails\n\/\/ or if URI scheme or user is set.\nfunc parseURL(input string) (string, error) {\n\ttrimmed := strings.TrimRight(input, \"\/\")\n\n\tif trimmed == \"\" {\n\t\treturn \"\", &InvalidRegistries{s: \"invalid URL: cannot be empty\"}\n\t}\n\n\t\/\/ Ultimately, we expect input of the form example.com[\/namespace\/…], a prefix\n\t\/\/ of a fully-expanded reference (containers\/image\/docker\/Reference.String()).\n\t\/\/ c\/image\/docker\/Reference does not currently provide such a parser.\n\t\/\/ So, we use url.Parse(\"http:\/\/\"+trimmed) below to ~verify the format, possibly\n\t\/\/ letting some invalid input in, trading that off for a simpler parser.\n\t\/\/\n\t\/\/ url.Parse(\"http:\/\/\"+trimmed) is, sadly, too permissive, notably for\n\t\/\/ trimmed == \"http:\/\/example.com\/…\", url.Parse(\"http:\/\/http:\/\/example.com\/…\")\n\t\/\/ is accepted and parsed as\n\t\/\/ {Scheme: \"http\", Host: \"http:\", Path: \"\/\/example.com\/…\"}.\n\t\/\/\n\t\/\/ So, first we do an explicit check for an unwanted scheme prefix:\n\n\t\/\/ This will parse trimmed==\"http:\/\/example.com\/…\" with Scheme: \"http\". 
Perhaps surprisingly,\n\t\/\/ it also succeeds for the input we want to accept, in different ways:\n\t\/\/ \"example.com\" -> {Scheme:\"\", Opaque:\"\", Path:\"example.com\"}\n\t\/\/ \"example.com\/repo\" -> {Scheme:\"\", Opaque:\"\", Path:\"example.com\/repo\"}\n\t\/\/ \"example.com:5000\" -> {Scheme:\"example.com\", Opaque:\"5000\"}\n\t\/\/ \"example.com:5000\/repo\" -> {Scheme:\"example.com\", Opaque:\"5000\/repo\"}\n\turi, err := url.Parse(trimmed)\n\tif err != nil {\n\t\treturn \"\", &InvalidRegistries{s: fmt.Sprintf(\"invalid URL '%s': %v\", input, err)}\n\t}\n\n\t\/\/ Check if a URI Scheme is set.\n\t\/\/ Note that URLs that do not start with a slash after the scheme are\n\t\/\/ interpreted as `scheme:opaque[?query][#fragment]`; see above for examples.\n\tif uri.Scheme != \"\" && uri.Opaque == \"\" {\n\t\tmsg := fmt.Sprintf(\"invalid URL '%s': URI schemes are not supported\", input)\n\t\treturn \"\", &InvalidRegistries{s: msg}\n\t}\n\n\turi, err = url.Parse(\"http:\/\/\" + trimmed)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"invalid URL '%s': sanitized URL did not parse: %v\", input, err)\n\t\treturn \"\", &InvalidRegistries{s: msg}\n\t}\n\n\tif uri.User != nil {\n\t\tmsg := fmt.Sprintf(\"invalid URL '%s': user\/password are not supported\", trimmed)\n\t\treturn \"\", &InvalidRegistries{s: msg}\n\t}\n\n\treturn trimmed, nil\n}\n\n\/\/ getV1Registries transforms v1 registries in the config into an array of v2\n\/\/ registries of type Registry.\nfunc getV1Registries(config *tomlConfig) ([]Registry, error) {\n\tregMap := make(map[string]*Registry)\n\n\tgetRegistry := func(url string) (*Registry, error) { \/\/ Note: _pointer_ to a long-lived object\n\t\tvar err error\n\t\turl, err = parseURL(url)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treg, exists := regMap[url]\n\t\tif !exists {\n\t\t\treg = &Registry{\n\t\t\t\tURL: url,\n\t\t\t\tMirrors: []Mirror{},\n\t\t\t\tPrefix: url,\n\t\t\t}\n\t\t\tregMap[url] = reg\n\t\t}\n\t\treturn reg, nil\n\t}\n\n\tfor _, search := range config.V1Registries.Search.Registries {\n\t\treg, err := getRegistry(search)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treg.Search = true\n\t}\n\tfor _, blocked := range config.V1Registries.Block.Registries {\n\t\treg, err := getRegistry(blocked)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treg.Blocked = true\n\t}\n\tfor _, insecure := range config.V1Registries.Insecure.Registries {\n\t\treg, err := getRegistry(insecure)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treg.Insecure = true\n\t}\n\n\tregistries := []Registry{}\n\tfor _, reg := range regMap {\n\t\tregistries = append(registries, *reg)\n\t}\n\treturn registries, nil\n}\n\n\/\/ postProcessRegistries checks the consistency of all registries (e.g., set\n\/\/ the Prefix to URL if not set) and applies conflict checks. 
It returns an\n\/\/ array of cleaned registries and error in case of conflicts.\nfunc postProcessRegistries(regs []Registry) ([]Registry, error) {\n\tvar registries []Registry\n\tregMap := make(map[string][]Registry)\n\n\tfor _, reg := range regs {\n\t\tvar err error\n\n\t\t\/\/ make sure URL and Prefix are valid\n\t\treg.URL, err = parseURL(reg.URL)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif reg.Prefix == \"\" {\n\t\t\treg.Prefix = reg.URL\n\t\t} else {\n\t\t\treg.Prefix, err = parseURL(reg.Prefix)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\t\/\/ make sure mirrors are valid\n\t\tfor _, mir := range reg.Mirrors {\n\t\t\tmir.URL, err = parseURL(mir.URL)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tregistries = append(registries, reg)\n\t\tregMap[reg.URL] = append(regMap[reg.URL], reg)\n\t}\n\n\t\/\/ Given a registry can be mentioned multiple times (e.g., to have\n\t\/\/ multiple prefixes backed by different mirrors), we need to make sure\n\t\/\/ there are no conflicts among them.\n\t\/\/\n\t\/\/ Note: we need to iterate over the registries array to ensure a\n\t\/\/ deterministic behavior which is not guaranteed by maps.\n\tfor _, reg := range registries {\n\t\tothers := regMap[reg.URL]\n\t\tfor _, other := range others {\n\t\t\tif reg.Insecure != other.Insecure {\n\t\t\t\tmsg := fmt.Sprintf(\"registry '%s' is defined multiple times with conflicting 'insecure' setting\", reg.URL)\n\t\t\t\treturn nil, &InvalidRegistries{s: msg}\n\t\t\t}\n\t\t\tif reg.Blocked != other.Blocked {\n\t\t\t\tmsg := fmt.Sprintf(\"registry '%s' is defined multiple times with conflicting 'blocked' setting\", reg.URL)\n\t\t\t\treturn nil, &InvalidRegistries{s: msg}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn registries, nil\n}\n\n\/\/ GetRegistries loads and returns the registries specified in the config.\nfunc GetRegistries(ctx *types.SystemContext) ([]Registry, error) {\n\tconfig, err := loadRegistryConf(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tregistries := config.Registries\n\n\t\/\/ backwards compatibility for v1 configs\n\tv1Registries, err := getV1Registries(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(v1Registries) > 0 {\n\t\tif len(registries) > 0 {\n\t\t\treturn nil, &InvalidRegistries{s: \"mixing sysregistry v1\/v2 is not supported\"}\n\t\t}\n\t\tregistries = v1Registries\n\t}\n\n\treturn postProcessRegistries(registries)\n}\n\n\/\/ FindUnqualifiedSearchRegistries returns all registries that are configured\n\/\/ for unqualified image search (i.e., with Registry.Search == true).\nfunc FindUnqualifiedSearchRegistries(registries []Registry) []Registry {\n\tunqualified := []Registry{}\n\tfor _, reg := range registries {\n\t\tif reg.Search {\n\t\t\tunqualified = append(unqualified, reg)\n\t\t}\n\t}\n\treturn unqualified\n}\n\n\/\/ FindRegistry returns the Registry with the longest prefix for ref. If no\n\/\/ Registry prefixes the image, nil is returned.\nfunc FindRegistry(ref string, registries []Registry) *Registry {\n\treg := Registry{}\n\tprefixLen := 0\n\tfor _, r := range registries {\n\t\tif strings.HasPrefix(ref, r.Prefix) {\n\t\t\tlength := len(r.Prefix)\n\t\t\tif length > prefixLen {\n\t\t\t\treg = r\n\t\t\t\tprefixLen = length\n\t\t\t}\n\t\t}\n\t}\n\tif prefixLen != 0 {\n\t\treturn &reg\n\t}\n\treturn nil\n}\n\n\/\/ Reads the global registry file from the filesystem. 
Returns a byte array.\nfunc readRegistryConf(ctx *types.SystemContext) ([]byte, error) {\n\tdirPath := systemRegistriesConfPath\n\tif ctx != nil {\n\t\tif ctx.SystemRegistriesConfPath != \"\" {\n\t\t\tdirPath = ctx.SystemRegistriesConfPath\n\t\t} else if ctx.RootForImplicitAbsolutePaths != \"\" {\n\t\t\tdirPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)\n\t\t}\n\t}\n\tconfigBytes, err := ioutil.ReadFile(dirPath)\n\treturn configBytes, err\n}\n\n\/\/ Used in unittests to parse custom configs without a types.SystemContext.\nvar readConf = readRegistryConf\n\n\/\/ Loads the registry configuration file from the filesystem and then unmarshals\n\/\/ it. Returns the unmarshalled object.\nfunc loadRegistryConf(ctx *types.SystemContext) (*tomlConfig, error) {\n\tconfig := &tomlConfig{}\n\n\tconfigBytes, err := readConf(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = toml.Unmarshal(configBytes, &config)\n\treturn config, err\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n This file is licensed under the Apache License, Version 2.0 (the \"License\").\n You may not use this file except in compliance with the License. A copy of\n the License is located at\n\n http:\/\/aws.amazon.com\/apache2.0\/\n\n This file is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n CONDITIONS OF ANY KIND, either express or implied. See the License for the\n specific language governing permissions and limitations under the License.\n*\/\n\nresp, err := svc.UpdateService((&ecs.UpdateServiceInput{}).\n\tSetService(\"myService\").\n\tSetDeploymentConfiguration((&ecs.DeploymentConfiguration{}).\n\t\tSetMinimumHealthyPrecent(80),\n\t),\n)\n<commit_msg>Fixed typo<commit_after>\/*\n Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n This file is licensed under the Apache License, Version 2.0 (the \"License\").\n You may not use this file except in compliance with the License. A copy of\n the License is located at\n\n http:\/\/aws.amazon.com\/apache2.0\/\n\n This file is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n CONDITIONS OF ANY KIND, either express or implied. See the License for the\n specific language governing permissions and limitations under the License.\n*\/\n\nresp, err := svc.UpdateService((&ecs.UpdateServiceInput{}).\n\tSetService(\"myService\").\n\tSetDeploymentConfiguration((&ecs.DeploymentConfiguration{}).\n\t\tSetMinimumHealthyPercent(80),\n\t),\n)\n<|endoftext|>"} {"text":"<commit_before>package volume\n\nimport \"testing\"\n\n\/\/ Set up fake volume\nvar fakeVol = Volume{\n\tTarget: \"\/foo\",\n\tBackupDir: \"\/back\",\n\tMount: \"\/mnt\",\n\tFullIfOlderThan: \"3W\",\n\tRemoveOlderThan: \"1Y\",\n}\n\n\/\/ TestNewVolume checks the creation of a new volume\nfunc TestNewVolume(t *testing.T) {\n\tif fakeVol.Target != \"\/foo\" {\n\t\tt.Fatalf(\"Volume target is wrong. Expected \/foo, got %v\", fakeVol.Target)\n\t}\n\n\tif fakeVol.BackupDir != \"\/back\" {\n\t\tt.Fatalf(\"Volume backup dir is wrong. Expected \/back, got %v\", fakeVol.BackupDir)\n\t}\n\n\tif fakeVol.Mount != \"\/mnt\" {\n\t\tt.Fatalf(\"Volume mount dir is wrong. Expected \/mnt, got %v\", fakeVol.Mount)\n\t}\n\n\tif fakeVol.FullIfOlderThan != \"3W\" {\n\t\tt.Fatalf(\"Volume FullIfOlderThan is wrong. Expected 3W, got %v\", fakeVol.FullIfOlderThan)\n\t}\n\n\tif fakeVol.RemoveOlderThan != \"1Y\" {\n\t\tt.Fatalf(\"Volume RemoveOlderThan is wrong. 
Expected 1Y, got %v\", fakeVol.RemoveOlderThan)\n\t}\n}\n<commit_msg>Fix volume tests<commit_after>package volume\n\nimport \"testing\"\n\n\/\/ Set up fake volume\nvar fakeVol = Volume{\n\tTarget: \"\/foo\",\n\tBackupDir: \"\/back\",\n\tMount: \"\/mnt\",\n\tConfig: &Config{\n\t\tFullIfOlderThan: \"3W\",\n\t\tRemoveOlderThan: \"1Y\",\n\t},\n}\n\n\/\/ TestNewVolume checks the creation of a new volume\nfunc TestNewVolume(t *testing.T) {\n\tif fakeVol.Target != \"\/foo\" {\n\t\tt.Fatalf(\"Volume target is wrong. Expected \/foo, got %v\", fakeVol.Target)\n\t}\n\n\tif fakeVol.BackupDir != \"\/back\" {\n\t\tt.Fatalf(\"Volume backup dir is wrong. Expected \/back, got %v\", fakeVol.BackupDir)\n\t}\n\n\tif fakeVol.Mount != \"\/mnt\" {\n\t\tt.Fatalf(\"Volume mount dir is wrong. Expected \/mnt, got %v\", fakeVol.Mount)\n\t}\n\n\tif fakeVol.Config.FullIfOlderThan != \"3W\" {\n\t\tt.Fatalf(\"Volume FullIfOlderThan is wrong. Expected 3W, got %v\", fakeVol.Config.FullIfOlderThan)\n\t}\n\n\tif fakeVol.Config.RemoveOlderThan != \"1Y\" {\n\t\tt.Fatalf(\"Volume RemoveOlderThan is wrong. Expected 1Y, got %v\", fakeVol.Config.RemoveOlderThan)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package migrations\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/antonve\/logger-api\/config\"\n\t\"github.com\/antonve\/logger-api\/models\"\n\n\t\"github.com\/DavidHuie\/gomigrate\"\n)\n\nfunc getMigrator() (*gomigrate.Migrator, error) {\n\t\/\/ @TODO: change how migration path is handled\n\tappPath := os.Getenv(\"APP_PATH\")\n\tif appPath == \"\" {\n\t\tappPath = fmt.Sprintf(\"%s\/src\/github.com\/antonve\/logger-api\", os.Getenv(\"GOPATH\"))\n\t}\n\n\treturn gomigrate.NewMigrator(models.GetSQLDatabase(), gomigrate.Postgres{}, fmt.Sprintf(\"%s\/%s\", appPath, config.GetConfig().MigrationsPath))\n}\n\n\/\/ Migrate migrates the database\nfunc Migrate() error {\n\tmigrator, err := getMigrator()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = migrator.Migrate()\n\n\treturn err\n}\n\n\/\/ Destroy the current environment's database\nfunc Destroy() error {\n\tif config.GetConfig().Environment == config.Environments[\"prod\"] {\n\t\treturn fmt.Errorf(\"Cannot destroy production.\")\n\t}\n\n\tdb := models.GetSQLConnection()\n\tdefer db.Close()\n\n\t\/\/ Drop database\n\t_, err := db.Exec(\"DROP DATABASE IF EXISTS \" + config.GetConfig().Database)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Create a new database\nfunc Create() error {\n\tdb := models.GetSQLConnection()\n\tdefer db.Close()\n\n\t\/\/ Create database\n\t_, err := db.Exec(\"CREATE DATABASE \" + config.GetConfig().Database)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>Log which environment is being migrated<commit_after>package migrations\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/antonve\/logger-api\/config\"\n\t\"github.com\/antonve\/logger-api\/models\"\n\n\t\"github.com\/DavidHuie\/gomigrate\"\n)\n\nfunc getMigrator() (*gomigrate.Migrator, error) {\n\t\/\/ @TODO: change how migration path is handled\n\tappPath := os.Getenv(\"APP_PATH\")\n\tif appPath == \"\" {\n\t\tappPath = fmt.Sprintf(\"%s\/src\/github.com\/antonve\/logger-api\", os.Getenv(\"GOPATH\"))\n\t}\n\n\treturn gomigrate.NewMigrator(models.GetSQLDatabase(), gomigrate.Postgres{}, fmt.Sprintf(\"%s\/%s\", appPath, config.GetConfig().MigrationsPath))\n}\n\n\/\/ Migrate migrates the database\nfunc Migrate() error {\n\tlog.Printf(\"Migrating in environment: %s\", config.GetConfig().Environment)\n\tmigrator, err := 
getMigrator()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = migrator.Migrate()\n\n\treturn err\n}\n\n\/\/ Destroy the current environment's database\nfunc Destroy() error {\n\tif config.GetConfig().Environment == config.Environments[\"prod\"] {\n\t\treturn fmt.Errorf(\"Cannot destroy production.\")\n\t}\n\n\tdb := models.GetSQLConnection()\n\tdefer db.Close()\n\n\t\/\/ Drop database\n\t_, err := db.Exec(\"DROP DATABASE IF EXISTS \" + config.GetConfig().Database)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Create a new database\nfunc Create() error {\n\tdb := models.GetSQLConnection()\n\tdefer db.Close()\n\n\t\/\/ Create database\n\t_, err := db.Exec(\"CREATE DATABASE \" + config.GetConfig().Database)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Factom Foundation\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage wallet\n\nimport (\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"regexp\"\n\n\t\"github.com\/FactomProject\/btcutil\/base58\"\n\t\"github.com\/FactomProject\/factom\"\n\t\"github.com\/FactomProject\/factomd\/common\/factoid\"\n\t\"github.com\/FactomProject\/factomd\/common\/primitives\"\n\t\"github.com\/FactomProject\/goleveldb\/leveldb\"\n)\n\nvar (\n\tErrFeeTooHigh = errors.New(\"wallet: Overpaying Fee\")\n\tErrFeeTooLow = errors.New(\"wallet: Insufficient Fee\")\n\tErrNoSuchAddress = errors.New(\"wallet: No such address\")\n\tErrTXExists = errors.New(\"wallet: Transaction name already exists\")\n\tErrTXNotExists = errors.New(\"wallet: Transaction name was not found\")\n\tErrTXNoInputs = errors.New(\"wallet: Transaction has no inputs\")\n\tErrTXInvalidName = errors.New(\"wallet: Transaction name is not valid\")\n)\n\nfunc (w *Wallet) NewTransaction(name string) error {\n\tif _, exist := w.transactions[name]; exist {\n\t\treturn ErrTXExists\n\t}\n\n\t\/\/ check that the transaction name is valid\n\tif name == \"\" {\n\t\treturn ErrTXInvalidName\n\t}\n\tif len(name) > 32 {\n\t\treturn ErrTXInvalidName\n\t}\n\tif match, err := regexp.MatchString(\"[^a-zA-Z0-9_-]\", name); err != nil {\n\t\treturn err\n\t} else if match {\n\t\treturn ErrTXInvalidName\n\t}\n\n\tt := new(factoid.Transaction)\n\tt.SetTimestamp(primitives.NewTimestampNow())\n\tw.transactions[name] = t\n\treturn nil\n}\n\nfunc (w *Wallet) DeleteTransaction(name string) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\tdelete(w.transactions, name)\n\treturn nil\n}\n\nfunc (w *Wallet) AddInput(name, address string, amount uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\ta, err := w.GetFCTAddress(address)\n\tif err == leveldb.ErrNotFound {\n\t\treturn ErrNoSuchAddress\n\t} else if err != nil {\n\t\treturn err\n\t}\n\tadr := factoid.NewAddress(a.RCDHash())\n\n\t\/\/ First look if this is really an update\n\tfor _, input := range trans.GetInputs() {\n\t\tif input.GetAddress().IsSameAs(adr) {\n\t\t\tinput.SetAmount(amount)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ Add our new input\n\ttrans.AddInput(adr, amount)\n\ttrans.AddRCD(factoid.NewRCD_1(a.PubBytes()))\n\n\treturn nil\n}\n\nfunc (w *Wallet) AddOutput(name, address string, amount uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif !factom.IsValidAddress(address) {\n\t\treturn 
errors.New(\"Invalid Address\")\n\t}\n\n\tadr := factoid.NewAddress(base58.Decode(address)[2:34])\n\n\ttrans.AddOutput(adr, amount)\n\n\treturn nil\n}\n\nfunc (w *Wallet) AddECOutput(name, address string, amount uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif !factom.IsValidAddress(address) {\n\t\treturn errors.New(\"Invalid Address\")\n\t}\n\n\tadr := factoid.NewAddress(base58.Decode(address)[2:34])\n\n\ttrans.AddECOutput(adr, amount)\n\n\treturn nil\n}\n\nfunc (w *Wallet) AddFee(name, address string, rate uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\t{\n\t\tins, err := trans.TotalInputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\touts, err := trans.TotalOutputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tecs, err := trans.TotalECs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ins != outs+ecs {\n\t\t\treturn fmt.Errorf(\"Inputs and outputs don't add up\")\n\t\t}\n\t}\n\n\ttransfee, err := trans.CalculateFee(rate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ta, err := w.GetFCTAddress(address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tadr := factoid.NewAddress(a.RCDHash())\n\n\tfor _, input := range trans.GetInputs() {\n\t\tif input.GetAddress().IsSameAs(adr) {\n\t\t\tamt, err := factoid.ValidateAmounts(input.GetAmount(), transfee)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tinput.SetAmount(amt)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"%s is not an input to the transaction.\", address)\n}\n\nfunc (w *Wallet) SubFee(name, address string, rate uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif !factom.IsValidAddress(address) {\n\t\treturn errors.New(\"Invalid Address\")\n\t}\n\n\t{\n\t\tins, err := trans.TotalInputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\touts, err := trans.TotalOutputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tecs, err := trans.TotalECs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ins != outs+ecs {\n\t\t\treturn fmt.Errorf(\"Inputs and outputs don't add up\")\n\t\t}\n\t}\n\n\ttransfee, err := trans.CalculateFee(rate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tadr := factoid.NewAddress(base58.Decode(address)[2:34])\n\n\tfor _, output := range trans.GetOutputs() {\n\t\tif output.GetAddress().IsSameAs(adr) {\n\t\t\toutput.SetAmount(output.GetAmount() - transfee)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"%s is not an output to the transaction.\", address)\n}\n\nfunc (w *Wallet) SignTransaction(name string) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif err := checkFee(trans); err != nil {\n\t\treturn err\n\t}\n\n\tdata, err := trans.MarshalBinarySig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trcds := trans.GetRCDs()\n\tif len(rcds) == 0 {\n\t\treturn ErrTXNoInputs\n\t}\n\tfor i, rcd := range rcds {\n\t\ta, err := rcd.GetAddress()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tf, err := w.GetFCTAddress(primitives.ConvertFctAddressToUserStr(a))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsig := factoid.NewSingleSignatureBlock(f.SecBytes(), data)\n\t\ttrans.SetSignatureBlock(i, sig)\n\t}\n\n\treturn nil\n}\n\nfunc (w *Wallet) GetTransactions() map[string]*factoid.Transaction {\n\treturn 
w.transactions\n}\n\nfunc (w *Wallet) ComposeTransaction(name string) (*factom.JSON2Request, error) {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn nil, ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\ttype txreq struct {\n\t\tTransaction string `json:\"transaction\"`\n\t}\n\n\tparam := new(txreq)\n\tif p, err := trans.MarshalBinary(); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tparam.Transaction = hex.EncodeToString(p)\n\t}\n\n\treq := factom.NewJSON2Request(\"factoid-submit\", apiCounter(), param)\n\n\treturn req, nil\n}\n\nfunc checkFee(t *factoid.Transaction) error {\n\tins, err := t.TotalInputs()\n\tif err != nil {\n\t\treturn err\n\t}\n\touts, err := t.TotalOutputs()\n\tif err != nil {\n\t\treturn err\n\t}\n\tecs, err := t.TotalECs()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ fee is the fee that will be paid\n\tfee := int64(ins) - int64(outs) - int64(ecs)\n\tfmt.Println(\"DEBUG: fee is\", fee)\n\t\n\tif fee <= 0 {\n\t\treturn ErrFeeTooLow\n\t}\n\n\trate, err := factom.GetRate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ cfee is the fee calculated for the transaction\n\tvar cfee int64\n\tif c, err := t.CalculateFee(rate); err != nil {\n\t\treturn err\n\t} else if c == 0 {\n\t\treturn errors.New(\"wallet: Could not calculate fee\")\n\t} else {\n\t\tcfee = int64(c)\n\t\tfmt.Println(\"DEBUG: cfee is\", cfee)\n\t}\n\n\t\/\/ fee is too low\n\tif fee < cfee {\n\t\treturn ErrFeeTooLow\n\t}\n\n\t\/\/ fee is too high (over 10x cfee)\n\tif fee >= cfee*10 {\n\t\treturn ErrFeeTooHigh\n\t}\n\n\treturn nil\n}\n<commit_msg>removed debug messages<commit_after>\/\/ Copyright 2016 Factom Foundation\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage wallet\n\nimport (\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"regexp\"\n\n\t\"github.com\/FactomProject\/btcutil\/base58\"\n\t\"github.com\/FactomProject\/factom\"\n\t\"github.com\/FactomProject\/factomd\/common\/factoid\"\n\t\"github.com\/FactomProject\/factomd\/common\/primitives\"\n\t\"github.com\/FactomProject\/goleveldb\/leveldb\"\n)\n\nvar (\n\tErrFeeTooHigh = errors.New(\"wallet: Overpaying Fee\")\n\tErrFeeTooLow = errors.New(\"wallet: Insufficient Fee\")\n\tErrNoSuchAddress = errors.New(\"wallet: No such address\")\n\tErrTXExists = errors.New(\"wallet: Transaction name already exists\")\n\tErrTXNotExists = errors.New(\"wallet: Transaction name was not found\")\n\tErrTXNoInputs = errors.New(\"wallet: Transaction has no inputs\")\n\tErrTXInvalidName = errors.New(\"wallet: Transaction name is not valid\")\n)\n\nfunc (w *Wallet) NewTransaction(name string) error {\n\tif _, exist := w.transactions[name]; exist {\n\t\treturn ErrTXExists\n\t}\n\n\t\/\/ check that the transaction name is valid\n\tif name == \"\" {\n\t\treturn ErrTXInvalidName\n\t}\n\tif len(name) > 32 {\n\t\treturn ErrTXInvalidName\n\t}\n\tif match, err := regexp.MatchString(\"[^a-zA-Z0-9_-]\", name); err != nil {\n\t\treturn err\n\t} else if match {\n\t\treturn ErrTXInvalidName\n\t}\n\n\tt := new(factoid.Transaction)\n\tt.SetTimestamp(primitives.NewTimestampNow())\n\tw.transactions[name] = t\n\treturn nil\n}\n\nfunc (w *Wallet) DeleteTransaction(name string) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\tdelete(w.transactions, name)\n\treturn nil\n}\n\nfunc (w *Wallet) AddInput(name, address string, amount uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := 
w.transactions[name]\n\n\ta, err := w.GetFCTAddress(address)\n\tif err == leveldb.ErrNotFound {\n\t\treturn ErrNoSuchAddress\n\t} else if err != nil {\n\t\treturn err\n\t}\n\tadr := factoid.NewAddress(a.RCDHash())\n\n\t\/\/ First look if this is really an update\n\tfor _, input := range trans.GetInputs() {\n\t\tif input.GetAddress().IsSameAs(adr) {\n\t\t\tinput.SetAmount(amount)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ Add our new input\n\ttrans.AddInput(adr, amount)\n\ttrans.AddRCD(factoid.NewRCD_1(a.PubBytes()))\n\n\treturn nil\n}\n\nfunc (w *Wallet) AddOutput(name, address string, amount uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif !factom.IsValidAddress(address) {\n\t\treturn errors.New(\"Invalid Address\")\n\t}\n\n\tadr := factoid.NewAddress(base58.Decode(address)[2:34])\n\n\ttrans.AddOutput(adr, amount)\n\n\treturn nil\n}\n\nfunc (w *Wallet) AddECOutput(name, address string, amount uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif !factom.IsValidAddress(address) {\n\t\treturn errors.New(\"Invalid Address\")\n\t}\n\n\tadr := factoid.NewAddress(base58.Decode(address)[2:34])\n\n\ttrans.AddECOutput(adr, amount)\n\n\treturn nil\n}\n\nfunc (w *Wallet) AddFee(name, address string, rate uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\t{\n\t\tins, err := trans.TotalInputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\touts, err := trans.TotalOutputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tecs, err := trans.TotalECs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ins != outs+ecs {\n\t\t\treturn fmt.Errorf(\"Inputs and outputs don't add up\")\n\t\t}\n\t}\n\n\ttransfee, err := trans.CalculateFee(rate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ta, err := w.GetFCTAddress(address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tadr := factoid.NewAddress(a.RCDHash())\n\n\tfor _, input := range trans.GetInputs() {\n\t\tif input.GetAddress().IsSameAs(adr) {\n\t\t\tamt, err := factoid.ValidateAmounts(input.GetAmount(), transfee)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tinput.SetAmount(amt)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"%s is not an input to the transaction.\", address)\n}\n\nfunc (w *Wallet) SubFee(name, address string, rate uint64) error {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif !factom.IsValidAddress(address) {\n\t\treturn errors.New(\"Invalid Address\")\n\t}\n\n\t{\n\t\tins, err := trans.TotalInputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\touts, err := trans.TotalOutputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tecs, err := trans.TotalECs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ins != outs+ecs {\n\t\t\treturn fmt.Errorf(\"Inputs and outputs don't add up\")\n\t\t}\n\t}\n\n\ttransfee, err := trans.CalculateFee(rate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tadr := factoid.NewAddress(base58.Decode(address)[2:34])\n\n\tfor _, output := range trans.GetOutputs() {\n\t\tif output.GetAddress().IsSameAs(adr) {\n\t\t\toutput.SetAmount(output.GetAmount() - transfee)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"%s is not an output to the transaction.\", address)\n}\n\nfunc (w *Wallet) SignTransaction(name string) error {\n\tif _, exists := 
w.transactions[name]; !exists {\n\t\treturn ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\tif err := checkFee(trans); err != nil {\n\t\treturn err\n\t}\n\n\tdata, err := trans.MarshalBinarySig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trcds := trans.GetRCDs()\n\tif len(rcds) == 0 {\n\t\treturn ErrTXNoInputs\n\t}\n\tfor i, rcd := range rcds {\n\t\ta, err := rcd.GetAddress()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tf, err := w.GetFCTAddress(primitives.ConvertFctAddressToUserStr(a))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsig := factoid.NewSingleSignatureBlock(f.SecBytes(), data)\n\t\ttrans.SetSignatureBlock(i, sig)\n\t}\n\n\treturn nil\n}\n\nfunc (w *Wallet) GetTransactions() map[string]*factoid.Transaction {\n\treturn w.transactions\n}\n\nfunc (w *Wallet) ComposeTransaction(name string) (*factom.JSON2Request, error) {\n\tif _, exists := w.transactions[name]; !exists {\n\t\treturn nil, ErrTXNotExists\n\t}\n\ttrans := w.transactions[name]\n\n\ttype txreq struct {\n\t\tTransaction string `json:\"transaction\"`\n\t}\n\n\tparam := new(txreq)\n\tif p, err := trans.MarshalBinary(); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tparam.Transaction = hex.EncodeToString(p)\n\t}\n\n\treq := factom.NewJSON2Request(\"factoid-submit\", apiCounter(), param)\n\n\treturn req, nil\n}\n\nfunc checkFee(t *factoid.Transaction) error {\n\tins, err := t.TotalInputs()\n\tif err != nil {\n\t\treturn err\n\t}\n\touts, err := t.TotalOutputs()\n\tif err != nil {\n\t\treturn err\n\t}\n\tecs, err := t.TotalECs()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ fee is the fee that will be paid\n\tfee := int64(ins) - int64(outs) - int64(ecs)\n\t\n\tif fee <= 0 {\n\t\treturn ErrFeeTooLow\n\t}\n\n\trate, err := factom.GetRate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ cfee is the fee calculated for the transaction\n\tvar cfee int64\n\tif c, err := t.CalculateFee(rate); err != nil {\n\t\treturn err\n\t} else if c == 0 {\n\t\treturn errors.New(\"wallet: Could not calculate fee\")\n\t} else {\n\t\tcfee = int64(c)\n\t}\n\n\t\/\/ fee is too low\n\tif fee < cfee {\n\t\treturn ErrFeeTooLow\n\t}\n\n\t\/\/ fee is too high (over 10x cfee)\n\tif fee >= cfee*10 {\n\t\treturn ErrFeeTooHigh\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package model_test\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/crezam\/actions-on-google-golang\/internal\/test\"\n\t\"github.com\/crezam\/actions-on-google-golang\/model\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestRequest1Parsing(t *testing.T) {\n\n\tvar req model.ApiAiRequest\n\n\tfile, _ := os.Open(\".\/data\/sample_request1.json\")\n\tdec := json.NewDecoder(file)\n\n\terr := dec.Decode(&req)\n\n\t\/\/ test if any issues decoding file\n\ttest.Ok(t, err)\n\n\t\/\/ assert correct parsing\n\ttest.Equals(t, \"209eefa7-adb5-4d03-a8b9-9f7ae68a0c11\", req.Id)\n\n\texpectedTimestamp, _ := time.Parse(time.RFC3339Nano, \"2016-10-10T07:41:40.098Z\")\n\ttest.Equals(t, expectedTimestamp, req.Timestamp)\n\n\ttest.Equals(t, \"Hi, my name is Sam!\", req.Result.ResolvedQuery)\n\ttest.Equals(t, \"agent\", req.Result.Source)\n\ttest.Equals(t, \"greetings\", req.Result.Action)\n\ttest.Equals(t, false, req.Result.ActionIncomplete)\n\ttest.Equals(t, \"Sam\", req.Result.Parameters[\"user_name\"])\n\ttest.Equals(t, \"\", req.Result.Parameters[\"school\"])\n\n\ttest.Equals(t, \"greetings\", req.Result.Contexts[0].Name)\n\ttest.Equals(t, \"Sam\", req.Result.Contexts[0].Parameters[\"user_name\"])\n\ttest.Equals(t, \"Sam!\", 
req.Result.Contexts[0].Parameters[\"user_name.original\"])\n\n\ttest.Equals(t, \"373a354b-c15a-4a60-ac9d-a9f2aee76cb4\", req.Result.Metadata.IntentID)\n\ttest.Equals(t, \"true\", req.Result.Metadata.WebhookUsed)\n\ttest.Equals(t, \"greetings\", req.Result.Metadata.IntentName)\n\n\ttest.Equals(t, \"Nice to meet you, Sam!\", req.Result.Fulfillment.Speech)\n\n\ttest.Equals(t, float64(1), req.Result.Score)\n\n\ttest.Equals(t, \"...\", req.OriginalRequest.Data.User.UserID)\n\ttest.Equals(t, \"Sam\", req.OriginalRequest.Data.User.Profile.DisplayName)\n\ttest.Equals(t, \"Sam\", req.OriginalRequest.Data.User.Profile.GivenName)\n\ttest.Equals(t, \"Johnson\", req.OriginalRequest.Data.User.Profile.FamilyName)\n\n\ttest.Equals(t, \"...\", req.OriginalRequest.Data.User.AccessToken)\n\n\ttest.Equals(t, 123.456, req.OriginalRequest.Data.Device.Location.Coordinates.Latitude)\n\ttest.Equals(t, -123.456, req.OriginalRequest.Data.Device.Location.Coordinates.Longitude)\n\n\ttest.Equals(t, \"1234 Random Road, Anytown, CA 12345, United States\", req.OriginalRequest.Data.Device.Location.FormattedAddress)\n\ttest.Equals(t, \"12345\", req.OriginalRequest.Data.Device.Location.ZipCode)\n\ttest.Equals(t, \"Anytown\", req.OriginalRequest.Data.Device.Location.City)\n\n\ttest.Equals(t, 200, req.Status.Code)\n\ttest.Equals(t, \"success\", req.Status.ErrorType)\n\n\ttest.Equals(t, \"37151f7c-a409-48b8-9890-cd980cd2548e\", req.SessionID)\n}\n\nfunc TestRequest2Parsing(t *testing.T) {\n\n\tvar req model.ApiAiRequest\n\n\tfile, _ := os.Open(\".\/data\/sample_request2.json\")\n\tdec := json.NewDecoder(file)\n\n\terr := dec.Decode(&req)\n\n\t\/\/ test if any issues decoding file\n\ttest.Ok(t, err)\n\n\t\/\/ assert correct parsing\n\ttest.Equals(t, \"f4b72ee9-cabb-4acd-af9b-2d2cb6ff53d2\", req.Id)\n\n\texpectedTimestamp, _ := time.Parse(time.RFC3339Nano, \"2016-12-24T07:23:46.64Z\")\n\ttest.Equals(t, expectedTimestamp, req.Timestamp)\n\n}\n\n<commit_msg>Start asserting elements<commit_after>package model_test\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/crezam\/actions-on-google-golang\/internal\/test\"\n\t\"github.com\/crezam\/actions-on-google-golang\/model\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestRequest1Parsing(t *testing.T) {\n\n\tvar req model.ApiAiRequest\n\n\tfile, _ := os.Open(\".\/data\/sample_request1.json\")\n\tdec := json.NewDecoder(file)\n\n\terr := dec.Decode(&req)\n\n\t\/\/ test if any issues decoding file\n\ttest.Ok(t, err)\n\n\t\/\/ assert correct parsing\n\ttest.Equals(t, \"209eefa7-adb5-4d03-a8b9-9f7ae68a0c11\", req.Id)\n\n\texpectedTimestamp, _ := time.Parse(time.RFC3339Nano, \"2016-10-10T07:41:40.098Z\")\n\ttest.Equals(t, expectedTimestamp, req.Timestamp)\n\n\ttest.Equals(t, \"Hi, my name is Sam!\", req.Result.ResolvedQuery)\n\ttest.Equals(t, \"agent\", req.Result.Source)\n\ttest.Equals(t, \"greetings\", req.Result.Action)\n\ttest.Equals(t, false, req.Result.ActionIncomplete)\n\ttest.Equals(t, \"Sam\", req.Result.Parameters[\"user_name\"])\n\ttest.Equals(t, \"\", req.Result.Parameters[\"school\"])\n\n\ttest.Equals(t, \"greetings\", req.Result.Contexts[0].Name)\n\ttest.Equals(t, \"Sam\", req.Result.Contexts[0].Parameters[\"user_name\"])\n\ttest.Equals(t, \"Sam!\", req.Result.Contexts[0].Parameters[\"user_name.original\"])\n\n\ttest.Equals(t, \"373a354b-c15a-4a60-ac9d-a9f2aee76cb4\", req.Result.Metadata.IntentID)\n\ttest.Equals(t, \"true\", req.Result.Metadata.WebhookUsed)\n\ttest.Equals(t, \"greetings\", req.Result.Metadata.IntentName)\n\n\ttest.Equals(t, \"Nice to meet you, Sam!\", 
req.Result.Fulfillment.Speech)\n\n\ttest.Equals(t, float64(1), req.Result.Score)\n\n\ttest.Equals(t, \"...\", req.OriginalRequest.Data.User.UserID)\n\ttest.Equals(t, \"Sam\", req.OriginalRequest.Data.User.Profile.DisplayName)\n\ttest.Equals(t, \"Sam\", req.OriginalRequest.Data.User.Profile.GivenName)\n\ttest.Equals(t, \"Johnson\", req.OriginalRequest.Data.User.Profile.FamilyName)\n\n\ttest.Equals(t, \"...\", req.OriginalRequest.Data.User.AccessToken)\n\n\ttest.Equals(t, 123.456, req.OriginalRequest.Data.Device.Location.Coordinates.Latitude)\n\ttest.Equals(t, -123.456, req.OriginalRequest.Data.Device.Location.Coordinates.Longitude)\n\n\ttest.Equals(t, \"1234 Random Road, Anytown, CA 12345, United States\", req.OriginalRequest.Data.Device.Location.FormattedAddress)\n\ttest.Equals(t, \"12345\", req.OriginalRequest.Data.Device.Location.ZipCode)\n\ttest.Equals(t, \"Anytown\", req.OriginalRequest.Data.Device.Location.City)\n\n\ttest.Equals(t, 200, req.Status.Code)\n\ttest.Equals(t, \"success\", req.Status.ErrorType)\n\n\ttest.Equals(t, \"37151f7c-a409-48b8-9890-cd980cd2548e\", req.SessionID)\n}\n\nfunc TestRequest2Parsing(t *testing.T) {\n\n\tvar req model.ApiAiRequest\n\n\tfile, _ := os.Open(\".\/data\/sample_request2.json\")\n\tdec := json.NewDecoder(file)\n\n\terr := dec.Decode(&req)\n\n\t\/\/ test if any issues decoding file\n\ttest.Ok(t, err)\n\n\t\/\/ assert correct parsing\n\ttest.Equals(t, \"f4b72ee9-cabb-4acd-af9b-2d2cb6ff53d2\", req.Id)\n\n\texpectedTimestamp, _ := time.Parse(time.RFC3339Nano, \"2016-12-24T07:23:46.64Z\")\n\ttest.Equals(t, expectedTimestamp, req.Timestamp)\n\n\ttest.Equals(t, \"i wonder if you are ready for me?\", req.Result.ResolvedQuery)\n\ttest.Equals(t, \"agent\", req.Result.Source)\n\ttest.Equals(t, \"get_status\", req.Result.Action)\n\ttest.Equals(t, false, req.Result.ActionIncomplete)\n\ttest.Equals(t, map[string]string{}, req.Result.Parameters)\n\n}\n\n<|endoftext|>"} {"text":"<commit_before>package models\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/pborman\/uuid\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestToList(t *testing.T) {\n\tassert := assert.New(t)\n\n\ts := \"a,b,c\"\n\tl := toList(s)\n\n\tassert.EqualValues(\"a\", l[0])\n\tassert.EqualValues(\"b\", l[1])\n\tassert.EqualValues(\"c\", l[2])\n\tassert.Equal(3, len(l))\n\n\ts = \"\"\n\tl = toList(s)\n\tassert.Equal(0, len(l))\n}\n\nfunc TestToMap(t *testing.T) {\n\tassert := assert.New(t)\n\n\ts := []string{\"a:a\", \"b:b\", \"c:c\"}\n\tl := toMap(s)\n\n\tassert.EqualValues(\"a\", l[\"a\"])\n\tassert.EqualValues(\"b\", l[\"b\"])\n\tassert.EqualValues(\"c\", l[\"c\"])\n}\n\nfunc TestToUUIDList(t *testing.T) {\n\tassert := assert.New(t)\n\n\ts := \"\"\n\tnids := toUUIDList(s)\n\tassert.Equal(0, len(nids))\n\n\tids := []uuid.UUID{uuid.NewUUID(), uuid.NewUUID()}\n\ts = ids[0].String() + \",\" + ids[1].String()\n\n\tnids = toUUIDList(s)\n\n\tassert.Equal(2, len(nids))\n\tassert.Equal(ids[0], nids[0])\n\tassert.Equal(ids[1], nids[1])\n}\n<commit_msg>update models test<commit_after>package models\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/pborman\/uuid\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestToList(t *testing.T) {\n\tassert := assert.New(t)\n\n\ts := \"a,b,c\"\n\tl := ToList(s)\n\n\tassert.EqualValues(\"a\", l[0])\n\tassert.EqualValues(\"b\", l[1])\n\tassert.EqualValues(\"c\", l[2])\n\tassert.Equal(3, len(l))\n\n\ts = \"\"\n\tl = ToList(s)\n\tassert.Equal(0, len(l))\n}\n\nfunc TestToMap(t *testing.T) {\n\tassert := assert.New(t)\n\n\ts := []string{\"a:a\", \"b:b\", 
\"c:c\"}\n\tl := toMap(s)\n\n\tassert.EqualValues(\"a\", l[\"a\"])\n\tassert.EqualValues(\"b\", l[\"b\"])\n\tassert.EqualValues(\"c\", l[\"c\"])\n}\n\nfunc TestToUUIDList(t *testing.T) {\n\tassert := assert.New(t)\n\n\ts := \"\"\n\tnids := toUUIDList(s)\n\tassert.Equal(0, len(nids))\n\n\tids := []uuid.UUID{uuid.NewUUID(), uuid.NewUUID()}\n\ts = ids[0].String() + \",\" + ids[1].String()\n\n\tnids = toUUIDList(s)\n\n\tassert.Equal(2, len(nids))\n\tassert.Equal(ids[0], nids[0])\n\tassert.Equal(ids[1], nids[1])\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net\/textproto\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/dustin\/go-nntp\"\n\t\"github.com\/dustin\/go-nntp\/server\"\n\n\t\"code.google.com\/p\/dsallings-couch-go\"\n)\n\nvar groupCacheTimeout = flag.Int(\"groupTimeout\", 60,\n\t\"Time (in seconds), group cache is valid\")\n\ntype GroupRow struct {\n\tGroup string `json:\"key\"`\n\tValue []interface{} `json:\"value\"`\n}\n\ntype GroupResults struct {\n\tRows []GroupRow\n}\n\ntype Attachment struct {\n\tType string `json:\"content-type\"`\n\tData []byte `json:\"data\"`\n}\n\nfunc removeSpace(r rune) rune {\n\tif r == ' ' || r == '\\n' || r == '\\r' {\n\t\treturn -1\n\t}\n\treturn r\n}\n\nfunc (a *Attachment) MarshalJSON() ([]byte, error) {\n\tm := map[string]string{\n\t\t\"content_type\": a.Type,\n\t\t\"data\": strings.Map(removeSpace, base64.StdEncoding.EncodeToString(a.Data)),\n\t}\n\treturn json.Marshal(m)\n}\n\ntype Article struct {\n\tMsgId string `json:\"_id\"`\n\tDocType string `json:\"type\"`\n\tHeaders map[string][]string `json:\"headers\"`\n\tBytes int `json:\"bytes\"`\n\tLines int `json:\"lines\"`\n\tNums map[string]int64 `json:\"nums\"`\n\tAttachments map[string]*Attachment `json:\"_attachments\"`\n}\n\ntype ArticleResults struct {\n\tRows []struct {\n\t\tKey []interface{} `json:\"key\"`\n\t\tArticle Article `json:\"doc\"`\n\t}\n}\n\ntype couchBackend struct {\n\tdb *couch.Database\n\tgroups map[string]*nntp.Group\n\tgrouplock sync.Mutex\n}\n\nfunc (cb *couchBackend) clearGroups() {\n\tcb.grouplock.Lock()\n\tdefer cb.grouplock.Unlock()\n\n\tlog.Printf(\"Dumping group cache\")\n\tcb.groups = nil\n}\n\nfunc (cb *couchBackend) fetchGroups() error {\n\tcb.grouplock.Lock()\n\tdefer cb.grouplock.Unlock()\n\n\tif cb.groups != nil {\n\t\treturn nil\n\t}\n\n\tlog.Printf(\"Filling group cache\")\n\n\tresults := GroupResults{}\n\terr := cb.db.Query(\"_design\/groups\/_view\/list\", map[string]interface{}{\n\t\t\"group\": true,\n\t}, &results)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcb.groups = make(map[string]*nntp.Group)\n\tfor _, gr := range results.Rows {\n\t\tif gr.Value[0].(string) != \"\" {\n\t\t\tgroup := nntp.Group{\n\t\t\t\tName: gr.Group,\n\t\t\t\tDescription: gr.Value[0].(string),\n\t\t\t\tCount: int64(gr.Value[1].(float64)),\n\t\t\t\tLow: int64(gr.Value[2].(float64)),\n\t\t\t\tHigh: int64(gr.Value[3].(float64)),\n\t\t\t}\n\t\t\tcb.groups[group.Name] = &group\n\t\t}\n\t}\n\n\tgo func() {\n\t\ttime.Sleep(time.Duration(*groupCacheTimeout) * time.Second)\n\t\tcb.clearGroups()\n\t}()\n\n\treturn nil\n}\n\nfunc (cb *couchBackend) ListGroups(max int) ([]*nntp.Group, error) {\n\tif cb.groups == nil {\n\t\tif err := cb.fetchGroups(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\trv := make([]*nntp.Group, 0, len(cb.groups))\n\tfor _, g := range cb.groups {\n\t\trv = 
append(rv, g)\n\t}\n\treturn rv, nil\n}\n\nfunc (cb *couchBackend) GetGroup(name string) (*nntp.Group, error) {\n\tif cb.groups == nil {\n\t\tif err := cb.fetchGroups(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tg, exists := cb.groups[name]\n\tif !exists {\n\t\treturn nil, nntpserver.NoSuchGroup\n\t}\n\treturn g, nil\n}\n\nfunc (cb *couchBackend) mkArticle(ar Article) *nntp.Article {\n\turl := fmt.Sprintf(\"%s\/%s\/article\", cb.db.DBURL(), cleanupId(ar.MsgId))\n\treturn &nntp.Article{\n\t\tHeader: textproto.MIMEHeader(ar.Headers),\n\t\tBody: &lazyOpener{url, nil, nil},\n\t\tBytes: ar.Bytes,\n\t\tLines: ar.Lines,\n\t}\n}\n\nfunc (cb *couchBackend) GetArticle(group *nntp.Group, id string) (*nntp.Article, error) {\n\tvar ar Article\n\tif intid, err := strconv.ParseInt(id, 10, 64); err == nil {\n\t\tresults := ArticleResults{}\n\t\tcb.db.Query(\"_design\/articles\/_view\/list\", map[string]interface{}{\n\t\t\t\"include_docs\": true,\n\t\t\t\"key\": []interface{}{group.Name, intid},\n\t\t}, &results)\n\n\t\tif len(results.Rows) != 1 {\n\t\t\treturn nil, nntpserver.InvalidArticleNumber\n\t\t}\n\n\t\tar = results.Rows[0].Article\n\t} else {\n\t\terr := cb.db.Retrieve(cleanupId(id), &ar)\n\t\tif err != nil {\n\t\t\treturn nil, nntpserver.InvalidMessageId\n\t\t}\n\t}\n\n\treturn cb.mkArticle(ar), nil\n}\n\nfunc (cb *couchBackend) GetArticles(group *nntp.Group,\n\tfrom, to int64) ([]nntpserver.NumberedArticle, error) {\n\n\trv := make([]nntpserver.NumberedArticle, 0, 100)\n\n\tresults := ArticleResults{}\n\tcb.db.Query(\"_design\/articles\/_view\/list\", map[string]interface{}{\n\t\t\"include_docs\": true,\n\t\t\"start_key\": []interface{}{group.Name, from},\n\t\t\"end_key\": []interface{}{group.Name, to},\n\t}, &results)\n\n\tfor _, r := range results.Rows {\n\t\trv = append(rv, nntpserver.NumberedArticle{\n\t\t\tNum: int64(r.Key[1].(float64)),\n\t\t\tArticle: cb.mkArticle(r.Article),\n\t\t})\n\t}\n\n\treturn rv, nil\n}\n\nfunc (tb *couchBackend) AllowPost() bool {\n\treturn true\n}\n\nfunc cleanupId(msgid string) string {\n\ts := strings.TrimFunc(msgid, func(r rune) bool {\n\t\treturn r == ' ' || r == '<' || r == '>'\n\t})\n\treturn url.QueryEscape(s)\n}\n\nfunc (cb *couchBackend) Post(article *nntp.Article) error {\n\ta := Article{\n\t\tDocType: \"article\",\n\t\tHeaders: map[string][]string(article.Header),\n\t\tNums: make(map[string]int64),\n\t\tMsgId: cleanupId(article.Header.Get(\"Message-Id\")),\n\t\tAttachments: make(map[string]*Attachment),\n\t}\n\n\tb := []byte{}\n\tbuf := bytes.NewBuffer(b)\n\tn, err := io.Copy(buf, article.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Read %d bytes of body\", n)\n\n\tb = buf.Bytes()\n\ta.Bytes = len(b)\n\ta.Lines = bytes.Count(b, []byte{'\\n'})\n\n\ta.Attachments[\"article\"] = &Attachment{\"text\/plain\", b}\n\n\tfor _, g := range strings.Split(article.Header.Get(\"Newsgroups\"), \",\") {\n\t\tg = strings.TrimSpace(g)\n\t\tgroup, err := cb.GetGroup(g)\n\t\tif err == nil {\n\t\t\ta.Nums[g] = atomic.AddInt64(&group.High, 1)\n\t\t} else {\n\t\t\tlog.Printf(\"Error getting group %q: %v\", g, err)\n\t\t}\n\t}\n\n\tif len(a.Nums) == 0 {\n\t\tlog.Printf(\"Found no matching groups in %v\",\n\t\t\tarticle.Header[\"Newsgroups\"])\n\t\treturn nntpserver.PostingFailed\n\t}\n\n\t_, _, err = cb.db.Insert(&a)\n\tif err != nil {\n\t\tlog.Printf(\"error posting article: %v\", err)\n\t\treturn nntpserver.PostingFailed\n\t}\n\n\treturn nil\n}\n\nfunc (tb *couchBackend) Authorized() bool {\n\treturn true\n}\n\nfunc (tb *couchBackend) 
Authenticate(user, pass string) (nntpserver.Backend, error) {\n\treturn nil, nntpserver.AuthRejected\n}\n\nfunc maybefatal(err error, f string, a ...interface{}) {\n\tif err != nil {\n\t\tlog.Fatalf(f, a...)\n\t}\n}\n\nfunc main() {\n\n\tcouchUrl := flag.String(\"couch\", \"http:\/\/localhost:5984\/news\",\n\t\t\"Couch DB.\")\n\n\tflag.Parse()\n\n\ta, err := net.ResolveTCPAddr(\"tcp\", \":1119\")\n\tmaybefatal(err, \"Error resolving listener: %v\", err)\n\tl, err := net.ListenTCP(\"tcp\", a)\n\tmaybefatal(err, \"Error setting up listener: %v\", err)\n\tdefer l.Close()\n\n\tdb, err := couch.Connect(*couchUrl)\n\tmaybefatal(err, \"Can't connect to the couch: %v\", err)\n\terr = ensureViews(&db)\n\tmaybefatal(err, \"Error setting up views: %v\", err)\n\n\tbackend := couchBackend{\n\t\tdb: &db,\n\t}\n\n\ts := nntpserver.NewServer(&backend)\n\n\tfor {\n\t\tc, err := l.AcceptTCP()\n\t\tmaybefatal(err, \"Error accepting connection: %v\", err)\n\t\tgo s.Process(c)\n\t}\n}\n<commit_msg>Added option to optimistically store articles.<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net\/textproto\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/dustin\/go-nntp\"\n\t\"github.com\/dustin\/go-nntp\/server\"\n\n\t\"code.google.com\/p\/dsallings-couch-go\"\n)\n\nvar groupCacheTimeout = flag.Int(\"groupTimeout\", 60,\n\t\"Time (in seconds), group cache is valid\")\nvar optimisticPost = flag.Bool(\"optimistic\", false,\n\t\"Optimistically return success on store before storing\")\n\ntype GroupRow struct {\n\tGroup string `json:\"key\"`\n\tValue []interface{} `json:\"value\"`\n}\n\ntype GroupResults struct {\n\tRows []GroupRow\n}\n\ntype Attachment struct {\n\tType string `json:\"content-type\"`\n\tData []byte `json:\"data\"`\n}\n\nfunc removeSpace(r rune) rune {\n\tif r == ' ' || r == '\\n' || r == '\\r' {\n\t\treturn -1\n\t}\n\treturn r\n}\n\nfunc (a *Attachment) MarshalJSON() ([]byte, error) {\n\tm := map[string]string{\n\t\t\"content_type\": a.Type,\n\t\t\"data\": strings.Map(removeSpace, base64.StdEncoding.EncodeToString(a.Data)),\n\t}\n\treturn json.Marshal(m)\n}\n\ntype Article struct {\n\tMsgId string `json:\"_id\"`\n\tDocType string `json:\"type\"`\n\tHeaders map[string][]string `json:\"headers\"`\n\tBytes int `json:\"bytes\"`\n\tLines int `json:\"lines\"`\n\tNums map[string]int64 `json:\"nums\"`\n\tAttachments map[string]*Attachment `json:\"_attachments\"`\n}\n\ntype ArticleResults struct {\n\tRows []struct {\n\t\tKey []interface{} `json:\"key\"`\n\t\tArticle Article `json:\"doc\"`\n\t}\n}\n\ntype couchBackend struct {\n\tdb *couch.Database\n\tgroups map[string]*nntp.Group\n\tgrouplock sync.Mutex\n}\n\nfunc (cb *couchBackend) clearGroups() {\n\tcb.grouplock.Lock()\n\tdefer cb.grouplock.Unlock()\n\n\tlog.Printf(\"Dumping group cache\")\n\tcb.groups = nil\n}\n\nfunc (cb *couchBackend) fetchGroups() error {\n\tcb.grouplock.Lock()\n\tdefer cb.grouplock.Unlock()\n\n\tif cb.groups != nil {\n\t\treturn nil\n\t}\n\n\tlog.Printf(\"Filling group cache\")\n\n\tresults := GroupResults{}\n\terr := cb.db.Query(\"_design\/groups\/_view\/list\", map[string]interface{}{\n\t\t\"group\": true,\n\t}, &results)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcb.groups = make(map[string]*nntp.Group)\n\tfor _, gr := range results.Rows {\n\t\tif gr.Value[0].(string) != \"\" {\n\t\t\tgroup := nntp.Group{\n\t\t\t\tName: gr.Group,\n\t\t\t\tDescription: 
gr.Value[0].(string),\n\t\t\t\tCount: int64(gr.Value[1].(float64)),\n\t\t\t\tLow: int64(gr.Value[2].(float64)),\n\t\t\t\tHigh: int64(gr.Value[3].(float64)),\n\t\t\t}\n\t\t\tcb.groups[group.Name] = &group\n\t\t}\n\t}\n\n\tgo func() {\n\t\ttime.Sleep(time.Duration(*groupCacheTimeout) * time.Second)\n\t\tcb.clearGroups()\n\t}()\n\n\treturn nil\n}\n\nfunc (cb *couchBackend) ListGroups(max int) ([]*nntp.Group, error) {\n\tif cb.groups == nil {\n\t\tif err := cb.fetchGroups(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\trv := make([]*nntp.Group, 0, len(cb.groups))\n\tfor _, g := range cb.groups {\n\t\trv = append(rv, g)\n\t}\n\treturn rv, nil\n}\n\nfunc (cb *couchBackend) GetGroup(name string) (*nntp.Group, error) {\n\tif cb.groups == nil {\n\t\tif err := cb.fetchGroups(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tg, exists := cb.groups[name]\n\tif !exists {\n\t\treturn nil, nntpserver.NoSuchGroup\n\t}\n\treturn g, nil\n}\n\nfunc (cb *couchBackend) mkArticle(ar Article) *nntp.Article {\n\turl := fmt.Sprintf(\"%s\/%s\/article\", cb.db.DBURL(), cleanupId(ar.MsgId))\n\treturn &nntp.Article{\n\t\tHeader: textproto.MIMEHeader(ar.Headers),\n\t\tBody: &lazyOpener{url, nil, nil},\n\t\tBytes: ar.Bytes,\n\t\tLines: ar.Lines,\n\t}\n}\n\nfunc (cb *couchBackend) GetArticle(group *nntp.Group, id string) (*nntp.Article, error) {\n\tvar ar Article\n\tif intid, err := strconv.ParseInt(id, 10, 64); err == nil {\n\t\tresults := ArticleResults{}\n\t\tcb.db.Query(\"_design\/articles\/_view\/list\", map[string]interface{}{\n\t\t\t\"include_docs\": true,\n\t\t\t\"key\": []interface{}{group.Name, intid},\n\t\t}, &results)\n\n\t\tif len(results.Rows) != 1 {\n\t\t\treturn nil, nntpserver.InvalidArticleNumber\n\t\t}\n\n\t\tar = results.Rows[0].Article\n\t} else {\n\t\terr := cb.db.Retrieve(cleanupId(id), &ar)\n\t\tif err != nil {\n\t\t\treturn nil, nntpserver.InvalidMessageId\n\t\t}\n\t}\n\n\treturn cb.mkArticle(ar), nil\n}\n\nfunc (cb *couchBackend) GetArticles(group *nntp.Group,\n\tfrom, to int64) ([]nntpserver.NumberedArticle, error) {\n\n\trv := make([]nntpserver.NumberedArticle, 0, 100)\n\n\tresults := ArticleResults{}\n\tcb.db.Query(\"_design\/articles\/_view\/list\", map[string]interface{}{\n\t\t\"include_docs\": true,\n\t\t\"start_key\": []interface{}{group.Name, from},\n\t\t\"end_key\": []interface{}{group.Name, to},\n\t}, &results)\n\n\tfor _, r := range results.Rows {\n\t\trv = append(rv, nntpserver.NumberedArticle{\n\t\t\tNum: int64(r.Key[1].(float64)),\n\t\t\tArticle: cb.mkArticle(r.Article),\n\t\t})\n\t}\n\n\treturn rv, nil\n}\n\nfunc (tb *couchBackend) AllowPost() bool {\n\treturn true\n}\n\nfunc cleanupId(msgid string) string {\n\ts := strings.TrimFunc(msgid, func(r rune) bool {\n\t\treturn r == ' ' || r == '<' || r == '>'\n\t})\n\treturn url.QueryEscape(s)\n}\n\nfunc (cb *couchBackend) Post(article *nntp.Article) error {\n\ta := Article{\n\t\tDocType: \"article\",\n\t\tHeaders: map[string][]string(article.Header),\n\t\tNums: make(map[string]int64),\n\t\tMsgId: cleanupId(article.Header.Get(\"Message-Id\")),\n\t\tAttachments: make(map[string]*Attachment),\n\t}\n\n\tb := []byte{}\n\tbuf := bytes.NewBuffer(b)\n\tn, err := io.Copy(buf, article.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Read %d bytes of body\", n)\n\n\tb = buf.Bytes()\n\ta.Bytes = len(b)\n\ta.Lines = bytes.Count(b, []byte{'\\n'})\n\n\ta.Attachments[\"article\"] = &Attachment{\"text\/plain\", b}\n\n\tfor _, g := range strings.Split(article.Header.Get(\"Newsgroups\"), \",\") {\n\t\tg = 
strings.TrimSpace(g)\n\t\tgroup, err := cb.GetGroup(g)\n\t\tif err == nil {\n\t\t\ta.Nums[g] = atomic.AddInt64(&group.High, 1)\n\t\t} else {\n\t\t\tlog.Printf(\"Error getting group %q: %v\", g, err)\n\t\t}\n\t}\n\n\tif len(a.Nums) == 0 {\n\t\tlog.Printf(\"Found no matching groups in %v\",\n\t\t\tarticle.Header[\"Newsgroups\"])\n\t\treturn nntpserver.PostingFailed\n\t}\n\n\tif *optimisticPost {\n\t\tgo func() {\n\t\t\t_, _, err = cb.db.Insert(&a)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error optimistically posting article: %v\", err)\n\t\t\t}\n\t\t}()\n\t} else {\n\t\t_, _, err = cb.db.Insert(&a)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error posting article: %v\", err)\n\t\t\treturn nntpserver.PostingFailed\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (tb *couchBackend) Authorized() bool {\n\treturn true\n}\n\nfunc (tb *couchBackend) Authenticate(user, pass string) (nntpserver.Backend, error) {\n\treturn nil, nntpserver.AuthRejected\n}\n\nfunc maybefatal(err error, f string, a ...interface{}) {\n\tif err != nil {\n\t\tlog.Fatalf(f, a...)\n\t}\n}\n\nfunc main() {\n\n\tcouchUrl := flag.String(\"couch\", \"http:\/\/localhost:5984\/news\",\n\t\t\"Couch DB.\")\n\n\tflag.Parse()\n\n\ta, err := net.ResolveTCPAddr(\"tcp\", \":1119\")\n\tmaybefatal(err, \"Error resolving listener: %v\", err)\n\tl, err := net.ListenTCP(\"tcp\", a)\n\tmaybefatal(err, \"Error setting up listener: %v\", err)\n\tdefer l.Close()\n\n\tdb, err := couch.Connect(*couchUrl)\n\tmaybefatal(err, \"Can't connect to the couch: %v\", err)\n\terr = ensureViews(&db)\n\tmaybefatal(err, \"Error setting up views: %v\", err)\n\n\tbackend := couchBackend{\n\t\tdb: &db,\n\t}\n\n\ts := nntpserver.NewServer(&backend)\n\n\tfor {\n\t\tc, err := l.AcceptTCP()\n\t\tmaybefatal(err, \"Error accepting connection: %v\", err)\n\t\tgo s.Process(c)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package module3rd\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\n\t\"github.com\/cubicdaiya\/nginx-build\/command\"\n\t\"github.com\/cubicdaiya\/nginx-build\/util\"\n)\n\nfunc DownloadAndExtractParallel(m Module3rd, wg *sync.WaitGroup) {\n\tif util.FileExists(m.Name) {\n\t\tlog.Printf(\"%s already exists.\", m.Name)\n\t\twg.Done()\n\t\treturn\n\t}\n\n\tif m.Form != \"local\" {\n\t\tif len(m.Rev) > 0 {\n\t\t\tlog.Printf(\"Download %s-%s.....\", m.Name, m.Rev)\n\t\t} else {\n\t\t\tlog.Printf(\"Download %s.....\", m.Name)\n\t\t}\n\n\t\tlogName := fmt.Sprintf(\"%s.log\", m.Name)\n\n\t\terr := download(m, logName)\n\t\tif err != nil {\n\t\t\tutil.PrintFatalMsg(err, logName)\n\t\t}\n\t} else if !util.FileExists(m.Url) {\n\t\tlog.Fatalf(\"no such directory:%s\", m.Url)\n\t}\n\n\twg.Done()\n}\n\nfunc download(m Module3rd, logName string) error {\n\tform := m.Form\n\turl := m.Url\n\n\tswitch form {\n\tcase \"git\":\n\t\tfallthrough\n\tcase \"hg\":\n\t\targs := []string{form, \"clone\", url}\n\t\tif command.VerboseEnabled {\n\t\t\treturn command.Run(args)\n\t\t}\n\n\t\tf, err := os.Create(logName)\n\t\tif err != nil {\n\t\t\treturn command.Run(args)\n\t\t}\n\t\tdefer f.Close()\n\n\t\tcmd, err := command.Make(args)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\twriter := bufio.NewWriter(f)\n\t\tdefer writer.Flush()\n\n\t\tcmd.Stderr = writer\n\n\t\treturn cmd.Run()\n\tcase \"local\":\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"form=%s is not supported\", form)\n}\n<commit_msg>added comment.<commit_after>package module3rd\n\nimport 
(\n\t\"bufio\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\n\t\"github.com\/cubicdaiya\/nginx-build\/command\"\n\t\"github.com\/cubicdaiya\/nginx-build\/util\"\n)\n\nfunc DownloadAndExtractParallel(m Module3rd, wg *sync.WaitGroup) {\n\tif util.FileExists(m.Name) {\n\t\tlog.Printf(\"%s already exists.\", m.Name)\n\t\twg.Done()\n\t\treturn\n\t}\n\n\tif m.Form != \"local\" {\n\t\tif len(m.Rev) > 0 {\n\t\t\tlog.Printf(\"Download %s-%s.....\", m.Name, m.Rev)\n\t\t} else {\n\t\t\tlog.Printf(\"Download %s.....\", m.Name)\n\t\t}\n\n\t\tlogName := fmt.Sprintf(\"%s.log\", m.Name)\n\n\t\terr := download(m, logName)\n\t\tif err != nil {\n\t\t\tutil.PrintFatalMsg(err, logName)\n\t\t}\n\t} else if !util.FileExists(m.Url) {\n\t\tlog.Fatalf(\"no such directory:%s\", m.Url)\n\t}\n\n\twg.Done()\n}\n\nfunc download(m Module3rd, logName string) error {\n\tform := m.Form\n\turl := m.Url\n\n\tswitch form {\n\tcase \"git\":\n\t\tfallthrough\n\tcase \"hg\":\n\t\targs := []string{form, \"clone\", url}\n\t\tif command.VerboseEnabled {\n\t\t\treturn command.Run(args)\n\t\t}\n\n\t\tf, err := os.Create(logName)\n\t\tif err != nil {\n\t\t\treturn command.Run(args)\n\t\t}\n\t\tdefer f.Close()\n\n\t\tcmd, err := command.Make(args)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\twriter := bufio.NewWriter(f)\n\t\tdefer writer.Flush()\n\n\t\tcmd.Stderr = writer\n\n\t\treturn cmd.Run()\n\tcase \"local\": \/\/ not implemented yet\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"form=%s is not supported\", form)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * This file is part of the kubevirt project\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * Copyright 2017 Red Hat, Inc.\n *\n *\/\n\npackage tests_test\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"time\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. 
\"github.com\/onsi\/gomega\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/client-go\/pkg\/api\"\n\tkubev1 \"k8s.io\/client-go\/pkg\/api\/v1\"\n\n\t\"kubevirt.io\/kubevirt\/pkg\/api\/v1\"\n\t\"kubevirt.io\/kubevirt\/pkg\/kubecli\"\n\t\"kubevirt.io\/kubevirt\/pkg\/virt-controller\/services\"\n\t\"kubevirt.io\/kubevirt\/tests\"\n)\n\nfunc LibvirtdPodSelector() metav1.ListOptions {\n\tlabelSelector, err := labels.Parse(\"daemon=libvirt\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn metav1.ListOptions{LabelSelector: labelSelector.String()}\n}\n\nvar _ = Describe(\"Storage\", func() {\n\n\tflag.Parse()\n\n\tcoreClient, err := kubecli.Get()\n\ttests.PanicOnError(err)\n\n\trestClient, err := kubecli.GetRESTClient()\n\ttests.PanicOnError(err)\n\n\tBeforeEach(func() {\n\t\ttests.BeforeTestCleanup()\n\t})\n\n\tgetTargetLogs := func(tailLines int64) string {\n\t\tpods, err := coreClient.CoreV1().Pods(kubev1.NamespaceDefault).List(metav1.ListOptions{LabelSelector: \"app in (iscsi-demo-target)\"})\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\/\/FIXME Sometimes pods hang in terminating state, select the pod which does not have a deletion timestamp\n\t\tpodName := \"\"\n\t\tfor _, pod := range pods.Items {\n\t\t\tif pod.ObjectMeta.DeletionTimestamp == nil {\n\t\t\t\tpodName = pod.ObjectMeta.Name\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tExpect(podName).ToNot(BeEmpty())\n\n\t\tlogsRaw, err := coreClient.CoreV1().\n\t\t\tPods(\"default\").\n\t\t\tGetLogs(podName,\n\t\t\t\t&kubev1.PodLogOptions{TailLines: &tailLines}).\n\t\t\tDoRaw()\n\t\tExpect(err).To(BeNil())\n\n\t\treturn string(logsRaw)\n\t}\n\n\tBeforeEach(func() {\n\t\t\/\/ Wait until there is no connection\n\t\tlogs := func() string { return getTargetLogs(70) }\n\t\tEventually(logs,\n\t\t\t11*time.Second,\n\t\t\t500*time.Millisecond).\n\t\t\tShould(ContainSubstring(\"I_T nexus information:\\n LUN information:\"))\n\t})\n\n\tRunVMAndExpectLaunch := func(vm *v1.VM) {\n\t\tobj, err := restClient.Post().Resource(\"vms\").Namespace(tests.NamespaceTestDefault).Body(vm).Do().Get()\n\t\tExpect(err).To(BeNil())\n\t\ttests.WaitForSuccessfulVMStart(obj)\n\n\t\t\/\/ Let's get the IP of the pod of the VM\n\t\tpods, err := coreClient.CoreV1().Pods(tests.NamespaceTestDefault).List(services.UnfinishedVMPodSelector(vm))\n\t\t\/\/FIXME Sometimes pods hang in terminating state, select the pod which does not have a deletion timestamp\n\t\thostIP := \"\"\n\t\tfor _, pod := range pods.Items {\n\t\t\tif pod.ObjectMeta.DeletionTimestamp == nil {\n\t\t\t\thostIP = pod.Status.HostIP\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tExpect(hostIP).ToNot(BeEmpty())\n\n\t\t\/\/ Let's get the IP of the pod of libvirtd\n\t\tpods, err = coreClient.CoreV1().Pods(api.NamespaceDefault).List(LibvirtdPodSelector())\n\t\tpodIP := \"\"\n\t\tfor _, pod := range pods.Items {\n\t\t\tif pod.Status.HostIP == hostIP {\n\t\t\t\tpodIP = pod.Status.PodIP\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tExpect(podIP).ToNot(BeEmpty())\n\n\t\t\/\/ Periodically check if we now have a connection on the target\n\t\t\/\/ We don't check against the full pod IP, since depending on the kubernetes proxy mode, we either see the\n\t\t\/\/ full PodIP or just the proxy IP which connects through different ports\n\t\tEventually(func() string { return getTargetLogs(70) },\n\t\t\t11*time.Second,\n\t\t\t500*time.Millisecond).\n\t\t\tShould(ContainSubstring(fmt.Sprintf(\"IP Address: %s\", podIP[0:8])))\n\t}\n\n\tContext(\"Given a fresh iSCSI target\", func() {\n\n\t\tIt(\"should 
be available and ready\", func() {\n\t\t\tlogs := getTargetLogs(70)\n\t\t\tExpect(logs).To(ContainSubstring(\"Target 1: iqn.2017-01.io.kubevirt:sn.42\"))\n\t\t\tExpect(logs).To(ContainSubstring(\"Driver: iscsi\"))\n\t\t\tExpect(logs).To(ContainSubstring(\"State: ready\"))\n\t\t})\n\n\t\tIt(\"should not have any connections\", func() {\n\t\t\tlogs := getTargetLogs(70)\n\t\t\t\/\/ Ensure that no connections are listed\n\t\t\tExpect(logs).To(ContainSubstring(\"I_T nexus information:\\n LUN information:\"))\n\t\t})\n\t})\n\n\tContext(\"Given a VM and a directly connected Alpine LUN\", func() {\n\n\t\tIt(\"should be successfully started by libvirt\", func(done Done) {\n\t\t\t\/\/ Start the VM with the LUN attached\n\t\t\tvm := tests.NewRandomVMWithDirectLun(2)\n\t\t\tRunVMAndExpectLaunch(vm)\n\t\t\tclose(done)\n\t\t}, 30)\n\t})\n\n\tContext(\"Given a VM and an Alpine PVC\", func() {\n\t\tIt(\"should be successfully started by libvirt\", func(done Done) {\n\t\t\t\/\/ Start the VM with the PVC attached\n\t\t\tvm := tests.NewRandomVMWithPVC(\"disk-alpine\")\n\t\t\tRunVMAndExpectLaunch(vm)\n\t\t\tclose(done)\n\t\t}, 30)\n\t})\n})\n<commit_msg>Don't check for exact IPs in storage tests<commit_after>\/*\n * This file is part of the kubevirt project\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * Copyright 2017 Red Hat, Inc.\n *\n *\/\n\npackage tests_test\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"time\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. 
\"github.com\/onsi\/gomega\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\tkubev1 \"k8s.io\/client-go\/pkg\/api\/v1\"\n\n\t\"kubevirt.io\/kubevirt\/pkg\/api\/v1\"\n\t\"kubevirt.io\/kubevirt\/pkg\/kubecli\"\n\t\"kubevirt.io\/kubevirt\/tests\"\n)\n\nvar _ = Describe(\"Storage\", func() {\n\n\tflag.Parse()\n\n\tcoreClient, err := kubecli.Get()\n\ttests.PanicOnError(err)\n\n\trestClient, err := kubecli.GetRESTClient()\n\ttests.PanicOnError(err)\n\n\tBeforeEach(func() {\n\t\ttests.BeforeTestCleanup()\n\t})\n\n\tgetTargetLogs := func(tailLines int64) string {\n\t\tpods, err := coreClient.CoreV1().Pods(kubev1.NamespaceDefault).List(metav1.ListOptions{LabelSelector: \"app in (iscsi-demo-target)\"})\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\/\/FIXME Sometimes pods hang in terminating state, select the pod which does not have a deletion timestamp\n\t\tpodName := \"\"\n\t\tfor _, pod := range pods.Items {\n\t\t\tif pod.ObjectMeta.DeletionTimestamp == nil {\n\t\t\t\tpodName = pod.ObjectMeta.Name\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tExpect(podName).ToNot(BeEmpty())\n\n\t\tlogsRaw, err := coreClient.CoreV1().\n\t\t\tPods(\"default\").\n\t\t\tGetLogs(podName,\n\t\t\t\t&kubev1.PodLogOptions{TailLines: &tailLines}).\n\t\t\tDoRaw()\n\t\tExpect(err).To(BeNil())\n\n\t\treturn string(logsRaw)\n\t}\n\n\tBeforeEach(func() {\n\t\t\/\/ Wait until there is no connection\n\t\tlogs := func() string { return getTargetLogs(70) }\n\t\tEventually(logs,\n\t\t\t11*time.Second,\n\t\t\t500*time.Millisecond).\n\t\t\tShould(ContainSubstring(\"I_T nexus information:\\n LUN information:\"))\n\t})\n\n\tRunVMAndExpectLaunch := func(vm *v1.VM) {\n\t\tobj, err := restClient.Post().Resource(\"vms\").Namespace(tests.NamespaceTestDefault).Body(vm).Do().Get()\n\t\tExpect(err).To(BeNil())\n\t\ttests.WaitForSuccessfulVMStart(obj)\n\n\t\t\/\/ Periodically check if we now have a connection on the target\n\t\t\/\/ We don't check against the actual IP, since depending on the kubernetes proxy mode, and the network provider\n\t\t\/\/ we will see different IPs here. 
The BeforeEach function makes sure that no other connections exist.\n\t\tEventually(func() string { return getTargetLogs(70) },\n\t\t\t11*time.Second,\n\t\t\t500*time.Millisecond).\n\t\t\tShould(\n\t\t\t\tMatchRegexp(fmt.Sprintf(\"IP Address: [0-9]+\\\\.[0-9]+\\\\.[0-9]+\\\\.[0-9]+\")),\n\t\t\t)\n\t}\n\n\tContext(\"Given a fresh iSCSI target\", func() {\n\n\t\tIt(\"should be available and ready\", func() {\n\t\t\tlogs := getTargetLogs(70)\n\t\t\tExpect(logs).To(ContainSubstring(\"Target 1: iqn.2017-01.io.kubevirt:sn.42\"))\n\t\t\tExpect(logs).To(ContainSubstring(\"Driver: iscsi\"))\n\t\t\tExpect(logs).To(ContainSubstring(\"State: ready\"))\n\t\t})\n\n\t\tIt(\"should not have any connections\", func() {\n\t\t\tlogs := getTargetLogs(70)\n\t\t\t\/\/ Ensure that no connections are listed\n\t\t\tExpect(logs).To(ContainSubstring(\"I_T nexus information:\\n LUN information:\"))\n\t\t})\n\t})\n\n\tContext(\"Given a VM and a directly connected Alpine LUN\", func() {\n\n\t\tIt(\"should be successfully started by libvirt\", func(done Done) {\n\t\t\t\/\/ Start the VM with the LUN attached\n\t\t\tvm := tests.NewRandomVMWithDirectLun(2)\n\t\t\tRunVMAndExpectLaunch(vm)\n\t\t\tclose(done)\n\t\t}, 30)\n\t})\n\n\tContext(\"Given a VM and an Alpine PVC\", func() {\n\t\tIt(\"should be successfully started by libvirt\", func(done Done) {\n\t\t\t\/\/ Start the VM with the PVC attached\n\t\t\tvm := tests.NewRandomVMWithPVC(\"disk-alpine\")\n\t\t\tRunVMAndExpectLaunch(vm)\n\t\t\tclose(done)\n\t\t}, 30)\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage log\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst escape = \"\\033\"\n\n\/\/ ColorAttribute defines a single SGR Code\ntype ColorAttribute int\n\n\/\/ Base ColorAttributes\nconst (\n\tReset ColorAttribute = iota\n\tBold\n\tFaint\n\tItalic\n\tUnderline\n\tBlinkSlow\n\tBlinkRapid\n\tReverseVideo\n\tConcealed\n\tCrossedOut\n)\n\n\/\/ Foreground text colors\nconst (\n\tFgBlack ColorAttribute = iota + 30\n\tFgRed\n\tFgGreen\n\tFgYellow\n\tFgBlue\n\tFgMagenta\n\tFgCyan\n\tFgWhite\n)\n\n\/\/ Foreground Hi-Intensity text colors\nconst (\n\tFgHiBlack ColorAttribute = iota + 90\n\tFgHiRed\n\tFgHiGreen\n\tFgHiYellow\n\tFgHiBlue\n\tFgHiMagenta\n\tFgHiCyan\n\tFgHiWhite\n)\n\n\/\/ Background text colors\nconst (\n\tBgBlack ColorAttribute = iota + 40\n\tBgRed\n\tBgGreen\n\tBgYellow\n\tBgBlue\n\tBgMagenta\n\tBgCyan\n\tBgWhite\n)\n\n\/\/ Background Hi-Intensity text colors\nconst (\n\tBgHiBlack ColorAttribute = iota + 100\n\tBgHiRed\n\tBgHiGreen\n\tBgHiYellow\n\tBgHiBlue\n\tBgHiMagenta\n\tBgHiCyan\n\tBgHiWhite\n)\n\nvar colorAttributeToString = map[ColorAttribute]string{\n\tReset: \"Reset\",\n\tBold: \"Bold\",\n\tFaint: \"Faint\",\n\tItalic: \"Italic\",\n\tUnderline: \"Underline\",\n\tBlinkSlow: \"BlinkSlow\",\n\tBlinkRapid: \"BlinkRapid\",\n\tReverseVideo: \"ReverseVideo\",\n\tConcealed: \"Concealed\",\n\tCrossedOut: \"CrossedOut\",\n\tFgBlack: \"FgBlack\",\n\tFgRed: \"FgRed\",\n\tFgGreen: \"FgGreen\",\n\tFgYellow: \"FgYellow\",\n\tFgBlue: \"FgBlue\",\n\tFgMagenta: \"FgMagenta\",\n\tFgCyan: \"FgCyan\",\n\tFgWhite: \"FgWhite\",\n\tFgHiBlack: \"FgHiBlack\",\n\tFgHiRed: \"FgHiRed\",\n\tFgHiGreen: \"FgHiGreen\",\n\tFgHiYellow: \"FgHiYellow\",\n\tFgHiBlue: \"FgHiBlue\",\n\tFgHiMagenta: \"FgHiMagenta\",\n\tFgHiCyan: \"FgHiCyan\",\n\tFgHiWhite: 
\"FgHiWhite\",\n\tBgBlack: \"BgBlack\",\n\tBgRed: \"BgRed\",\n\tBgGreen: \"BgGreen\",\n\tBgYellow: \"BgYellow\",\n\tBgBlue: \"BgBlue\",\n\tBgMagenta: \"BgMagenta\",\n\tBgCyan: \"BgCyan\",\n\tBgWhite: \"BgWhite\",\n\tBgHiBlack: \"BgHiBlack\",\n\tBgHiRed: \"BgHiRed\",\n\tBgHiGreen: \"BgHiGreen\",\n\tBgHiYellow: \"BgHiYellow\",\n\tBgHiBlue: \"BgHiBlue\",\n\tBgHiMagenta: \"BgHiMagenta\",\n\tBgHiCyan: \"BgHiCyan\",\n\tBgHiWhite: \"BgHiWhite\",\n}\n\nfunc (c *ColorAttribute) String() string {\n\treturn colorAttributeToString[*c]\n}\n\nvar colorAttributeFromString = map[string]ColorAttribute{}\n\n\/\/ ColorAttributeFromString will return a ColorAttribute given a string\nfunc ColorAttributeFromString(from string) ColorAttribute {\n\tlowerFrom := strings.TrimSpace(strings.ToLower(from))\n\treturn colorAttributeFromString[lowerFrom]\n}\n\n\/\/ ColorString converts a list of ColorAttributes to a color string\nfunc ColorString(attrs ...ColorAttribute) string {\n\treturn string(ColorBytes(attrs...))\n}\n\n\/\/ ColorBytes converts a list of ColorAttributes to a byte array\nfunc ColorBytes(attrs ...ColorAttribute) []byte {\n\tbytes := make([]byte, 0, 20)\n\tbytes = append(bytes, escape[0], '[')\n\tif len(attrs) > 0 {\n\t\tbytes = append(bytes, strconv.Itoa(int(attrs[0]))...)\n\t\tfor _, a := range attrs[1:] {\n\t\t\tbytes = append(bytes, ';')\n\t\t\tbytes = append(bytes, strconv.Itoa(int(a))...)\n\t\t}\n\t} else {\n\t\tbytes = append(bytes, strconv.Itoa(int(Bold))...)\n\t}\n\tbytes = append(bytes, 'm')\n\treturn bytes\n}\n\nvar levelToColor = map[Level]string{\n\tTRACE: ColorString(Bold, FgCyan),\n\tDEBUG: ColorString(Bold, FgBlue),\n\tINFO: ColorString(Bold, FgGreen),\n\tWARN: ColorString(Bold, FgYellow),\n\tERROR: ColorString(Bold, FgRed),\n\tCRITICAL: ColorString(Bold, BgMagenta),\n\tFATAL: ColorString(Bold, BgRed),\n\tNONE: ColorString(Reset),\n}\n\nvar resetBytes = ColorBytes(Reset)\nvar fgCyanBytes = ColorBytes(FgCyan)\nvar fgGreenBytes = ColorBytes(FgGreen)\nvar fgBoldBytes = ColorBytes(Bold)\n\ntype protectedANSIWriterMode int\n\nconst (\n\tescapeAll protectedANSIWriterMode = iota\n\tallowColor\n\tremoveColor\n)\n\ntype protectedANSIWriter struct {\n\tw io.Writer\n\tmode protectedANSIWriterMode\n}\n\n\/\/ Write will protect against unusual characters\nfunc (c *protectedANSIWriter) Write(bytes []byte) (int, error) {\n\tend := len(bytes)\n\ttotalWritten := 0\nnormalLoop:\n\tfor i := 0; i < end; {\n\t\tlasti := i\n\n\t\tif c.mode == escapeAll {\n\t\t\tfor i < end && (bytes[i] >= ' ' || bytes[i] == '\\n' || bytes[i] == '\\t') {\n\t\t\t\ti++\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Allow tabs if we're not escaping everything\n\t\t\tfor i < end && (bytes[i] >= ' ' || bytes[i] == '\\t') {\n\t\t\t\ti++\n\t\t\t}\n\t\t}\n\n\t\tif i > lasti {\n\t\t\twritten, err := c.w.Write(bytes[lasti:i])\n\t\t\ttotalWritten += written\n\t\t\tif err != nil {\n\t\t\t\treturn totalWritten, err\n\t\t\t}\n\n\t\t}\n\t\tif i >= end {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ If we're not just escaping all we should prefix all newlines with a \\t\n\t\tif c.mode != escapeAll {\n\t\t\tif bytes[i] == '\\n' {\n\t\t\t\twritten, err := c.w.Write([]byte{'\\n', '\\t'})\n\t\t\t\tif written > 0 {\n\t\t\t\t\ttotalWritten++\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn totalWritten, err\n\t\t\t\t}\n\t\t\t\ti++\n\t\t\t\tcontinue normalLoop\n\t\t\t}\n\n\t\t\tif bytes[i] == escape[0] && i+1 < end && bytes[i+1] == '[' {\n\t\t\t\tfor j := i + 2; j < end; j++ {\n\t\t\t\t\tif bytes[j] >= '0' && bytes[j] <= '9' 
{\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif bytes[j] == ';' {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif bytes[j] == 'm' {\n\t\t\t\t\t\tif c.mode == allowColor {\n\t\t\t\t\t\t\twritten, err := c.w.Write(bytes[i : j+1])\n\t\t\t\t\t\t\ttotalWritten += written\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\treturn totalWritten, err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\ttotalWritten = j\n\t\t\t\t\t\t}\n\t\t\t\t\t\ti = j + 1\n\t\t\t\t\t\tcontinue normalLoop\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Process naughty character\n\t\tif _, err := fmt.Fprintf(c.w, `\\%#o03d`, bytes[i]); err != nil {\n\t\t\treturn totalWritten, err\n\t\t}\n\t\ti++\n\t\ttotalWritten++\n\t}\n\treturn totalWritten, nil\n}\n\n\/\/ ColorSprintf returns a colored string from a format and arguments\n\/\/ arguments will be wrapped in ColoredValues to protect against color spoofing\nfunc ColorSprintf(format string, args ...interface{}) string {\n\tif len(args) > 0 {\n\t\tv := make([]interface{}, len(args))\n\t\tfor i := 0; i < len(v); i++ {\n\t\t\tv[i] = NewColoredValuePointer(&args[i])\n\t\t}\n\t\treturn fmt.Sprintf(format, v...)\n\t}\n\treturn format\n}\n\n\/\/ ColorFprintf will write to the provided writer similar to ColorSprintf\nfunc ColorFprintf(w io.Writer, format string, args ...interface{}) (int, error) {\n\tif len(args) > 0 {\n\t\tv := make([]interface{}, len(args))\n\t\tfor i := 0; i < len(v); i++ {\n\t\t\tv[i] = NewColoredValuePointer(&args[i])\n\t\t}\n\t\treturn fmt.Fprintf(w, format, v...)\n\t}\n\treturn fmt.Fprint(w, format)\n}\n\n\/\/ ColorFormatted structs provide their own colored string when formatted with ColorSprintf\ntype ColorFormatted interface {\n\t\/\/ ColorFormat provides the colored representation of the value\n\tColorFormat(s fmt.State)\n}\n\nvar colorFormattedType = reflect.TypeOf((*ColorFormatted)(nil)).Elem()\n\n\/\/ ColoredValue will Color the provided value\ntype ColoredValue struct {\n\tcolorBytes *[]byte\n\tresetBytes *[]byte\n\tValue *interface{}\n}\n\n\/\/ NewColoredValue is a helper function to create a ColoredValue from a Value\n\/\/ If no color is provided it defaults to Bold with standard Reset\n\/\/ If a ColoredValue is provided it is not changed\nfunc NewColoredValue(value interface{}, color ...ColorAttribute) *ColoredValue {\n\treturn NewColoredValuePointer(&value, color...)\n}\n\n\/\/ NewColoredValuePointer is a helper function to create a ColoredValue from a Value Pointer\n\/\/ If no color is provided it defaults to Bold with standard Reset\n\/\/ If a ColoredValue is provided it is not changed\nfunc NewColoredValuePointer(value *interface{}, color ...ColorAttribute) *ColoredValue {\n\tif val, ok := (*value).(*ColoredValue); ok {\n\t\treturn val\n\t}\n\tif len(color) > 0 {\n\t\tbytes := ColorBytes(color...)\n\t\treturn &ColoredValue{\n\t\t\tcolorBytes: &bytes,\n\t\t\tresetBytes: &resetBytes,\n\t\t\tValue: value,\n\t\t}\n\t}\n\treturn &ColoredValue{\n\t\tcolorBytes: &fgBoldBytes,\n\t\tresetBytes: &resetBytes,\n\t\tValue: value,\n\t}\n\n}\n\n\/\/ NewColoredValueBytes creates a value from the provided value with color bytes\n\/\/ If a ColoredValue is provided it is not changed\nfunc NewColoredValueBytes(value interface{}, colorBytes *[]byte) *ColoredValue {\n\tif val, ok := value.(*ColoredValue); ok {\n\t\treturn val\n\t}\n\treturn &ColoredValue{\n\t\tcolorBytes: colorBytes,\n\t\tresetBytes: &resetBytes,\n\t\tValue: &value,\n\t}\n}\n\n\/\/ NewColoredIDValue is a helper function to create a ColoredValue from a Value\n\/\/ The 
Value will be colored with FgCyan\n\/\/ If a ColoredValue is provided it is not changed\nfunc NewColoredIDValue(value interface{}) *ColoredValue {\n\treturn NewColoredValueBytes(&value, &fgCyanBytes)\n}\n\n\/\/ Format will format the provided value and protect against ANSI color spoofing within the value\n\/\/ If the wrapped value is ColorFormatted and the format is \"%-v\" then its ColorString will\n\/\/ be used. It is presumed that this ColorString is safe.\nfunc (cv *ColoredValue) Format(s fmt.State, c rune) {\n\tif c == 'v' && s.Flag('-') {\n\t\tif val, ok := (*cv.Value).(ColorFormatted); ok {\n\t\t\tval.ColorFormat(s)\n\t\t\treturn\n\t\t}\n\t\tv := reflect.ValueOf(*cv.Value)\n\t\tt := v.Type()\n\n\t\tif reflect.PtrTo(t).Implements(colorFormattedType) {\n\t\t\tvp := reflect.New(t)\n\t\t\tvp.Elem().Set(v)\n\t\t\tval := vp.Interface().(ColorFormatted)\n\t\t\tval.ColorFormat(s)\n\t\t\treturn\n\t\t}\n\t}\n\ts.Write(*cv.colorBytes)\n\tfmt.Fprintf(&protectedANSIWriter{w: s}, fmtString(s, c), *(cv.Value))\n\ts.Write(*cv.resetBytes)\n}\n\n\/\/ SetColorBytes will allow a user to set the colorBytes of a colored value\nfunc (cv *ColoredValue) SetColorBytes(colorBytes []byte) {\n\tcv.colorBytes = &colorBytes\n}\n\n\/\/ SetColorBytesPointer will allow a user to set the colorBytes pointer of a colored value\nfunc (cv *ColoredValue) SetColorBytesPointer(colorBytes *[]byte) {\n\tcv.colorBytes = colorBytes\n}\n\n\/\/ SetResetBytes will allow a user to set the resetBytes pointer of a colored value\nfunc (cv *ColoredValue) SetResetBytes(resetBytes []byte) {\n\tcv.resetBytes = &resetBytes\n}\n\n\/\/ SetResetBytesPointer will allow a user to set the resetBytes pointer of a colored value\nfunc (cv *ColoredValue) SetResetBytesPointer(resetBytes *[]byte) {\n\tcv.resetBytes = resetBytes\n}\n\nfunc fmtString(s fmt.State, c rune) string {\n\tvar width, precision string\n\tbase := make([]byte, 0, 8)\n\tbase = append(base, '%')\n\tfor _, c := range []byte(\" +-#0\") {\n\t\tif s.Flag(int(c)) {\n\t\t\tbase = append(base, c)\n\t\t}\n\t}\n\tif w, ok := s.Width(); ok {\n\t\twidth = strconv.Itoa(w)\n\t}\n\tif p, ok := s.Precision(); ok {\n\t\tprecision = \".\" + strconv.Itoa(p)\n\t}\n\treturn fmt.Sprintf(\"%s%s%s%c\", base, width, precision, c)\n}\n\nfunc init() {\n\tfor attr, from := range colorAttributeToString {\n\t\tcolorAttributeFromString[strings.ToLower(from)] = attr\n\t}\n}\n<commit_msg>Fix double-indirection bug in logging IDs (#12294)<commit_after>\/\/ Copyright 2019 The Gitea Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage log\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst escape = \"\\033\"\n\n\/\/ ColorAttribute defines a single SGR Code\ntype ColorAttribute int\n\n\/\/ Base ColorAttributes\nconst (\n\tReset ColorAttribute = iota\n\tBold\n\tFaint\n\tItalic\n\tUnderline\n\tBlinkSlow\n\tBlinkRapid\n\tReverseVideo\n\tConcealed\n\tCrossedOut\n)\n\n\/\/ Foreground text colors\nconst (\n\tFgBlack ColorAttribute = iota + 30\n\tFgRed\n\tFgGreen\n\tFgYellow\n\tFgBlue\n\tFgMagenta\n\tFgCyan\n\tFgWhite\n)\n\n\/\/ Foreground Hi-Intensity text colors\nconst (\n\tFgHiBlack ColorAttribute = iota + 90\n\tFgHiRed\n\tFgHiGreen\n\tFgHiYellow\n\tFgHiBlue\n\tFgHiMagenta\n\tFgHiCyan\n\tFgHiWhite\n)\n\n\/\/ Background text colors\nconst (\n\tBgBlack ColorAttribute = iota + 40\n\tBgRed\n\tBgGreen\n\tBgYellow\n\tBgBlue\n\tBgMagenta\n\tBgCyan\n\tBgWhite\n)\n\n\/\/ Background Hi-Intensity text colors\nconst (\n\tBgHiBlack ColorAttribute = iota + 100\n\tBgHiRed\n\tBgHiGreen\n\tBgHiYellow\n\tBgHiBlue\n\tBgHiMagenta\n\tBgHiCyan\n\tBgHiWhite\n)\n\nvar colorAttributeToString = map[ColorAttribute]string{\n\tReset: \"Reset\",\n\tBold: \"Bold\",\n\tFaint: \"Faint\",\n\tItalic: \"Italic\",\n\tUnderline: \"Underline\",\n\tBlinkSlow: \"BlinkSlow\",\n\tBlinkRapid: \"BlinkRapid\",\n\tReverseVideo: \"ReverseVideo\",\n\tConcealed: \"Concealed\",\n\tCrossedOut: \"CrossedOut\",\n\tFgBlack: \"FgBlack\",\n\tFgRed: \"FgRed\",\n\tFgGreen: \"FgGreen\",\n\tFgYellow: \"FgYellow\",\n\tFgBlue: \"FgBlue\",\n\tFgMagenta: \"FgMagenta\",\n\tFgCyan: \"FgCyan\",\n\tFgWhite: \"FgWhite\",\n\tFgHiBlack: \"FgHiBlack\",\n\tFgHiRed: \"FgHiRed\",\n\tFgHiGreen: \"FgHiGreen\",\n\tFgHiYellow: \"FgHiYellow\",\n\tFgHiBlue: \"FgHiBlue\",\n\tFgHiMagenta: \"FgHiMagenta\",\n\tFgHiCyan: \"FgHiCyan\",\n\tFgHiWhite: \"FgHiWhite\",\n\tBgBlack: \"BgBlack\",\n\tBgRed: \"BgRed\",\n\tBgGreen: \"BgGreen\",\n\tBgYellow: \"BgYellow\",\n\tBgBlue: \"BgBlue\",\n\tBgMagenta: \"BgMagenta\",\n\tBgCyan: \"BgCyan\",\n\tBgWhite: \"BgWhite\",\n\tBgHiBlack: \"BgHiBlack\",\n\tBgHiRed: \"BgHiRed\",\n\tBgHiGreen: \"BgHiGreen\",\n\tBgHiYellow: \"BgHiYellow\",\n\tBgHiBlue: \"BgHiBlue\",\n\tBgHiMagenta: \"BgHiMagenta\",\n\tBgHiCyan: \"BgHiCyan\",\n\tBgHiWhite: \"BgHiWhite\",\n}\n\nfunc (c *ColorAttribute) String() string {\n\treturn colorAttributeToString[*c]\n}\n\nvar colorAttributeFromString = map[string]ColorAttribute{}\n\n\/\/ ColorAttributeFromString will return a ColorAttribute given a string\nfunc ColorAttributeFromString(from string) ColorAttribute {\n\tlowerFrom := strings.TrimSpace(strings.ToLower(from))\n\treturn colorAttributeFromString[lowerFrom]\n}\n\n\/\/ ColorString converts a list of ColorAttributes to a color string\nfunc ColorString(attrs ...ColorAttribute) string {\n\treturn string(ColorBytes(attrs...))\n}\n\n\/\/ ColorBytes converts a list of ColorAttributes to a byte array\nfunc ColorBytes(attrs ...ColorAttribute) []byte {\n\tbytes := make([]byte, 0, 20)\n\tbytes = append(bytes, escape[0], '[')\n\tif len(attrs) > 0 {\n\t\tbytes = append(bytes, strconv.Itoa(int(attrs[0]))...)\n\t\tfor _, a := range attrs[1:] {\n\t\t\tbytes = append(bytes, ';')\n\t\t\tbytes = append(bytes, strconv.Itoa(int(a))...)\n\t\t}\n\t} else {\n\t\tbytes = append(bytes, strconv.Itoa(int(Bold))...)\n\t}\n\tbytes = append(bytes, 'm')\n\treturn bytes\n}\n\nvar levelToColor = map[Level]string{\n\tTRACE: ColorString(Bold, 
FgCyan),\n\tDEBUG: ColorString(Bold, FgBlue),\n\tINFO: ColorString(Bold, FgGreen),\n\tWARN: ColorString(Bold, FgYellow),\n\tERROR: ColorString(Bold, FgRed),\n\tCRITICAL: ColorString(Bold, BgMagenta),\n\tFATAL: ColorString(Bold, BgRed),\n\tNONE: ColorString(Reset),\n}\n\nvar resetBytes = ColorBytes(Reset)\nvar fgCyanBytes = ColorBytes(FgCyan)\nvar fgGreenBytes = ColorBytes(FgGreen)\nvar fgBoldBytes = ColorBytes(Bold)\n\ntype protectedANSIWriterMode int\n\nconst (\n\tescapeAll protectedANSIWriterMode = iota\n\tallowColor\n\tremoveColor\n)\n\ntype protectedANSIWriter struct {\n\tw io.Writer\n\tmode protectedANSIWriterMode\n}\n\n\/\/ Write will protect against unusual characters\nfunc (c *protectedANSIWriter) Write(bytes []byte) (int, error) {\n\tend := len(bytes)\n\ttotalWritten := 0\nnormalLoop:\n\tfor i := 0; i < end; {\n\t\tlasti := i\n\n\t\tif c.mode == escapeAll {\n\t\t\tfor i < end && (bytes[i] >= ' ' || bytes[i] == '\\n' || bytes[i] == '\\t') {\n\t\t\t\ti++\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Allow tabs if we're not escaping everything\n\t\t\tfor i < end && (bytes[i] >= ' ' || bytes[i] == '\\t') {\n\t\t\t\ti++\n\t\t\t}\n\t\t}\n\n\t\tif i > lasti {\n\t\t\twritten, err := c.w.Write(bytes[lasti:i])\n\t\t\ttotalWritten += written\n\t\t\tif err != nil {\n\t\t\t\treturn totalWritten, err\n\t\t\t}\n\n\t\t}\n\t\tif i >= end {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ If we're not just escaping all we should prefix all newlines with a \\t\n\t\tif c.mode != escapeAll {\n\t\t\tif bytes[i] == '\\n' {\n\t\t\t\twritten, err := c.w.Write([]byte{'\\n', '\\t'})\n\t\t\t\tif written > 0 {\n\t\t\t\t\ttotalWritten++\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn totalWritten, err\n\t\t\t\t}\n\t\t\t\ti++\n\t\t\t\tcontinue normalLoop\n\t\t\t}\n\n\t\t\tif bytes[i] == escape[0] && i+1 < end && bytes[i+1] == '[' {\n\t\t\t\tfor j := i + 2; j < end; j++ {\n\t\t\t\t\tif bytes[j] >= '0' && bytes[j] <= '9' {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif bytes[j] == ';' {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif bytes[j] == 'm' {\n\t\t\t\t\t\tif c.mode == allowColor {\n\t\t\t\t\t\t\twritten, err := c.w.Write(bytes[i : j+1])\n\t\t\t\t\t\t\ttotalWritten += written\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\treturn totalWritten, err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\ttotalWritten = j\n\t\t\t\t\t\t}\n\t\t\t\t\t\ti = j + 1\n\t\t\t\t\t\tcontinue normalLoop\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Process naughty character\n\t\tif _, err := fmt.Fprintf(c.w, `\\%#o03d`, bytes[i]); err != nil {\n\t\t\treturn totalWritten, err\n\t\t}\n\t\ti++\n\t\ttotalWritten++\n\t}\n\treturn totalWritten, nil\n}\n\n\/\/ ColorSprintf returns a colored string from a format and arguments\n\/\/ arguments will be wrapped in ColoredValues to protect against color spoofing\nfunc ColorSprintf(format string, args ...interface{}) string {\n\tif len(args) > 0 {\n\t\tv := make([]interface{}, len(args))\n\t\tfor i := 0; i < len(v); i++ {\n\t\t\tv[i] = NewColoredValuePointer(&args[i])\n\t\t}\n\t\treturn fmt.Sprintf(format, v...)\n\t}\n\treturn format\n}\n\n\/\/ ColorFprintf will write to the provided writer similar to ColorSprintf\nfunc ColorFprintf(w io.Writer, format string, args ...interface{}) (int, error) {\n\tif len(args) > 0 {\n\t\tv := make([]interface{}, len(args))\n\t\tfor i := 0; i < len(v); i++ {\n\t\t\tv[i] = NewColoredValuePointer(&args[i])\n\t\t}\n\t\treturn fmt.Fprintf(w, format, v...)\n\t}\n\treturn fmt.Fprint(w, format)\n}\n\n\/\/ ColorFormatted structs provide their own colored 
string when formatted with ColorSprintf\ntype ColorFormatted interface {\n\t\/\/ ColorFormat provides the colored representation of the value\n\tColorFormat(s fmt.State)\n}\n\nvar colorFormattedType = reflect.TypeOf((*ColorFormatted)(nil)).Elem()\n\n\/\/ ColoredValue will Color the provided value\ntype ColoredValue struct {\n\tcolorBytes *[]byte\n\tresetBytes *[]byte\n\tValue *interface{}\n}\n\n\/\/ NewColoredValue is a helper function to create a ColoredValue from a Value\n\/\/ If no color is provided it defaults to Bold with standard Reset\n\/\/ If a ColoredValue is provided it is not changed\nfunc NewColoredValue(value interface{}, color ...ColorAttribute) *ColoredValue {\n\treturn NewColoredValuePointer(&value, color...)\n}\n\n\/\/ NewColoredValuePointer is a helper function to create a ColoredValue from a Value Pointer\n\/\/ If no color is provided it defaults to Bold with standard Reset\n\/\/ If a ColoredValue is provided it is not changed\nfunc NewColoredValuePointer(value *interface{}, color ...ColorAttribute) *ColoredValue {\n\tif val, ok := (*value).(*ColoredValue); ok {\n\t\treturn val\n\t}\n\tif len(color) > 0 {\n\t\tbytes := ColorBytes(color...)\n\t\treturn &ColoredValue{\n\t\t\tcolorBytes: &bytes,\n\t\t\tresetBytes: &resetBytes,\n\t\t\tValue: value,\n\t\t}\n\t}\n\treturn &ColoredValue{\n\t\tcolorBytes: &fgBoldBytes,\n\t\tresetBytes: &resetBytes,\n\t\tValue: value,\n\t}\n\n}\n\n\/\/ NewColoredValueBytes creates a value from the provided value with color bytes\n\/\/ If a ColoredValue is provided it is not changed\nfunc NewColoredValueBytes(value interface{}, colorBytes *[]byte) *ColoredValue {\n\tif val, ok := value.(*ColoredValue); ok {\n\t\treturn val\n\t}\n\treturn &ColoredValue{\n\t\tcolorBytes: colorBytes,\n\t\tresetBytes: &resetBytes,\n\t\tValue: &value,\n\t}\n}\n\n\/\/ NewColoredIDValue is a helper function to create a ColoredValue from a Value\n\/\/ The Value will be colored with FgCyan\n\/\/ If a ColoredValue is provided it is not changed\nfunc NewColoredIDValue(value interface{}) *ColoredValue {\n\treturn NewColoredValueBytes(value, &fgCyanBytes)\n}\n\n\/\/ Format will format the provided value and protect against ANSI color spoofing within the value\n\/\/ If the wrapped value is ColorFormatted and the format is \"%-v\" then its ColorString will\n\/\/ be used. 
It is presumed that this ColorString is safe.\nfunc (cv *ColoredValue) Format(s fmt.State, c rune) {\n\tif c == 'v' && s.Flag('-') {\n\t\tif val, ok := (*cv.Value).(ColorFormatted); ok {\n\t\t\tval.ColorFormat(s)\n\t\t\treturn\n\t\t}\n\t\tv := reflect.ValueOf(*cv.Value)\n\t\tt := v.Type()\n\n\t\tif reflect.PtrTo(t).Implements(colorFormattedType) {\n\t\t\tvp := reflect.New(t)\n\t\t\tvp.Elem().Set(v)\n\t\t\tval := vp.Interface().(ColorFormatted)\n\t\t\tval.ColorFormat(s)\n\t\t\treturn\n\t\t}\n\t}\n\ts.Write(*cv.colorBytes)\n\tfmt.Fprintf(&protectedANSIWriter{w: s}, fmtString(s, c), *(cv.Value))\n\ts.Write(*cv.resetBytes)\n}\n\n\/\/ SetColorBytes will allow a user to set the colorBytes of a colored value\nfunc (cv *ColoredValue) SetColorBytes(colorBytes []byte) {\n\tcv.colorBytes = &colorBytes\n}\n\n\/\/ SetColorBytesPointer will allow a user to set the colorBytes pointer of a colored value\nfunc (cv *ColoredValue) SetColorBytesPointer(colorBytes *[]byte) {\n\tcv.colorBytes = colorBytes\n}\n\n\/\/ SetResetBytes will allow a user to set the resetBytes pointer of a colored value\nfunc (cv *ColoredValue) SetResetBytes(resetBytes []byte) {\n\tcv.resetBytes = &resetBytes\n}\n\n\/\/ SetResetBytesPointer will allow a user to set the resetBytes pointer of a colored value\nfunc (cv *ColoredValue) SetResetBytesPointer(resetBytes *[]byte) {\n\tcv.resetBytes = resetBytes\n}\n\nfunc fmtString(s fmt.State, c rune) string {\n\tvar width, precision string\n\tbase := make([]byte, 0, 8)\n\tbase = append(base, '%')\n\tfor _, c := range []byte(\" +-#0\") {\n\t\tif s.Flag(int(c)) {\n\t\t\tbase = append(base, c)\n\t\t}\n\t}\n\tif w, ok := s.Width(); ok {\n\t\twidth = strconv.Itoa(w)\n\t}\n\tif p, ok := s.Precision(); ok {\n\t\tprecision = \".\" + strconv.Itoa(p)\n\t}\n\treturn fmt.Sprintf(\"%s%s%s%c\", base, width, precision, c)\n}\n\nfunc init() {\n\tfor attr, from := range colorAttributeToString {\n\t\tcolorAttributeFromString[strings.ToLower(from)] = attr\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"poliskarta\/helperfunctions\"\n\t\"strings\"\n)\n\n\/\/Used to filter out Europe road names, like \"E6\", \"E22\"\nvar europeRoads []string\n\n\/\/ var arrayOfTitles []string\n\n\/\/Used to include common words in names of places\n\/\/separated by space, like \"Jörgens trappa\", \"Anna Lindhs plats\"\nvar validWordsForPlaces []string\n\n\/\/Used to filter out words for roads followed by numbers,\n\/\/like \"Lv 598\", \"väg 112\" and the like\nvar invalidWordsForRoads []string\n\n\/\/Rule 1:\nfunc Rule1(description string) []string {\n\tfillEuropeRoads()\n\tfillValidWordsForPlaces()\n\tfillInvalidWordsForRoads()\n\n\tprevWordAdded := false\n\n\t\/\/Split on spaces - descWords = array\n\tdescWords := strings.Split(description, \" \")\n\thelperfunctions.TrimSpacesFromArray(&descWords)\n\n\t\/\/The resulting array of words after filtering\n\tplaceWords := []string{}\n\n\t\/\/Loop through the array of words\n\tfor i := 1; i < len(descWords); i++ {\n\n\t\tcurrentWord := descWords[i]\n\t\tprevWord := descWords[i-1]\n\n\t\t\/\/Skip iteration if the previous word had a \".\" in the end\n\t\tif strings.HasSuffix(prevWord, \".\") {\n\t\t\tprevWordAdded = false\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/Check if previous word was added and current word is in valid road list\n\t\tif prevWordAdded {\n\t\t\thelperfunctions.TrimSuffixesFromWord(&currentWord, \".\", \",\")\n\n\t\t\tif helperfunctions.StringInSlice(currentWord, validWordsForPlaces) {\n\t\t\t\tplaceWords = append(placeWords, 
currentWord)\n\t\t\t\tprevWordAdded = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t\/\/Check if current word is part of the invalid road-words\n\t\tif helperfunctions.StringInSlice(currentWord, invalidWordsForRoads) && currentIndexNotLast(i, descWords) {\n\t\t\tnextWordInArray := descWords[i+1]\n\t\t\thelperfunctions.TrimSuffixesFromWord(&nextWordInArray, \".\", \",\")\n\n\t\t\t\/\/Check if next word is number, if so: add it\n\t\t\tif helperfunctions.WordIsNumber(nextWordInArray) {\n\t\t\t\tplaceWords = append(placeWords, nextWordInArray)\n\t\t\t\ti++\n\t\t\t\tprevWordAdded = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t\/\/Check if current word starts with uppercase and is NOT europe road\n\t\tif helperfunctions.StartsWithUppercase(currentWord) {\n\t\t\thelperfunctions.TrimSuffixesFromWord(&currentWord, \".\", \",\")\n\t\t\tif !helperfunctions.StringInSliceIgnoreCase(currentWord, europeRoads) {\n\t\t\t\tplaceWords = append(placeWords, currentWord)\n\t\t\t\tprevWordAdded = true\n\t\t\t}\n\t\t} else {\n\t\t\tprevWordAdded = false\n\t\t}\n\t}\n\n\treturn placeWords\n}\nfunc fillEuropeRoads() {\n\teuropeRoads = []string{\"E4\", \"E6\", \"E10\", \"E12\", \"E14\", \"E16\", \"E18\", \"E22\", \"E45\", \"E65\", \"E\", \"Lv\"}\n}\n\nfunc fillValidWordsForPlaces() {\n\tvalidWordsForPlaces = []string{\"väg\", \"gränd\", \"plats\", \"gata\", \"led\", \"torg\", \"park\", \"trappa\", \"trappor\", \"bro\", \"gångbro\", \"allé\", \"alle\", \"aveny\", \"plan\", \"kaj\", \"hamn\", \"strand\", \"stig\", \"backe\", \"kajen\", \"hamnen\", \"holme\", \"holmar\", \"dockan\", \"parkväg\", \"byväg\", \"byaväg\", \"gård\", \"stråket\", \"tvärgata\", \"gårdar\", \"parkgata\", \"idrottsväg\", \"broväg\", \"vägen\", \"stationsgata\", \"hamngata\", \"bangårdsgata\", \"fätåg\", \"kyrkogata\", \"hage\", \"stråket\", \"ö\", \"träsk\", \"flygplats\", \"industriväg\", \"trappgata\", \"kärr\", \"ringvägen\"}\n}\n\nfunc fillInvalidWordsForRoads() {\n\tinvalidWordsForRoads = []string{\"väg\", \"Lv\", \"Länsväg\", \"länsväg\"}\n}\n\nfunc currentIndexNotLast(index int, strings []string) bool {\n\treturn index < len(strings)-1\n}\n<commit_msg>Changed variable name (placeWords -> locationWords)<commit_after>package main\n\nimport (\n\t\"poliskarta\/helperfunctions\"\n\t\"strings\"\n)\n\n\/\/Used to filter out Europe road names, like \"E6\", \"E22\"\nvar europeRoads []string\n\n\/\/ var arrayOfTitles []string\n\n\/\/Used to include common words in names of places\n\/\/separated by space, like \"Jörgens trappa\", \"Anna Lindhs plats\"\nvar validWordsForPlaces []string\n\n\/\/Used to filter out words for roads followed by numbers,\n\/\/like \"Lv 598\", \"väg 112\" and the like\nvar invalidWordsForRoads []string\n\n\/\/Rule 1:\nfunc Rule1(description string) []string {\n\tfillEuropeRoads()\n\tfillValidWordsForPlaces()\n\tfillInvalidWordsForRoads()\n\n\tprevWordAdded := false\n\n\t\/\/Split on spaces - descWords = array\n\tdescWords := strings.Split(description, \" \")\n\thelperfunctions.TrimSpacesFromArray(&descWords)\n\n\t\/\/The resulting array of words after filtering\n\tlocationWords := []string{}\n\n\t\/\/Loop through the array of words\n\tfor i := 1; i < len(descWords); i++ {\n\n\t\tcurrentWord := descWords[i]\n\t\tprevWord := descWords[i-1]\n\n\t\t\/\/Skip iteration if the previous word had a \".\" in the end\n\t\tif strings.HasSuffix(prevWord, \".\") {\n\t\t\tprevWordAdded = false\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/Check if previous word was added and current word is in valid road list\n\t\tif prevWordAdded 
{\n\t\t\thelperfunctions.TrimSuffixesFromWord(&currentWord, \".\", \",\")\n\n\t\t\tif helperfunctions.StringInSlice(currentWord, validWordsForPlaces) {\n\t\t\t\tlocationWords = append(locationWords, currentWord)\n\t\t\t\tprevWordAdded = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t\/*\n\t\t\tHere I have an idea about doing a backward check when we hit a number, instead\n\t\t\tof a forward check. Right now EVERY word is checked against the invalid road list!\n\t\t\tThat would follow the same structure as the other checks we do. But it could also turn out badly,\n\t\t\tI have not thought it through completely.\n\n\t\t\tAlso: maybe add a dot check so that the trim on . and , only needs to be done once at the start,\n\t\t\ti.e. first check whether the word has a period or comma, and if so set a bool to true, then trim regardless.\n\t\t\tRight now it is trimmed three times, and there will probably be more.\n\t\t*\/\n\n\t\t\/\/Check if current word is part of the invalid road-words\n\t\tif helperfunctions.StringInSlice(currentWord, invalidWordsForRoads) && currentIndexNotLast(i, descWords) {\n\t\t\tnextWordInArray := descWords[i+1]\n\t\t\thelperfunctions.TrimSuffixesFromWord(&nextWordInArray, \".\", \",\")\n\n\t\t\t\/\/Check if next word is number, if so: add it\n\t\t\tif helperfunctions.WordIsNumber(nextWordInArray) {\n\t\t\t\tlocationWords = append(locationWords, nextWordInArray)\n\t\t\t\tprevWordAdded = true\n\t\t\t\ti++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t\/\/Check if current word starts with uppercase and is NOT europe road\n\t\tif helperfunctions.StartsWithUppercase(currentWord) {\n\t\t\thelperfunctions.TrimSuffixesFromWord(&currentWord, \".\", \",\")\n\t\t\tif !helperfunctions.StringInSliceIgnoreCase(currentWord, europeRoads) {\n\t\t\t\tlocationWords = append(locationWords, currentWord)\n\t\t\t\tprevWordAdded = true\n\t\t\t}\n\t\t} else {\n\t\t\tprevWordAdded = false\n\t\t}\n\t}\n\n\treturn locationWords\n}\nfunc fillEuropeRoads() {\n\teuropeRoads = []string{\"E4\", \"E6\", \"E10\", \"E12\", \"E14\", \"E16\", \"E18\", \"E22\", \"E45\", \"E65\", \"E\", \"Lv\"}\n}\n\nfunc fillValidWordsForPlaces() {\n\tvalidWordsForPlaces = []string{\"väg\", \"gränd\", \"plats\", \"gata\", \"led\", \"torg\", \"park\", \"trappa\", \"trappor\", \"bro\", \"gångbro\", \"allé\", \"alle\", \"aveny\", \"plan\", \"kaj\", \"hamn\", \"strand\", \"stig\", \"backe\", \"kajen\", \"hamnen\", \"holme\", \"holmar\", \"dockan\", \"parkväg\", \"byväg\", \"byaväg\", \"gård\", \"stråket\", \"tvärgata\", \"gårdar\", \"parkgata\", \"idrottsväg\", \"broväg\", \"vägen\", \"stationsgata\", \"hamngata\", \"bangårdsgata\", \"fätåg\", \"kyrkogata\", \"hage\", \"stråket\", \"ö\", \"träsk\", \"flygplats\", \"industriväg\", \"trappgata\", \"kärr\", \"ringvägen\"}\n}\n\nfunc fillInvalidWordsForRoads() {\n\tinvalidWordsForRoads = []string{\"väg\", \"Lv\", \"Länsväg\", \"länsväg\"}\n}\n\nfunc currentIndexNotLast(index int, strings []string) bool {\n\treturn index < len(strings)-1\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * This file is part of the KubeVirt project\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License 
for the specific language governing permissions and\n * limitations under the License.\n *\n * Copyright 2018 Red Hat, Inc.\n *\n *\/\n\npackage tests_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"kubevirt.io\/kubevirt\/tests\/framework\/checks\"\n\n\t. \"github.com\/onsi\/ginkgo\/v2\"\n\t. \"github.com\/onsi\/gomega\"\n\tk8sv1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/resource\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\n\tv1 \"kubevirt.io\/api\/core\/v1\"\n\t\"kubevirt.io\/client-go\/kubecli\"\n\n\t\"kubevirt.io\/kubevirt\/pkg\/network\/dns\"\n\t\"kubevirt.io\/kubevirt\/tests\"\n\t\"kubevirt.io\/kubevirt\/tests\/flags\"\n\t\"kubevirt.io\/kubevirt\/tests\/libnet\"\n\t\"kubevirt.io\/kubevirt\/tests\/libstorage\"\n\t\"kubevirt.io\/kubevirt\/tests\/libvmi\"\n\t\"kubevirt.io\/kubevirt\/tests\/util\"\n)\n\nconst (\n\twindowsDisk = \"windows-disk\"\n\twindowsFirmware = \"5d307ca9-b3ef-428c-8861-06e72d69f223\"\n\twindowsVMIUser = \"Administrator\"\n\twindowsVMIPassword = \"Heslo123\"\n)\n\nconst (\n\twinrmCli = \"winrmcli\"\n\twinrmCliCmd = \"winrm-cli\"\n)\n\nvar getWindowsVMISpec = func() v1.VirtualMachineInstanceSpec {\n\tgracePeriod := int64(0)\n\tspinlocks := uint32(8191)\n\tfirmware := types.UID(windowsFirmware)\n\t_false := false\n\treturn v1.VirtualMachineInstanceSpec{\n\t\tTerminationGracePeriodSeconds: &gracePeriod,\n\t\tDomain: v1.DomainSpec{\n\t\t\tCPU: &v1.CPU{Cores: 2},\n\t\t\tFeatures: &v1.Features{\n\t\t\t\tACPI: v1.FeatureState{},\n\t\t\t\tAPIC: &v1.FeatureAPIC{},\n\t\t\t\tHyperv: &v1.FeatureHyperv{\n\t\t\t\t\tRelaxed: &v1.FeatureState{},\n\t\t\t\t\tSyNICTimer: &v1.SyNICTimer{Direct: &v1.FeatureState{}},\n\t\t\t\t\tVAPIC: &v1.FeatureState{},\n\t\t\t\t\tSpinlocks: &v1.FeatureSpinlocks{Retries: &spinlocks},\n\t\t\t\t},\n\t\t\t},\n\t\t\tClock: &v1.Clock{\n\t\t\t\tClockOffset: v1.ClockOffset{UTC: &v1.ClockOffsetUTC{}},\n\t\t\t\tTimer: &v1.Timer{\n\t\t\t\t\tHPET: &v1.HPETTimer{Enabled: &_false},\n\t\t\t\t\tPIT: &v1.PITTimer{TickPolicy: v1.PITTickPolicyDelay},\n\t\t\t\t\tRTC: &v1.RTCTimer{TickPolicy: v1.RTCTickPolicyCatchup},\n\t\t\t\t\tHyperv: &v1.HypervTimer{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tFirmware: &v1.Firmware{UUID: firmware},\n\t\t\tResources: v1.ResourceRequirements{\n\t\t\t\tRequests: k8sv1.ResourceList{\n\t\t\t\t\tk8sv1.ResourceMemory: resource.MustParse(\"2048Mi\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tDevices: v1.Devices{\n\t\t\t\tDisks: []v1.Disk{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: windowsDisk,\n\t\t\t\t\t\tDiskDevice: v1.DiskDevice{\n\t\t\t\t\t\t\tDisk: &v1.DiskTarget{\n\t\t\t\t\t\t\t\tBus: v1.DiskBusSATA,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tVolumes: []v1.Volume{\n\t\t\t{\n\t\t\t\tName: windowsDisk,\n\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\tEphemeral: &v1.EphemeralVolumeSource{\n\t\t\t\t\t\tPersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\t\tClaimName: tests.DiskWindows,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n}\n\nvar _ = Describe(\"[Serial][sig-compute]Windows VirtualMachineInstance\", func() {\n\tvar err error\n\tvar virtClient kubecli.KubevirtClient\n\n\tvar windowsVMI *v1.VirtualMachineInstance\n\n\tBeforeEach(func() {\n\t\tconst OSWindows = \"windows\"\n\t\tvirtClient, err = kubecli.GetKubevirtClient()\n\t\tutil.PanicOnError(err)\n\t\tchecks.SkipIfMissingRequiredImage(virtClient, tests.DiskWindows)\n\t\tlibstorage.CreatePVC(OSWindows, \"30Gi\", 
libstorage.Config.StorageClassWindows, true)\n\t\twindowsVMI = tests.NewRandomVMI()\n\t\twindowsVMI.Spec = getWindowsVMISpec()\n\t\ttests.AddExplicitPodNetworkInterface(windowsVMI)\n\t\twindowsVMI.Spec.Domain.Devices.Interfaces[0].Model = \"e1000\"\n\t})\n\n\tContext(\"with winrm connection\", func() {\n\t\tvar winrmcliPod *k8sv1.Pod\n\t\tvar cli []string\n\t\tvar output string\n\n\t\tBeforeEach(func() {\n\t\t\tBy(\"Creating winrm-cli pod for the future use\")\n\t\t\twinrmcliPod = &k8sv1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{GenerateName: winrmCli},\n\t\t\t\tSpec: k8sv1.PodSpec{\n\t\t\t\t\tContainers: []k8sv1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: winrmCli,\n\t\t\t\t\t\t\tImage: fmt.Sprintf(\"%s\/%s:%s\", flags.KubeVirtUtilityRepoPrefix, winrmCli, flags.KubeVirtUtilityVersionTag),\n\t\t\t\t\t\t\tCommand: []string{\"sleep\"},\n\t\t\t\t\t\t\tArgs: []string{\"3600\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\twinrmcliPod, err = virtClient.CoreV1().Pods(util.NamespaceTestDefault).Create(context.Background(), winrmcliPod, metav1.CreateOptions{})\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t})\n\n\t\tContext(\"[ref_id:139]VMI is created\", func() {\n\n\t\t\tBeforeEach(func() {\n\t\t\t\tBy(\"Starting the windows VirtualMachineInstance\")\n\t\t\t\twindowsVMI, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(windowsVMI)\n\t\t\t\tExpect(err).To(BeNil())\n\t\t\t\ttests.WaitForSuccessfulVMIStartWithTimeout(windowsVMI, 360)\n\n\t\t\t\tcli = winrnLoginCommand(virtClient, windowsVMI)\n\t\t\t})\n\n\t\t\tIt(\"[test_id:240]should have correct UUID\", func() {\n\t\t\t\tcommand := append(cli, \"wmic csproduct get \\\"UUID\\\"\")\n\t\t\t\tBy(fmt.Sprintf(\"Running \\\"%s\\\" command via winrm-cli\", command))\n\t\t\t\tEventually(func() error {\n\t\t\t\t\toutput, err = tests.ExecuteCommandOnPod(\n\t\t\t\t\t\tvirtClient,\n\t\t\t\t\t\twinrmcliPod,\n\t\t\t\t\t\twinrmcliPod.Spec.Containers[0].Name,\n\t\t\t\t\t\tcommand,\n\t\t\t\t\t)\n\t\t\t\t\treturn err\n\t\t\t\t}, time.Minute*5, time.Second*15).ShouldNot(HaveOccurred())\n\t\t\t\tBy(\"Checking that the Windows VirtualMachineInstance has expected UUID\")\n\t\t\t\tExpect(output).Should(ContainSubstring(strings.ToUpper(windowsFirmware)))\n\t\t\t})\n\n\t\t\tIt(\"[test_id:3159]should have default masquerade IP\", func() {\n\t\t\t\tcommand := append(cli, \"ipconfig \/all\")\n\t\t\t\tBy(fmt.Sprintf(\"Running \\\"%s\\\" command via winrm-cli\", command))\n\t\t\t\tEventually(func() error {\n\t\t\t\t\toutput, err = tests.ExecuteCommandOnPod(\n\t\t\t\t\t\tvirtClient,\n\t\t\t\t\t\twinrmcliPod,\n\t\t\t\t\t\twinrmcliPod.Spec.Containers[0].Name,\n\t\t\t\t\t\tcommand,\n\t\t\t\t\t)\n\t\t\t\t\treturn err\n\t\t\t\t}, time.Minute*5, time.Second*15).ShouldNot(HaveOccurred())\n\n\t\t\t\tBy(\"Checking that the Windows VirtualMachineInstance has expected IP address\")\n\t\t\t\tExpect(output).Should(ContainSubstring(\"10.0.2.2\"))\n\t\t\t})\n\n\t\t\tIt(\"[test_id:3160]should have the domain set properly\", func() {\n\t\t\t\tsearchDomain := getPodSearchDomain(windowsVMI)\n\t\t\t\tExpect(searchDomain).To(HavePrefix(windowsVMI.Namespace), \"should contain a searchdomain with the namespace of the VMI\")\n\n\t\t\t\trunCommandAndExpectOutput(virtClient,\n\t\t\t\t\twinrmcliPod,\n\t\t\t\t\tcli,\n\t\t\t\t\t\"wmic nicconfig get dnsdomain\",\n\t\t\t\t\t`DNSDomain[\\n\\r\\t ]+`+searchDomain+`[\\n\\r\\t ]+`)\n\t\t\t})\n\t\t})\n\n\t\tContext(\"VMI with subdomain is created\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\twindowsVMI.Spec.Subdomain = 
\"subdomain\"\n\n\t\t\t\tBy(\"Starting the windows VirtualMachineInstance with subdomain\")\n\t\t\t\twindowsVMI, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(windowsVMI)\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\ttests.WaitForSuccessfulVMIStartWithTimeout(windowsVMI, 360)\n\n\t\t\t\tcli = winrnLoginCommand(virtClient, windowsVMI)\n\t\t\t})\n\n\t\t\tIt(\"should have the domain set properly with subdomain\", func() {\n\t\t\t\tsearchDomain := getPodSearchDomain(windowsVMI)\n\t\t\t\tExpect(searchDomain).To(HavePrefix(windowsVMI.Namespace), \"should contain a searchdomain with the namespace of the VMI\")\n\n\t\t\t\texpectedSearchDomain := windowsVMI.Spec.Subdomain + \".\" + searchDomain\n\t\t\t\trunCommandAndExpectOutput(virtClient,\n\t\t\t\t\twinrmcliPod,\n\t\t\t\t\tcli,\n\t\t\t\t\t\"wmic nicconfig get dnsdomain\",\n\t\t\t\t\t`DNSDomain[\\n\\r\\t ]+`+expectedSearchDomain+`[\\n\\r\\t ]+`)\n\t\t\t})\n\t\t})\n\n\t\tContext(\"with bridge binding\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tBy(\"Starting Windows VirtualMachineInstance with bridge binding\")\n\t\t\t\twindowsVMI.Spec.Domain.Devices.Interfaces = []v1.Interface{libvmi.InterfaceDeviceWithBridgeBinding(libvmi.DefaultInterfaceName)}\n\t\t\t\twindowsVMI, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(windowsVMI)\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\ttests.WaitForSuccessfulVMIStartWithTimeout(windowsVMI, 420)\n\n\t\t\t\tcli = winrnLoginCommand(virtClient, windowsVMI)\n\t\t\t})\n\n\t\t\tIt(\"should be recognized by other pods in cluster\", func() {\n\n\t\t\t\tBy(\"Pinging virt-handler Pod from Windows VMI\")\n\n\t\t\t\twindowsVMI, err = virtClient.VirtualMachineInstance(windowsVMI.Namespace).Get(windowsVMI.Name, &metav1.GetOptions{})\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\tgetVirtHandlerPod := func() (*k8sv1.Pod, error) {\n\t\t\t\t\twinVmiPod := tests.GetRunningPodByVirtualMachineInstance(windowsVMI, windowsVMI.Namespace)\n\t\t\t\t\tnodeName := winVmiPod.Spec.NodeName\n\n\t\t\t\t\tpod, err := kubecli.NewVirtHandlerClient(virtClient).Namespace(flags.KubeVirtInstallNamespace).ForNode(nodeName).Pod()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"failed to get virt-handler pod on node %s: %v\", nodeName, err)\n\t\t\t\t\t}\n\t\t\t\t\treturn pod, nil\n\t\t\t\t}\n\n\t\t\t\tvirtHandlerPod, err := getVirtHandlerPod()\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\tvirtHandlerPodIP := libnet.GetPodIPByFamily(virtHandlerPod, k8sv1.IPv4Protocol)\n\n\t\t\t\tcommand := append(cli, fmt.Sprintf(\"ping %s\", virtHandlerPodIP))\n\n\t\t\t\tBy(fmt.Sprintf(\"Running \\\"%s\\\" command via winrm-cli\", command))\n\t\t\t\tEventually(func() error {\n\t\t\t\t\t_, err = tests.ExecuteCommandOnPod(\n\t\t\t\t\t\tvirtClient,\n\t\t\t\t\t\twinrmcliPod,\n\t\t\t\t\t\twinrmcliPod.Spec.Containers[0].Name,\n\t\t\t\t\t\tcommand,\n\t\t\t\t\t)\n\t\t\t\t\treturn err\n\t\t\t\t}, time.Minute*1, time.Second*15).Should(Succeed())\n\t\t\t})\n\t\t})\n\t})\n})\n\nfunc winrnLoginCommand(virtClient kubecli.KubevirtClient, windowsVMI *v1.VirtualMachineInstance) []string {\n\tvar err error\n\twindowsVMI, err = virtClient.VirtualMachineInstance(windowsVMI.Namespace).Get(windowsVMI.Name, &metav1.GetOptions{})\n\tExpectWithOffset(1, err).ToNot(HaveOccurred())\n\n\tvmiIp := windowsVMI.Status.Interfaces[0].IP\n\tcli := []string{\n\t\twinrmCliCmd,\n\t\t\"-hostname\",\n\t\tvmiIp,\n\t\t\"-username\",\n\t\twindowsVMIUser,\n\t\t\"-password\",\n\t\twindowsVMIPassword,\n\t}\n\n\treturn 
cli\n}\n\nfunc getPodSearchDomain(windowsVMI *v1.VirtualMachineInstance) string {\n\tBy(\"fetching \/etc\/resolv.conf from the VMI Pod\")\n\tresolvConf := tests.RunCommandOnVmiPod(windowsVMI, []string{\"cat\", \"\/etc\/resolv.conf\"})\n\n\tBy(\"extracting the search domain of the VMI\")\n\tsearchDomains, err := dns.ParseSearchDomains(resolvConf)\n\tExpectWithOffset(1, err).ToNot(HaveOccurred())\n\tsearchDomain := \"\"\n\tfor _, s := range searchDomains {\n\t\tif len(searchDomain) < len(s) {\n\t\t\tsearchDomain = s\n\t\t}\n\t}\n\n\treturn searchDomain\n}\n\nfunc runCommandAndExpectOutput(virtClient kubecli.KubevirtClient, winrmcliPod *k8sv1.Pod, cli []string, command, expectedOutputRegex string) {\n\tcliCmd := append(cli, command)\n\tBy(fmt.Sprintf(\"Running \\\"%s\\\" command via winrm-cli\", cliCmd))\n\tBy(\"first making sure that we can execute VMI commands\")\n\tEventuallyWithOffset(1, func() error {\n\t\t_, err := tests.ExecuteCommandOnPod(\n\t\t\tvirtClient,\n\t\t\twinrmcliPod,\n\t\t\twinrmcliPod.Spec.Containers[0].Name,\n\t\t\tcliCmd,\n\t\t)\n\t\treturn err\n\t}, time.Minute*5, time.Second*15).ShouldNot(HaveOccurred())\n\n\tBy(\"repeatedly trying to get the search domain, since it may take some time until the domain is set\")\n\tEventuallyWithOffset(1, func() string {\n\t\toutput, err := tests.ExecuteCommandOnPod(\n\t\t\tvirtClient,\n\t\t\twinrmcliPod,\n\t\t\twinrmcliPod.Spec.Containers[0].Name,\n\t\t\tcliCmd,\n\t\t)\n\t\tExpect(err).ToNot(HaveOccurred())\n\t\treturn output\n\t}, time.Minute*1, time.Second*10).Should(MatchRegexp(expectedOutputRegex))\n}\n<commit_msg>tests: windows: Make variables local<commit_after>\/*\n * This file is part of the KubeVirt project\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * Copyright 2018 Red Hat, Inc.\n *\n *\/\n\npackage tests_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"kubevirt.io\/kubevirt\/tests\/framework\/checks\"\n\n\t. \"github.com\/onsi\/ginkgo\/v2\"\n\t. 
\"github.com\/onsi\/gomega\"\n\tk8sv1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/resource\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\n\tv1 \"kubevirt.io\/api\/core\/v1\"\n\t\"kubevirt.io\/client-go\/kubecli\"\n\n\t\"kubevirt.io\/kubevirt\/pkg\/network\/dns\"\n\t\"kubevirt.io\/kubevirt\/tests\"\n\t\"kubevirt.io\/kubevirt\/tests\/flags\"\n\t\"kubevirt.io\/kubevirt\/tests\/libnet\"\n\t\"kubevirt.io\/kubevirt\/tests\/libstorage\"\n\t\"kubevirt.io\/kubevirt\/tests\/libvmi\"\n\t\"kubevirt.io\/kubevirt\/tests\/util\"\n)\n\nconst (\n\twindowsDisk = \"windows-disk\"\n\twindowsFirmware = \"5d307ca9-b3ef-428c-8861-06e72d69f223\"\n\twindowsVMIUser = \"Administrator\"\n\twindowsVMIPassword = \"Heslo123\"\n)\n\nconst (\n\twinrmCli = \"winrmcli\"\n\twinrmCliCmd = \"winrm-cli\"\n)\n\nvar getWindowsVMISpec = func() v1.VirtualMachineInstanceSpec {\n\tgracePeriod := int64(0)\n\tspinlocks := uint32(8191)\n\tfirmware := types.UID(windowsFirmware)\n\t_false := false\n\treturn v1.VirtualMachineInstanceSpec{\n\t\tTerminationGracePeriodSeconds: &gracePeriod,\n\t\tDomain: v1.DomainSpec{\n\t\t\tCPU: &v1.CPU{Cores: 2},\n\t\t\tFeatures: &v1.Features{\n\t\t\t\tACPI: v1.FeatureState{},\n\t\t\t\tAPIC: &v1.FeatureAPIC{},\n\t\t\t\tHyperv: &v1.FeatureHyperv{\n\t\t\t\t\tRelaxed: &v1.FeatureState{},\n\t\t\t\t\tSyNICTimer: &v1.SyNICTimer{Direct: &v1.FeatureState{}},\n\t\t\t\t\tVAPIC: &v1.FeatureState{},\n\t\t\t\t\tSpinlocks: &v1.FeatureSpinlocks{Retries: &spinlocks},\n\t\t\t\t},\n\t\t\t},\n\t\t\tClock: &v1.Clock{\n\t\t\t\tClockOffset: v1.ClockOffset{UTC: &v1.ClockOffsetUTC{}},\n\t\t\t\tTimer: &v1.Timer{\n\t\t\t\t\tHPET: &v1.HPETTimer{Enabled: &_false},\n\t\t\t\t\tPIT: &v1.PITTimer{TickPolicy: v1.PITTickPolicyDelay},\n\t\t\t\t\tRTC: &v1.RTCTimer{TickPolicy: v1.RTCTickPolicyCatchup},\n\t\t\t\t\tHyperv: &v1.HypervTimer{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tFirmware: &v1.Firmware{UUID: firmware},\n\t\t\tResources: v1.ResourceRequirements{\n\t\t\t\tRequests: k8sv1.ResourceList{\n\t\t\t\t\tk8sv1.ResourceMemory: resource.MustParse(\"2048Mi\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tDevices: v1.Devices{\n\t\t\t\tDisks: []v1.Disk{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: windowsDisk,\n\t\t\t\t\t\tDiskDevice: v1.DiskDevice{\n\t\t\t\t\t\t\tDisk: &v1.DiskTarget{\n\t\t\t\t\t\t\t\tBus: v1.DiskBusSATA,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tVolumes: []v1.Volume{\n\t\t\t{\n\t\t\t\tName: windowsDisk,\n\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\tEphemeral: &v1.EphemeralVolumeSource{\n\t\t\t\t\t\tPersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\t\tClaimName: tests.DiskWindows,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n}\n\nvar _ = Describe(\"[Serial][sig-compute]Windows VirtualMachineInstance\", func() {\n\tvar virtClient kubecli.KubevirtClient\n\tvar windowsVMI *v1.VirtualMachineInstance\n\n\tBeforeEach(func() {\n\t\tconst OSWindows = \"windows\"\n\t\tvar err error\n\t\tvirtClient, err = kubecli.GetKubevirtClient()\n\t\tutil.PanicOnError(err)\n\t\tchecks.SkipIfMissingRequiredImage(virtClient, tests.DiskWindows)\n\t\tlibstorage.CreatePVC(OSWindows, \"30Gi\", libstorage.Config.StorageClassWindows, true)\n\t\twindowsVMI = tests.NewRandomVMI()\n\t\twindowsVMI.Spec = getWindowsVMISpec()\n\t\ttests.AddExplicitPodNetworkInterface(windowsVMI)\n\t\twindowsVMI.Spec.Domain.Devices.Interfaces[0].Model = \"e1000\"\n\t})\n\n\tContext(\"with winrm connection\", func() {\n\t\tvar winrmcliPod 
*k8sv1.Pod\n\t\tvar cli []string\n\n\t\tBeforeEach(func() {\n\t\t\tBy(\"Creating winrm-cli pod for the future use\")\n\t\t\twinrmcliPod = &k8sv1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{GenerateName: winrmCli},\n\t\t\t\tSpec: k8sv1.PodSpec{\n\t\t\t\t\tContainers: []k8sv1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: winrmCli,\n\t\t\t\t\t\t\tImage: fmt.Sprintf(\"%s\/%s:%s\", flags.KubeVirtUtilityRepoPrefix, winrmCli, flags.KubeVirtUtilityVersionTag),\n\t\t\t\t\t\t\tCommand: []string{\"sleep\"},\n\t\t\t\t\t\t\tArgs: []string{\"3600\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tvar err error\n\t\t\twinrmcliPod, err = virtClient.CoreV1().Pods(util.NamespaceTestDefault).Create(context.Background(), winrmcliPod, metav1.CreateOptions{})\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t})\n\n\t\tContext(\"[ref_id:139]VMI is created\", func() {\n\n\t\t\tBeforeEach(func() {\n\t\t\t\tBy(\"Starting the windows VirtualMachineInstance\")\n\t\t\t\tvar err error\n\t\t\t\twindowsVMI, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(windowsVMI)\n\t\t\t\tExpect(err).To(BeNil())\n\t\t\t\ttests.WaitForSuccessfulVMIStartWithTimeout(windowsVMI, 360)\n\n\t\t\t\tcli = winrnLoginCommand(virtClient, windowsVMI)\n\t\t\t})\n\n\t\t\tIt(\"[test_id:240]should have correct UUID\", func() {\n\t\t\t\tcommand := append(cli, \"wmic csproduct get \\\"UUID\\\"\")\n\t\t\t\tBy(fmt.Sprintf(\"Running \\\"%s\\\" command via winrm-cli\", command))\n\t\t\t\tvar output string\n\t\t\t\tEventually(func() error {\n\t\t\t\t\tvar err error\n\t\t\t\t\toutput, err = tests.ExecuteCommandOnPod(\n\t\t\t\t\t\tvirtClient,\n\t\t\t\t\t\twinrmcliPod,\n\t\t\t\t\t\twinrmcliPod.Spec.Containers[0].Name,\n\t\t\t\t\t\tcommand,\n\t\t\t\t\t)\n\t\t\t\t\treturn err\n\t\t\t\t}, time.Minute*5, time.Second*15).ShouldNot(HaveOccurred())\n\t\t\t\tBy(\"Checking that the Windows VirtualMachineInstance has expected UUID\")\n\t\t\t\tExpect(output).Should(ContainSubstring(strings.ToUpper(windowsFirmware)))\n\t\t\t})\n\n\t\t\tIt(\"[test_id:3159]should have default masquerade IP\", func() {\n\t\t\t\tcommand := append(cli, \"ipconfig \/all\")\n\t\t\t\tBy(fmt.Sprintf(\"Running \\\"%s\\\" command via winrm-cli\", command))\n\t\t\t\tvar output string\n\t\t\t\tEventually(func() error {\n\t\t\t\t\tvar err error\n\t\t\t\t\toutput, err = tests.ExecuteCommandOnPod(\n\t\t\t\t\t\tvirtClient,\n\t\t\t\t\t\twinrmcliPod,\n\t\t\t\t\t\twinrmcliPod.Spec.Containers[0].Name,\n\t\t\t\t\t\tcommand,\n\t\t\t\t\t)\n\t\t\t\t\treturn err\n\t\t\t\t}, time.Minute*5, time.Second*15).ShouldNot(HaveOccurred())\n\n\t\t\t\tBy(\"Checking that the Windows VirtualMachineInstance has expected IP address\")\n\t\t\t\tExpect(output).Should(ContainSubstring(\"10.0.2.2\"))\n\t\t\t})\n\n\t\t\tIt(\"[test_id:3160]should have the domain set properly\", func() {\n\t\t\t\tsearchDomain := getPodSearchDomain(windowsVMI)\n\t\t\t\tExpect(searchDomain).To(HavePrefix(windowsVMI.Namespace), \"should contain a searchdomain with the namespace of the VMI\")\n\n\t\t\t\trunCommandAndExpectOutput(virtClient,\n\t\t\t\t\twinrmcliPod,\n\t\t\t\t\tcli,\n\t\t\t\t\t\"wmic nicconfig get dnsdomain\",\n\t\t\t\t\t`DNSDomain[\\n\\r\\t ]+`+searchDomain+`[\\n\\r\\t ]+`)\n\t\t\t})\n\t\t})\n\n\t\tContext(\"VMI with subdomain is created\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\twindowsVMI.Spec.Subdomain = \"subdomain\"\n\n\t\t\t\tBy(\"Starting the windows VirtualMachineInstance with subdomain\")\n\t\t\t\tvar err error\n\t\t\t\twindowsVMI, err = 
virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(windowsVMI)\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\ttests.WaitForSuccessfulVMIStartWithTimeout(windowsVMI, 360)\n\n\t\t\t\tcli = winrnLoginCommand(virtClient, windowsVMI)\n\t\t\t})\n\n\t\t\tIt(\"should have the domain set properly with subdomain\", func() {\n\t\t\t\tsearchDomain := getPodSearchDomain(windowsVMI)\n\t\t\t\tExpect(searchDomain).To(HavePrefix(windowsVMI.Namespace), \"should contain a searchdomain with the namespace of the VMI\")\n\n\t\t\t\texpectedSearchDomain := windowsVMI.Spec.Subdomain + \".\" + searchDomain\n\t\t\t\trunCommandAndExpectOutput(virtClient,\n\t\t\t\t\twinrmcliPod,\n\t\t\t\t\tcli,\n\t\t\t\t\t\"wmic nicconfig get dnsdomain\",\n\t\t\t\t\t`DNSDomain[\\n\\r\\t ]+`+expectedSearchDomain+`[\\n\\r\\t ]+`)\n\t\t\t})\n\t\t})\n\n\t\tContext(\"with bridge binding\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tBy(\"Starting Windows VirtualMachineInstance with bridge binding\")\n\t\t\t\twindowsVMI.Spec.Domain.Devices.Interfaces = []v1.Interface{libvmi.InterfaceDeviceWithBridgeBinding(libvmi.DefaultInterfaceName)}\n\t\t\t\tvar err error\n\t\t\t\twindowsVMI, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(windowsVMI)\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\ttests.WaitForSuccessfulVMIStartWithTimeout(windowsVMI, 420)\n\n\t\t\t\tcli = winrnLoginCommand(virtClient, windowsVMI)\n\t\t\t})\n\n\t\t\tIt(\"should be recognized by other pods in cluster\", func() {\n\n\t\t\t\tBy(\"Pinging virt-handler Pod from Windows VMI\")\n\n\t\t\t\tvar err error\n\t\t\t\twindowsVMI, err = virtClient.VirtualMachineInstance(windowsVMI.Namespace).Get(windowsVMI.Name, &metav1.GetOptions{})\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\tgetVirtHandlerPod := func() (*k8sv1.Pod, error) {\n\t\t\t\t\twinVmiPod := tests.GetRunningPodByVirtualMachineInstance(windowsVMI, windowsVMI.Namespace)\n\t\t\t\t\tnodeName := winVmiPod.Spec.NodeName\n\n\t\t\t\t\tpod, err := kubecli.NewVirtHandlerClient(virtClient).Namespace(flags.KubeVirtInstallNamespace).ForNode(nodeName).Pod()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"failed to get virt-handler pod on node %s: %v\", nodeName, err)\n\t\t\t\t\t}\n\t\t\t\t\treturn pod, nil\n\t\t\t\t}\n\n\t\t\t\tvirtHandlerPod, err := getVirtHandlerPod()\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\tvirtHandlerPodIP := libnet.GetPodIPByFamily(virtHandlerPod, k8sv1.IPv4Protocol)\n\n\t\t\t\tcommand := append(cli, fmt.Sprintf(\"ping %s\", virtHandlerPodIP))\n\n\t\t\t\tBy(fmt.Sprintf(\"Running \\\"%s\\\" command via winrm-cli\", command))\n\t\t\t\tEventually(func() error {\n\t\t\t\t\t_, err = tests.ExecuteCommandOnPod(\n\t\t\t\t\t\tvirtClient,\n\t\t\t\t\t\twinrmcliPod,\n\t\t\t\t\t\twinrmcliPod.Spec.Containers[0].Name,\n\t\t\t\t\t\tcommand,\n\t\t\t\t\t)\n\t\t\t\t\treturn err\n\t\t\t\t}, time.Minute*1, time.Second*15).Should(Succeed())\n\t\t\t})\n\t\t})\n\t})\n})\n\nfunc winrnLoginCommand(virtClient kubecli.KubevirtClient, windowsVMI *v1.VirtualMachineInstance) []string {\n\tvar err error\n\twindowsVMI, err = virtClient.VirtualMachineInstance(windowsVMI.Namespace).Get(windowsVMI.Name, &metav1.GetOptions{})\n\tExpectWithOffset(1, err).ToNot(HaveOccurred())\n\n\tvmiIp := windowsVMI.Status.Interfaces[0].IP\n\tcli := []string{\n\t\twinrmCliCmd,\n\t\t\"-hostname\",\n\t\tvmiIp,\n\t\t\"-username\",\n\t\twindowsVMIUser,\n\t\t\"-password\",\n\t\twindowsVMIPassword,\n\t}\n\n\treturn cli\n}\n\nfunc getPodSearchDomain(windowsVMI *v1.VirtualMachineInstance) 
string {\n\tBy(\"fetching \/etc\/resolv.conf from the VMI Pod\")\n\tresolvConf := tests.RunCommandOnVmiPod(windowsVMI, []string{\"cat\", \"\/etc\/resolv.conf\"})\n\n\tBy(\"extracting the search domain of the VMI\")\n\tsearchDomains, err := dns.ParseSearchDomains(resolvConf)\n\tExpectWithOffset(1, err).ToNot(HaveOccurred())\n\tsearchDomain := \"\"\n\tfor _, s := range searchDomains {\n\t\tif len(searchDomain) < len(s) {\n\t\t\tsearchDomain = s\n\t\t}\n\t}\n\n\treturn searchDomain\n}\n\nfunc runCommandAndExpectOutput(virtClient kubecli.KubevirtClient, winrmcliPod *k8sv1.Pod, cli []string, command, expectedOutputRegex string) {\n\tcliCmd := append(cli, command)\n\tBy(fmt.Sprintf(\"Running \\\"%s\\\" command via winrm-cli\", cliCmd))\n\tBy(\"first making sure that we can execute VMI commands\")\n\tEventuallyWithOffset(1, func() error {\n\t\t_, err := tests.ExecuteCommandOnPod(\n\t\t\tvirtClient,\n\t\t\twinrmcliPod,\n\t\t\twinrmcliPod.Spec.Containers[0].Name,\n\t\t\tcliCmd,\n\t\t)\n\t\treturn err\n\t}, time.Minute*5, time.Second*15).ShouldNot(HaveOccurred())\n\n\tBy(\"repeatedly trying to get the search domain, since it may take some time until the domain is set\")\n\tEventuallyWithOffset(1, func() string {\n\t\toutput, err := tests.ExecuteCommandOnPod(\n\t\t\tvirtClient,\n\t\t\twinrmcliPod,\n\t\t\twinrmcliPod.Spec.Containers[0].Name,\n\t\t\tcliCmd,\n\t\t)\n\t\tExpect(err).ToNot(HaveOccurred())\n\t\treturn output\n\t}, time.Minute*1, time.Second*10).Should(MatchRegexp(expectedOutputRegex))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n)\n\ntype Counter struct {\n\tctx *Context\n\tc chan int\n\ti int\n}\n\nfunc NewCounter(ctx *Context, wg *sync.WaitGroup) *Counter {\n\tcounter := new(Counter)\n\tcounter.c = make(chan int)\n\tcounter.ctx = ctx\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tdone := counter.ctx.GetDone()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase counter.c <- counter.i:\n\t\t\t\tcounter.i += 1\n\t\t\tcase <-done:\n\t\t\t\t\/\/ Add logging here.\n\t\t\t\tfmt.Println(\"Counter terminated\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn counter\n}\n\nfunc (c *Counter) GetSource() <-chan int {\n\treturn c.c\n}\n<commit_msg>Changed number increment for multiplier counter.<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n)\n\ntype Counter struct {\n\tctx *Context\n\tc chan int\n\ti int\n}\n\nfunc NewCounter(ctx *Context, wg *sync.WaitGroup) *Counter {\n\tcounter := new(Counter)\n\tcounter.c = make(chan int)\n\tcounter.ctx = ctx\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tdone := counter.ctx.GetDone()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase counter.c <- counter.i:\n\t\t\t\tcounter.i++\n\t\t\tcase <-done:\n\t\t\t\tfmt.Println(\"Counter terminated\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn counter\n}\n\nfunc (c *Counter) GetSource() <-chan int {\n\treturn c.c\n}\n<|endoftext|>"} {"text":"<commit_before>package machinenodelookup\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/rancher\/lasso\/pkg\/dynamic\"\n\tv1 \"github.com\/rancher\/rancher\/pkg\/apis\/provisioning.cattle.io\/v1\"\n\trkev1 \"github.com\/rancher\/rancher\/pkg\/apis\/rke.cattle.io\/v1\"\n\tcapicontrollers \"github.com\/rancher\/rancher\/pkg\/generated\/controllers\/cluster.x-k8s.io\/v1alpha4\"\n\tranchercontrollers \"github.com\/rancher\/rancher\/pkg\/generated\/controllers\/provisioning.cattle.io\/v1\"\n\trkecontroller 
\"github.com\/rancher\/rancher\/pkg\/generated\/controllers\/rke.cattle.io\/v1\"\n\t\"github.com\/rancher\/rancher\/pkg\/provisioningv2\/kubeconfig\"\n\t\"github.com\/rancher\/rancher\/pkg\/provisioningv2\/rke2\/planner\"\n\t\"github.com\/rancher\/rancher\/pkg\/provisioningv2\/rke2\/runtime\"\n\t\"github.com\/rancher\/rancher\/pkg\/wrangler\"\n\t\"github.com\/rancher\/wrangler\/pkg\/data\"\n\t\"github.com\/rancher\/wrangler\/pkg\/generic\"\n\t\"github.com\/sirupsen\/logrus\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tapierror \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\tcapi \"sigs.k8s.io\/cluster-api\/api\/v1alpha4\"\n)\n\nconst (\n\tClusterNameLabel = \"rke.cattle.io\/cluster-name\"\n\tplanSecret = \"rke.cattle.io\/plan-secret-name\"\n\troleLabel = \"rke.cattle.io\/service-account-role\"\n\troleBootstrap = \"bootstrap\"\n\trolePlan = \"plan\"\n\n\tnodeErrorEnqueueTime = 15 * time.Second\n)\n\nvar (\n\tbootstrapAPIVersion = fmt.Sprintf(\"%s\/%s\", rkev1.SchemeGroupVersion.Group, rkev1.SchemeGroupVersion.Version)\n)\n\ntype handler struct {\n\trancherClusterCache ranchercontrollers.ClusterCache\n\tmachineCache capicontrollers.MachineCache\n\tmachines capicontrollers.MachineController\n\trkeBootstrap rkecontroller.RKEBootstrapController\n\tkubeconfigManager *kubeconfig.Manager\n\tdynamic *dynamic.Controller\n}\n\nfunc Register(ctx context.Context, clients *wrangler.Context) {\n\th := &handler{\n\t\trancherClusterCache: clients.Provisioning.Cluster().Cache(),\n\t\tmachines: clients.CAPI.Machine(),\n\t\tmachineCache: clients.CAPI.Machine().Cache(),\n\t\trkeBootstrap: clients.RKE.RKEBootstrap(),\n\t\tkubeconfigManager: kubeconfig.New(clients),\n\t\tdynamic: clients.Dynamic,\n\t}\n\n\tclients.RKE.RKEBootstrap().OnChange(ctx, \"machine-node-lookup\", h.associateMachineWithNode)\n}\n\nfunc (h *handler) getMachine(obj *rkev1.RKEBootstrap) (*capi.Machine, error) {\n\tfor _, ref := range obj.OwnerReferences {\n\t\tgvk := schema.FromAPIVersionAndKind(ref.APIVersion, ref.Kind)\n\t\tif capi.GroupVersion.Group != gvk.Group ||\n\t\t\tref.Kind != \"Machine\" {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn h.machineCache.Get(obj.Namespace, ref.Name)\n\t}\n\treturn nil, generic.ErrSkip\n}\n\nfunc (h *handler) associateMachineWithNode(_ string, bootstrap *rkev1.RKEBootstrap) (*rkev1.RKEBootstrap, error) {\n\tif bootstrap == nil || bootstrap.DeletionTimestamp != nil {\n\t\treturn bootstrap, nil\n\t}\n\n\tif !bootstrap.Status.Ready || bootstrap.Status.DataSecretName == nil || *bootstrap.Status.DataSecretName == \"\" {\n\t\treturn bootstrap, nil\n\t}\n\n\tmachine, err := h.getMachine(bootstrap)\n\tif err != nil {\n\t\treturn bootstrap, err\n\t}\n\n\tif machine.Spec.ProviderID != nil && *machine.Spec.ProviderID != \"\" {\n\t\t\/\/ If the machine already has its provider ID set, then we do not need to continue\n\t\treturn bootstrap, nil\n\t}\n\n\trancherCluster, err := h.rancherClusterCache.Get(machine.Namespace, machine.Spec.ClusterName)\n\tif err != nil {\n\t\treturn bootstrap, err\n\t}\n\n\tconfig, err := h.kubeconfigManager.GetRESTConfig(rancherCluster, rancherCluster.Status)\n\tif err != nil {\n\t\treturn bootstrap, err\n\t}\n\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn bootstrap, err\n\t}\n\n\tnodeLabelSelector := 
metav1.LabelSelector{MatchLabels: map[string]string{planner.MachineUIDLabel: string(machine.GetUID())}}\n\tnodes, err := clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Set(nodeLabelSelector.MatchLabels).String()})\n\tif err != nil || len(nodes.Items) == 0 || nodes.Items[0].Spec.ProviderID == \"\" {\n\t\tlogrus.Debugf(\"Searching for providerID for selector %s in cluster %s\/%s, machine %s: %v\",\n\t\t\tlabels.Set(nodeLabelSelector.MatchLabels), rancherCluster.Namespace, rancherCluster.Name, machine.Name, err)\n\t\th.rkeBootstrap.EnqueueAfter(bootstrap.Namespace, bootstrap.Name, nodeErrorEnqueueTime)\n\t\treturn bootstrap, nil\n\t}\n\n\treturn bootstrap, h.updateMachine(&nodes.Items[0], machine, rancherCluster)\n}\n\nfunc (h *handler) updateMachineJoinURL(node *corev1.Node, machine *capi.Machine, rancherCluster *v1.Cluster) error {\n\taddress := \"\"\n\tfor _, nodeAddress := range node.Status.Addresses {\n\t\tswitch nodeAddress.Type {\n\t\tcase corev1.NodeInternalIP:\n\t\t\taddress = nodeAddress.Address\n\t\tcase corev1.NodeExternalIP:\n\t\t\tif address == \"\" {\n\t\t\t\taddress = nodeAddress.Address\n\t\t\t}\n\t\t}\n\t}\n\n\turl := fmt.Sprintf(\"https:\/\/%s:%d\", address, runtime.GetRuntimeSupervisorPort(rancherCluster.Spec.KubernetesVersion))\n\tif machine.Annotations[planner.JoinURLAnnotation] == url {\n\t\treturn nil\n\t}\n\n\tmachine = machine.DeepCopy()\n\tif machine.Annotations == nil {\n\t\tmachine.Annotations = map[string]string{}\n\t}\n\n\tmachine.Annotations[planner.JoinURLAnnotation] = url\n\t_, err := h.machines.Update(machine)\n\treturn err\n}\n\nfunc (h *handler) updateMachine(node *corev1.Node, machine *capi.Machine, rancherCluster *v1.Cluster) error {\n\tif err := h.updateMachineJoinURL(node, machine, rancherCluster); err != nil {\n\t\treturn err\n\t}\n\n\tgvk := schema.FromAPIVersionAndKind(machine.Spec.InfrastructureRef.APIVersion, machine.Spec.InfrastructureRef.Kind)\n\tinfra, err := h.dynamic.Get(gvk, machine.Namespace, machine.Spec.InfrastructureRef.Name)\n\tif apierror.IsNotFound(err) {\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn err\n\t}\n\n\td, err := data.Convert(infra)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif d.String(\"spec\", \"providerID\") != node.Spec.ProviderID {\n\t\tobj, err := data.Convert(infra.DeepCopyObject())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tobj.SetNested(node.Status.Addresses, \"status\", \"addresses\")\n\t\tnewObj, err := h.dynamic.UpdateStatus(&unstructured.Unstructured{\n\t\t\tObject: obj,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tobj, err = data.Convert(newObj)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tobj.SetNested(node.Spec.ProviderID, \"spec\", \"providerID\")\n\t\t_, err = h.dynamic.Update(&unstructured.Unstructured{\n\t\t\tObject: obj,\n\t\t})\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>Fix race on node Ready condition on provision<commit_after>package machinenodelookup\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/rancher\/lasso\/pkg\/dynamic\"\n\tv1 \"github.com\/rancher\/rancher\/pkg\/apis\/provisioning.cattle.io\/v1\"\n\trkev1 \"github.com\/rancher\/rancher\/pkg\/apis\/rke.cattle.io\/v1\"\n\tcapicontrollers \"github.com\/rancher\/rancher\/pkg\/generated\/controllers\/cluster.x-k8s.io\/v1alpha4\"\n\tranchercontrollers \"github.com\/rancher\/rancher\/pkg\/generated\/controllers\/provisioning.cattle.io\/v1\"\n\trkecontroller 
\"github.com\/rancher\/rancher\/pkg\/generated\/controllers\/rke.cattle.io\/v1\"\n\t\"github.com\/rancher\/rancher\/pkg\/provisioningv2\/kubeconfig\"\n\t\"github.com\/rancher\/rancher\/pkg\/provisioningv2\/rke2\/planner\"\n\t\"github.com\/rancher\/rancher\/pkg\/provisioningv2\/rke2\/runtime\"\n\t\"github.com\/rancher\/rancher\/pkg\/wrangler\"\n\t\"github.com\/rancher\/wrangler\/pkg\/condition\"\n\t\"github.com\/rancher\/wrangler\/pkg\/data\"\n\t\"github.com\/rancher\/wrangler\/pkg\/generic\"\n\t\"github.com\/sirupsen\/logrus\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tapierror \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\tcapi \"sigs.k8s.io\/cluster-api\/api\/v1alpha4\"\n)\n\nconst (\n\tClusterNameLabel = \"rke.cattle.io\/cluster-name\"\n\tplanSecret = \"rke.cattle.io\/plan-secret-name\"\n\troleLabel = \"rke.cattle.io\/service-account-role\"\n\troleBootstrap = \"bootstrap\"\n\trolePlan = \"plan\"\n\n\tnodeErrorEnqueueTime = 15 * time.Second\n)\n\nvar (\n\tbootstrapAPIVersion = fmt.Sprintf(\"%s\/%s\", rkev1.SchemeGroupVersion.Group, rkev1.SchemeGroupVersion.Version)\n)\n\ntype handler struct {\n\trancherClusterCache ranchercontrollers.ClusterCache\n\tmachineCache capicontrollers.MachineCache\n\tmachines capicontrollers.MachineController\n\trkeBootstrap rkecontroller.RKEBootstrapController\n\tkubeconfigManager *kubeconfig.Manager\n\tdynamic *dynamic.Controller\n}\n\nfunc Register(ctx context.Context, clients *wrangler.Context) {\n\th := &handler{\n\t\trancherClusterCache: clients.Provisioning.Cluster().Cache(),\n\t\tmachines: clients.CAPI.Machine(),\n\t\tmachineCache: clients.CAPI.Machine().Cache(),\n\t\trkeBootstrap: clients.RKE.RKEBootstrap(),\n\t\tkubeconfigManager: kubeconfig.New(clients),\n\t\tdynamic: clients.Dynamic,\n\t}\n\n\tclients.RKE.RKEBootstrap().OnChange(ctx, \"machine-node-lookup\", h.associateMachineWithNode)\n}\n\nfunc (h *handler) getMachine(obj *rkev1.RKEBootstrap) (*capi.Machine, error) {\n\tfor _, ref := range obj.OwnerReferences {\n\t\tgvk := schema.FromAPIVersionAndKind(ref.APIVersion, ref.Kind)\n\t\tif capi.GroupVersion.Group != gvk.Group ||\n\t\t\tref.Kind != \"Machine\" {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn h.machineCache.Get(obj.Namespace, ref.Name)\n\t}\n\treturn nil, generic.ErrSkip\n}\n\nfunc (h *handler) associateMachineWithNode(_ string, bootstrap *rkev1.RKEBootstrap) (*rkev1.RKEBootstrap, error) {\n\tif bootstrap == nil || bootstrap.DeletionTimestamp != nil {\n\t\treturn bootstrap, nil\n\t}\n\n\tif !bootstrap.Status.Ready || bootstrap.Status.DataSecretName == nil || *bootstrap.Status.DataSecretName == \"\" {\n\t\treturn bootstrap, nil\n\t}\n\n\tmachine, err := h.getMachine(bootstrap)\n\tif err != nil {\n\t\treturn bootstrap, err\n\t}\n\n\tif machine.Spec.ProviderID != nil && *machine.Spec.ProviderID != \"\" {\n\t\t\/\/ If the machine already has its provider ID set, then we do not need to continue\n\t\treturn bootstrap, nil\n\t}\n\n\trancherCluster, err := h.rancherClusterCache.Get(machine.Namespace, machine.Spec.ClusterName)\n\tif err != nil {\n\t\treturn bootstrap, err\n\t}\n\n\tconfig, err := h.kubeconfigManager.GetRESTConfig(rancherCluster, rancherCluster.Status)\n\tif err != nil {\n\t\treturn bootstrap, err\n\t}\n\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn bootstrap, 
err\n\t}\n\n\tnodeLabelSelector := metav1.LabelSelector{MatchLabels: map[string]string{planner.MachineUIDLabel: string(machine.GetUID())}}\n\tnodes, err := clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Set(nodeLabelSelector.MatchLabels).String()})\n\tif err != nil || len(nodes.Items) == 0 || nodes.Items[0].Spec.ProviderID == \"\" || !condition.Cond(\"Ready\").IsTrue(nodes.Items[0]) {\n\t\tlogrus.Debugf(\"Searching for providerID for selector %s in cluster %s\/%s, machine %s: %v\",\n\t\t\tlabels.Set(nodeLabelSelector.MatchLabels), rancherCluster.Namespace, rancherCluster.Name, machine.Name, err)\n\t\th.rkeBootstrap.EnqueueAfter(bootstrap.Namespace, bootstrap.Name, nodeErrorEnqueueTime)\n\t\treturn bootstrap, nil\n\t}\n\n\treturn bootstrap, h.updateMachine(&nodes.Items[0], machine, rancherCluster)\n}\n\nfunc (h *handler) updateMachineJoinURL(node *corev1.Node, machine *capi.Machine, rancherCluster *v1.Cluster) error {\n\taddress := \"\"\n\tfor _, nodeAddress := range node.Status.Addresses {\n\t\tswitch nodeAddress.Type {\n\t\tcase corev1.NodeInternalIP:\n\t\t\taddress = nodeAddress.Address\n\t\tcase corev1.NodeExternalIP:\n\t\t\tif address == \"\" {\n\t\t\t\taddress = nodeAddress.Address\n\t\t\t}\n\t\t}\n\t}\n\n\turl := fmt.Sprintf(\"https:\/\/%s:%d\", address, runtime.GetRuntimeSupervisorPort(rancherCluster.Spec.KubernetesVersion))\n\tif machine.Annotations[planner.JoinURLAnnotation] == url {\n\t\treturn nil\n\t}\n\n\tmachine = machine.DeepCopy()\n\tif machine.Annotations == nil {\n\t\tmachine.Annotations = map[string]string{}\n\t}\n\n\tmachine.Annotations[planner.JoinURLAnnotation] = url\n\t_, err := h.machines.Update(machine)\n\treturn err\n}\n\nfunc (h *handler) updateMachine(node *corev1.Node, machine *capi.Machine, rancherCluster *v1.Cluster) error {\n\tif err := h.updateMachineJoinURL(node, machine, rancherCluster); err != nil {\n\t\treturn err\n\t}\n\n\tgvk := schema.FromAPIVersionAndKind(machine.Spec.InfrastructureRef.APIVersion, machine.Spec.InfrastructureRef.Kind)\n\tinfra, err := h.dynamic.Get(gvk, machine.Namespace, machine.Spec.InfrastructureRef.Name)\n\tif apierror.IsNotFound(err) {\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn err\n\t}\n\n\td, err := data.Convert(infra)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif d.String(\"spec\", \"providerID\") != node.Spec.ProviderID {\n\t\tobj, err := data.Convert(infra.DeepCopyObject())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tobj.SetNested(node.Status.Addresses, \"status\", \"addresses\")\n\t\tnewObj, err := h.dynamic.UpdateStatus(&unstructured.Unstructured{\n\t\t\tObject: obj,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tobj, err = data.Convert(newObj)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tobj.SetNested(node.Spec.ProviderID, \"spec\", \"providerID\")\n\t\t_, err = h.dynamic.Update(&unstructured.Unstructured{\n\t\t\tObject: obj,\n\t\t})\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package ninchatapi\n\nimport (\n\t\"github.com\/ninchat\/ninchat-go\"\n)\n\ntype action interface {\n\tnewClientAction() (*ninchat.Action, error)\n}\n\n\/\/ Call an action with or without a session.\nfunc Call(session *ninchat.Session, events chan<- *ninchat.Event, action action) (err error) {\n\tclientAction, err := action.newClientAction()\n\tif err != nil {\n\t\tclose(events)\n\t\treturn\n\t}\n\n\tclientAction.OnReply = func(e *ninchat.Event) {\n\t\tif e == nil {\n\t\t\tclose(events)\n\t\t} else {\n\t\t\tevents <- 
e\n\t\t\tif e.LastReply {\n\t\t\t\tclose(events)\n\t\t\t}\n\t\t}\n\t}\n\n\tif session == nil {\n\t\tif _, err = ninchat.Call(clientAction); err != nil {\n\t\t\tclose(events)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif x, found := clientAction.Params[\"action_id\"]; found && x == nil {\n\t\t\tclose(events)\n\t\t\tpanic(\"calling via session but action_id is disabled\")\n\t\t}\n\n\t\tsession.Send(clientAction)\n\t}\n\n\treturn\n}\n\n\/\/ Send an action.\nfunc Send(session *ninchat.Session, action action) (err error) {\n\tclientAction, err := action.newClientAction()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tsession.Send(clientAction)\n\treturn\n}\n\nfunc unaryCall(session *ninchat.Session, action action, event eventInit) (ok bool, err error) {\n\tc := make(chan *ninchat.Event, 1) \/\/ XXX: why doesn't this work without buffering?\n\n\tif err = Call(session, c, action); err != nil {\n\t\treturn\n\t}\n\n\tclientEvent := <-c\n\tflush(c)\n\n\tif clientEvent == nil {\n\t\treturn\n\t}\n\n\tok = true\n\n\tif clientEvent.String() == \"error\" {\n\t\terr = newError(clientEvent)\n\t} else {\n\t\terr = event.init(clientEvent)\n\t}\n\treturn\n}\n\nfunc flush(c <-chan *ninchat.Event) {\n\tselect {\n\tcase _, open := <-c:\n\t\tif !open {\n\t\t\treturn\n\t\t}\n\n\tdefault:\n\t}\n\n\tgo func() {\n\t\tfor range c {\n\t\t}\n\t}()\n}\n<commit_msg>ninchatapi: exported Action interface<commit_after>package ninchatapi\n\nimport (\n\t\"github.com\/ninchat\/ninchat-go\"\n)\n\n\/\/ Action interface is implemented by all action structs.\ntype Action interface {\n\tnewClientAction() (*ninchat.Action, error)\n}\n\n\/\/ Call an action with or without a session.\nfunc Call(session *ninchat.Session, events chan<- *ninchat.Event, action Action) (err error) {\n\tclientAction, err := action.newClientAction()\n\tif err != nil {\n\t\tclose(events)\n\t\treturn\n\t}\n\n\tclientAction.OnReply = func(e *ninchat.Event) {\n\t\tif e == nil {\n\t\t\tclose(events)\n\t\t} else {\n\t\t\tevents <- e\n\t\t\tif e.LastReply {\n\t\t\t\tclose(events)\n\t\t\t}\n\t\t}\n\t}\n\n\tif session == nil {\n\t\tif _, err = ninchat.Call(clientAction); err != nil {\n\t\t\tclose(events)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif x, found := clientAction.Params[\"action_id\"]; found && x == nil {\n\t\t\tclose(events)\n\t\t\tpanic(\"calling via session but action_id is disabled\")\n\t\t}\n\n\t\tsession.Send(clientAction)\n\t}\n\n\treturn\n}\n\n\/\/ Send an action.\nfunc Send(session *ninchat.Session, action Action) (err error) {\n\tclientAction, err := action.newClientAction()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tsession.Send(clientAction)\n\treturn\n}\n\nfunc unaryCall(session *ninchat.Session, action Action, event eventInit) (ok bool, err error) {\n\tc := make(chan *ninchat.Event, 1) \/\/ XXX: why doesn't this work without buffering?\n\n\tif err = Call(session, c, action); err != nil {\n\t\treturn\n\t}\n\n\tclientEvent := <-c\n\tflush(c)\n\n\tif clientEvent == nil {\n\t\treturn\n\t}\n\n\tok = true\n\n\tif clientEvent.String() == \"error\" {\n\t\terr = newError(clientEvent)\n\t} else {\n\t\terr = event.init(clientEvent)\n\t}\n\treturn\n}\n\nfunc flush(c <-chan *ninchat.Event) {\n\tselect {\n\tcase _, open := <-c:\n\t\tif !open {\n\t\t\treturn\n\t\t}\n\n\tdefault:\n\t}\n\n\tgo func() {\n\t\tfor range c {\n\t\t}\n\t}()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"encoding\/json\"\n \"flag\"\n \"fmt\"\n log \"github.com\/cihub\/seelog\"\n \"net\"\n \"os\/exec\"\n \"regexp\"\n)\n\nvar Info InfoStruct\nvar devices []Device\n\nvar host 
string\nvar port string\n\ntype Device struct { \/*{{{*\/\n Id string\n Name string\n State string\n Features []string\n} \/*}}}*\/\ntype InfoStruct struct { \/*{{{*\/\n Id string\n Actions []Action\n Layout []Layout\n State State\n} \/*}}}*\/\ntype Action struct { \/*{{{*\/\n Id string\n Name string\n Arguments []string\n} \/*}}}*\/\ntype Layout struct { \/*{{{*\/\n Id string\n Type string\n Action string\n Using string\n Section string\n} \/*}}}*\/\ntype State struct { \/*{{{*\/\n Devices []Device\n} \/*}}}*\/\n\nfunc main() {\n \/\/ Load logger\n logger, err := log.LoggerFromConfigAsFile(\"..\/logconfig.xml\")\n if err != nil {\n panic(err)\n }\n log.ReplaceLogger(logger)\n\n \/\/ Load flags\n flag.StringVar(&host, \"host\", \"localhost\", \"Stampzilla server hostname\")\n flag.StringVar(&port, \"port\", \"8282\", \"Stampzilla server port\")\n flag.Parse()\n\n log.Info(\"Starting TELLDUS node\")\n\n Info = InfoStruct{}\n Info.Id = \"Tellstick\"\n\n updateActions()\n updateLayout()\n readState()\n\n b, err := json.Marshal(Info)\n if err != nil {\n log.Error(err)\n }\n\n log.Info(\"Connect to \", host, \":\", port)\n c, e := net.Dial(\"tcp\", net.JoinHostPort(host, port))\n if e != nil {\n log.Error(e)\n } else {\n fmt.Fprintf(c, string(b))\n }\n\n select {}\n}\n\nfunc updateActions() {\n Info.Actions = []Action{\n Action{\n \"set\",\n \"Set\",\n []string{\"Devices.Id\"},\n },\n }\n}\n\nfunc updateLayout() {\n Info.Layout = []Layout{\n Layout{\n \"1\",\n \"switch\",\n \"toggle\",\n \"Devices[Type=!dimmable]\",\n \"Lamps\",\n },\n }\n}\n\nfunc readState() {\n out, err := exec.Command(\"tdtool\", \"--list\").Output()\n if err != nil {\n log.Critical(err)\n }\n\n \/\/ Read number of devices\n cnt := regexp.MustCompile(\"Number of devices: ([0-9]+)?\")\n if n := cnt.FindStringSubmatch(string(out)); len(n) > 1 {\n log.Debug(\"tdtool says \", n[1], \" devices\")\n }\n\n \/\/ Read all devices\n findDevices := regexp.MustCompile(\"(?m)^(.+)\\t(.+)\\t(.*)$\")\n if result := findDevices.FindAllStringSubmatch(string(out), -1); len(result) > 0 {\n for _, dev := range result {\n devices = append(devices, Device{dev[1], dev[2], dev[3], []string{\"toggle\"}})\n }\n }\n\n Info.State.Devices = devices\n\n log.Debug(devices)\n}\n<commit_msg>Added device identification thru \/etc\/tellstick.conf<commit_after>package main\n\nimport (\n \"encoding\/json\"\n \"flag\"\n \"fmt\"\n log \"github.com\/cihub\/seelog\"\n \"io\/ioutil\"\n \"net\"\n \"os\/exec\"\n \"regexp\"\n)\n\nvar Info InfoStruct\nvar devices []Device\n\nvar host string\nvar port string\n\ntype Device struct { \/*{{{*\/\n Id string\n Name string\n State string\n Type string\n Features []string\n} \/*}}}*\/\ntype InfoStruct struct { \/*{{{*\/\n Id string\n Actions []Action\n Layout []Layout\n State State\n} \/*}}}*\/\ntype Action struct { \/*{{{*\/\n Id string\n Name string\n Arguments []string\n} \/*}}}*\/\ntype Layout struct { \/*{{{*\/\n Id string\n Type string\n Action string\n Using string\n Section string\n} \/*}}}*\/\ntype State struct { \/*{{{*\/\n Devices []Device\n} \/*}}}*\/\n\nfunc main() {\n \/\/ Load logger\n logger, err := log.LoggerFromConfigAsFile(\"..\/logconfig.xml\")\n if err != nil {\n panic(err)\n }\n log.ReplaceLogger(logger)\n\n \/\/ Load flags\n flag.StringVar(&host, \"host\", \"localhost\", \"Stampzilla server hostname\")\n flag.StringVar(&port, \"port\", \"8282\", \"Stampzilla server port\")\n flag.Parse()\n\n log.Info(\"Starting TELLDUS node\")\n\n Info = InfoStruct{}\n Info.Id = \"Tellstick\"\n\n updateActions()\n 
updateLayout()\n readState()\n\n b, err := json.Marshal(Info)\n if err != nil {\n log.Error(err)\n }\n\n log.Info(\"Connect to \", host, \":\", port)\n c, e := net.Dial(\"tcp\", net.JoinHostPort(host, port))\n if e != nil {\n log.Error(e)\n } else {\n fmt.Fprintf(c, string(b))\n }\n\n select {}\n}\n\nfunc updateActions() {\n Info.Actions = []Action{\n Action{\n \"set\",\n \"Set\",\n []string{\"Devices.Id\"},\n },\n Action{\n \"toggle\",\n \"Toggle\",\n []string{\"Devices.Id\"},\n },\n }\n}\n\nfunc updateLayout() {\n Info.Layout = []Layout{\n Layout{\n \"1\",\n \"switch\",\n \"toggle\",\n \"Devices[Type=!dimmable]\",\n \"Lamps\",\n },\n }\n}\n\nfunc readState() {\n out, err := exec.Command(\"tdtool\", \"--list\").Output()\n if err != nil {\n log.Critical(err)\n }\n\n \/\/ Read number of devices\n cnt := regexp.MustCompile(\"Number of devices: ([0-9]+)?\")\n if n := cnt.FindStringSubmatch(string(out)); len(n) > 1 {\n log.Debug(\"tdtool says \", n[1], \" devices\")\n }\n\n \/\/ Read all devices\n findDevices := regexp.MustCompile(\"(?m)^(.+)\\t(.+)\\t(.*)$\")\n if result := findDevices.FindAllStringSubmatch(string(out), -1); len(result) > 0 {\n for _, dev := range result {\n devices = append(devices, Device{dev[1], dev[2], dev[3], \"\", []string{\"toggle\"}})\n }\n }\n\n Info.State.Devices = devices\n\n \/\/ Read all features from config\n config, _ := ioutil.ReadFile(\"\/etc\/tellstick.conf\")\n findDevices = regexp.MustCompile(\"(?msU)device {.*id = ([0-9]+).*model = \\\"(.*)\\\".*^}$\")\n if result := findDevices.FindAllStringSubmatch(string(config), -1); len(result) > 0 {\n for _, row := range result {\n for id, dev := range devices {\n if dev.Id == row[1] {\n devices[id].Type = row[2]\n\n switch row[2] {\n case \"selflearning-dimmer\":\n devices[id].Features = append(devices[id].Features, \"dimmable\")\n }\n }\n }\n \/\/devices = append(devices, Device{dev[1], dev[2], dev[3], []string{\"toggle\"}})\n }\n }\n \/*\n \tdevice {\n \t id = 7\n \t name = \"tak bel.\"\n \t protocol = \"arctech\"\n \t model = \"selflearning-dimmer\"\n \t parameters {\n \t\thouse = \"954\"\n \t\tunit = \"2\"\n \t }\n \t}\n *\/\n\n log.Debug(devices)\n}\n<|endoftext|>"} {"text":"<commit_before>package state\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/hashicorp\/go-memdb\"\n\t\"github.com\/hashicorp\/nomad\/nomad\/structs\"\n)\n\n\/\/ stateStoreSchema is used to return the schema for the state store\nfunc stateStoreSchema() *memdb.DBSchema {\n\t\/\/ Create the root DB schema\n\tdb := &memdb.DBSchema{\n\t\tTables: make(map[string]*memdb.TableSchema),\n\t}\n\n\t\/\/ Collect all the schemas that are needed\n\tschemas := []func() *memdb.TableSchema{\n\t\tindexTableSchema,\n\t\tnodeTableSchema,\n\t\tjobTableSchema,\n\t\tjobSummarySchema,\n\t\tperiodicLaunchTableSchema,\n\t\tevalTableSchema,\n\t\tallocTableSchema,\n\t\tvaultAccessorTableSchema,\n\t}\n\n\t\/\/ Add each of the tables\n\tfor _, schemaFn := range schemas {\n\t\tschema := schemaFn()\n\t\tif _, ok := db.Tables[schema.Name]; ok {\n\t\t\tpanic(fmt.Sprintf(\"duplicate table name: %s\", schema.Name))\n\t\t}\n\t\tdb.Tables[schema.Name] = schema\n\t}\n\treturn db\n}\n\n\/\/ indexTableSchema is used for\nfunc indexTableSchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"index\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: \"id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: true,\n\t\t\t\tIndexer: &memdb.StringFieldIndex{\n\t\t\t\t\tField: \"Key\",\n\t\t\t\t\tLowercase: 
true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ nodeTableSchema returns the MemDB schema for the nodes table.\n\/\/ This table is used to store all the client nodes that are registered.\nfunc nodeTableSchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"nodes\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\/\/ Primary index is used for node management\n\t\t\t\/\/ and simple direct lookup. ID is required to be\n\t\t\t\/\/ unique.\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: \"id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: true,\n\t\t\t\tIndexer: &memdb.UUIDFieldIndex{\n\t\t\t\t\tField: \"ID\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ jobTableSchema returns the MemDB schema for the jobs table.\n\/\/ This table is used to store all the jobs that have been submitted.\nfunc jobTableSchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"jobs\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\/\/ Primary index is used for job management\n\t\t\t\/\/ and simple direct lookup. ID is required to be\n\t\t\t\/\/ unique.\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: \"id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: true,\n\t\t\t\tIndexer: &memdb.StringFieldIndex{\n\t\t\t\t\tField: \"ID\",\n\t\t\t\t\tLowercase: true,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"type\": &memdb.IndexSchema{\n\t\t\t\tName: \"type\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: false,\n\t\t\t\tIndexer: &memdb.StringFieldIndex{\n\t\t\t\t\tField: \"Type\",\n\t\t\t\t\tLowercase: false,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"gc\": &memdb.IndexSchema{\n\t\t\t\tName: \"gc\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: false,\n\t\t\t\tIndexer: &memdb.ConditionalIndex{\n\t\t\t\t\tConditional: jobIsGCable,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"periodic\": &memdb.IndexSchema{\n\t\t\t\tName: \"periodic\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: false,\n\t\t\t\tIndexer: &memdb.ConditionalIndex{\n\t\t\t\t\tConditional: jobIsPeriodic,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ jobSummarySchema returns the memdb schema for the job summary table\nfunc jobSummarySchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"job_summary\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: \"id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: true,\n\t\t\t\tIndexer: &memdb.StringFieldIndex{\n\t\t\t\t\tField: \"JobID\",\n\t\t\t\t\tLowercase: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ jobIsGCable satisfies the ConditionalIndexFunc interface and creates an index\n\/\/ on whether a job is eligible for garbage collection.\nfunc jobIsGCable(obj interface{}) (bool, error) {\n\tj, ok := obj.(*structs.Job)\n\tif !ok {\n\t\treturn false, fmt.Errorf(\"Unexpected type: %v\", obj)\n\t}\n\n\t\/\/ The job is GCable if it is batch, it is not periodic and is not a\n\t\/\/ parameterized job.\n\tperiodic := j.Periodic != nil && j.Periodic.Enabled\n\tgcable := j.Type == structs.JobTypeBatch && !periodic && !j.IsParameterized()\n\treturn gcable, nil\n}\n\n\/\/ jobIsPeriodic satisfies the ConditionalIndexFunc interface and creates an index\n\/\/ on whether a job is periodic.\nfunc jobIsPeriodic(obj interface{}) (bool, error) {\n\tj, ok := obj.(*structs.Job)\n\tif !ok {\n\t\treturn false, fmt.Errorf(\"Unexpected type: %v\", obj)\n\t}\n\n\tif j.Periodic != nil && j.Periodic.Enabled == true {\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}\n\n\/\/ periodicLaunchTableSchema returns the MemDB schema tracking the most recent\n\/\/ launch 
time for a periodic job.\nfunc periodicLaunchTableSchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"periodic_launch\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\/\/ Primary index is used for job management\n\t\t\t\/\/ and simple direct lookup. ID is required to be\n\t\t\t\/\/ unique.\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: \"id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: true,\n\t\t\t\tIndexer: &memdb.StringFieldIndex{\n\t\t\t\t\tField: \"ID\",\n\t\t\t\t\tLowercase: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ evalTableSchema returns the MemDB schema for the eval table.\n\/\/ This table is used to store all the evaluations that are pending\n\/\/ or recently completed.\nfunc evalTableSchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"evals\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\/\/ Primary index is used for direct lookup.\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: \"id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: true,\n\t\t\t\tIndexer: &memdb.UUIDFieldIndex{\n\t\t\t\t\tField: \"ID\",\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\/\/ Job index is used to lookup evaluations by job\n\t\t\t\"job\": &memdb.IndexSchema{\n\t\t\t\tName: \"job\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: false,\n\t\t\t\tIndexer: &memdb.CompoundIndex{\n\t\t\t\t\tIndexes: []memdb.Indexer{\n\t\t\t\t\t\t&memdb.StringFieldIndex{\n\t\t\t\t\t\t\tField: \"JobID\",\n\t\t\t\t\t\t\tLowercase: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&memdb.StringFieldIndex{\n\t\t\t\t\t\t\tField: \"Status\",\n\t\t\t\t\t\t\tLowercase: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ allocTableSchema returns the MemDB schema for the allocation table.\n\/\/ This table is used to store all the task allocations between task groups\n\/\/ and nodes.\nfunc allocTableSchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"allocs\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\/\/ Primary index is a UUID\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: \"id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: true,\n\t\t\t\tIndexer: &memdb.UUIDFieldIndex{\n\t\t\t\t\tField: \"ID\",\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\/\/ Node index is used to lookup allocations by node\n\t\t\t\"node\": &memdb.IndexSchema{\n\t\t\t\tName: \"node\",\n\t\t\t\tAllowMissing: true, \/\/ Missing is allowed for failed allocations\n\t\t\t\tUnique: false,\n\t\t\t\tIndexer: &memdb.CompoundIndex{\n\t\t\t\t\tIndexes: []memdb.Indexer{\n\t\t\t\t\t\t&memdb.StringFieldIndex{\n\t\t\t\t\t\t\tField: \"NodeID\",\n\t\t\t\t\t\t\tLowercase: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\/\/ Conditional indexer on whether the allocation is terminal\n\t\t\t\t\t\t&memdb.ConditionalIndex{\n\t\t\t\t\t\t\tConditional: func(obj interface{}) (bool, error) {\n\t\t\t\t\t\t\t\t\/\/ Cast to allocation\n\t\t\t\t\t\t\t\talloc, ok := obj.(*structs.Allocation)\n\t\t\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\t\t\treturn false, fmt.Errorf(\"wrong type, got %T should be Allocation\", obj)\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\/\/ Check if the allocation is terminal\n\t\t\t\t\t\t\t\treturn alloc.TerminalStatus(), nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\/\/ Job index is used to lookup allocations by job\n\t\t\t\"job\": &memdb.IndexSchema{\n\t\t\t\tName: \"job\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: false,\n\t\t\t\tIndexer: &memdb.StringFieldIndex{\n\t\t\t\t\tField: \"JobID\",\n\t\t\t\t\tLowercase: true,\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\/\/ Eval 
index is used to lookup allocations by eval\n\t\t\t\"eval\": &memdb.IndexSchema{\n\t\t\t\tName: \"eval\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: false,\n\t\t\t\tIndexer: &memdb.UUIDFieldIndex{\n\t\t\t\t\tField: \"EvalID\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ vaultAccessorTableSchema returns the MemDB schema for the Vault Accessor\n\/\/ Table. This table tracks Vault accessors for tokens created on behalf of\n\/\/ allocations requiring Vault tokens.\nfunc vaultAccessorTableSchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"vault_accessors\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\/\/ The primary index is the accessor id\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: \"id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: true,\n\t\t\t\tIndexer: &memdb.StringFieldIndex{\n\t\t\t\t\tField: \"Accessor\",\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"alloc_id\": &memdb.IndexSchema{\n\t\t\t\tName: \"alloc_id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: false,\n\t\t\t\tIndexer: &memdb.StringFieldIndex{\n\t\t\t\t\tField: \"AllocID\",\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"node_id\": &memdb.IndexSchema{\n\t\t\t\tName: \"node_id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: false,\n\t\t\t\tIndexer: &memdb.StringFieldIndex{\n\t\t\t\t\tField: \"NodeID\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n<commit_msg>Job History schema<commit_after>package state\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/hashicorp\/go-memdb\"\n\t\"github.com\/hashicorp\/nomad\/nomad\/structs\"\n)\n\n\/\/ stateStoreSchema is used to return the schema for the state store\nfunc stateStoreSchema() *memdb.DBSchema {\n\t\/\/ Create the root DB schema\n\tdb := &memdb.DBSchema{\n\t\tTables: make(map[string]*memdb.TableSchema),\n\t}\n\n\t\/\/ Collect all the schemas that are needed\n\tschemas := []func() *memdb.TableSchema{\n\t\tindexTableSchema,\n\t\tnodeTableSchema,\n\t\tjobTableSchema,\n\t\tjobSummarySchema,\n\t\tjobHistorySchema,\n\t\tperiodicLaunchTableSchema,\n\t\tevalTableSchema,\n\t\tallocTableSchema,\n\t\tvaultAccessorTableSchema,\n\t}\n\n\t\/\/ Add each of the tables\n\tfor _, schemaFn := range schemas {\n\t\tschema := schemaFn()\n\t\tif _, ok := db.Tables[schema.Name]; ok {\n\t\t\tpanic(fmt.Sprintf(\"duplicate table name: %s\", schema.Name))\n\t\t}\n\t\tdb.Tables[schema.Name] = schema\n\t}\n\treturn db\n}\n\n\/\/ indexTableSchema is used for\nfunc indexTableSchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"index\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: \"id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: true,\n\t\t\t\tIndexer: &memdb.StringFieldIndex{\n\t\t\t\t\tField: \"Key\",\n\t\t\t\t\tLowercase: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ nodeTableSchema returns the MemDB schema for the nodes table.\n\/\/ This table is used to store all the client nodes that are registered.\nfunc nodeTableSchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"nodes\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\/\/ Primary index is used for node management\n\t\t\t\/\/ and simple direct lookup. 
ID is required to be\n\t\t\t\/\/ unique.\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: \"id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: true,\n\t\t\t\tIndexer: &memdb.UUIDFieldIndex{\n\t\t\t\t\tField: \"ID\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ jobTableSchema returns the MemDB schema for the jobs table.\n\/\/ This table is used to store all the jobs that have been submitted.\nfunc jobTableSchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"jobs\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\/\/ Primary index is used for job management\n\t\t\t\/\/ and simple direct lookup. ID is required to be\n\t\t\t\/\/ unique.\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: \"id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: true,\n\t\t\t\tIndexer: &memdb.StringFieldIndex{\n\t\t\t\t\tField: \"ID\",\n\t\t\t\t\tLowercase: true,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"type\": &memdb.IndexSchema{\n\t\t\t\tName: \"type\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: false,\n\t\t\t\tIndexer: &memdb.StringFieldIndex{\n\t\t\t\t\tField: \"Type\",\n\t\t\t\t\tLowercase: false,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"gc\": &memdb.IndexSchema{\n\t\t\t\tName: \"gc\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: false,\n\t\t\t\tIndexer: &memdb.ConditionalIndex{\n\t\t\t\t\tConditional: jobIsGCable,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"periodic\": &memdb.IndexSchema{\n\t\t\t\tName: \"periodic\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: false,\n\t\t\t\tIndexer: &memdb.ConditionalIndex{\n\t\t\t\t\tConditional: jobIsPeriodic,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ jobSummarySchema returns the memdb schema for the job summary table\nfunc jobSummarySchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"job_summary\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: \"id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: true,\n\t\t\t\tIndexer: &memdb.StringFieldIndex{\n\t\t\t\t\tField: \"JobID\",\n\t\t\t\t\tLowercase: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ jobHistorySchema returns the memdb schema for the job history table which\n\/\/ keeps a historical view of jobs.\nfunc jobHistorySchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"job_histories\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: \"id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: true,\n\n\t\t\t\t\/\/ Use a compound index so the tuple of (JobID, Version) is\n\t\t\t\t\/\/ uniquely identifying\n\t\t\t\tIndexer: &memdb.CompoundIndex{\n\t\t\t\t\tIndexes: []memdb.Indexer{\n\t\t\t\t\t\t&memdb.StringFieldIndex{\n\t\t\t\t\t\t\tField: \"JobID\",\n\t\t\t\t\t\t\tLowercase: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\/\/ Will need to create a new indexer\n\t\t\t\t\t\t&memdb.UintFieldIndex{\n\t\t\t\t\t\t\tField: \"Version\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ jobIsGCable satisfies the ConditionalIndexFunc interface and creates an index\n\/\/ on whether a job is eligible for garbage collection.\nfunc jobIsGCable(obj interface{}) (bool, error) {\n\tj, ok := obj.(*structs.Job)\n\tif !ok {\n\t\treturn false, fmt.Errorf(\"Unexpected type: %v\", obj)\n\t}\n\n\t\/\/ The job is GCable if it is batch, it is not periodic and is not a\n\t\/\/ parameterized job.\n\tperiodic := j.Periodic != nil && j.Periodic.Enabled\n\tgcable := j.Type == structs.JobTypeBatch && !periodic && !j.IsParameterized()\n\treturn gcable, nil\n}\n\n\/\/ jobIsPeriodic satisfies the 
ConditionalIndexFunc interface and creates an index\n\/\/ on whether a job is periodic.\nfunc jobIsPeriodic(obj interface{}) (bool, error) {\n\tj, ok := obj.(*structs.Job)\n\tif !ok {\n\t\treturn false, fmt.Errorf(\"Unexpected type: %v\", obj)\n\t}\n\n\tif j.Periodic != nil && j.Periodic.Enabled == true {\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}\n\n\/\/ periodicLaunchTableSchema returns the MemDB schema tracking the most recent\n\/\/ launch time for a periodic job.\nfunc periodicLaunchTableSchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"periodic_launch\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\/\/ Primary index is used for job management\n\t\t\t\/\/ and simple direct lookup. ID is required to be\n\t\t\t\/\/ unique.\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: \"id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: true,\n\t\t\t\tIndexer: &memdb.StringFieldIndex{\n\t\t\t\t\tField: \"ID\",\n\t\t\t\t\tLowercase: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ evalTableSchema returns the MemDB schema for the eval table.\n\/\/ This table is used to store all the evaluations that are pending\n\/\/ or recently completed.\nfunc evalTableSchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"evals\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\/\/ Primary index is used for direct lookup.\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: \"id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: true,\n\t\t\t\tIndexer: &memdb.UUIDFieldIndex{\n\t\t\t\t\tField: \"ID\",\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\/\/ Job index is used to lookup evaluations by job\n\t\t\t\"job\": &memdb.IndexSchema{\n\t\t\t\tName: \"job\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: false,\n\t\t\t\tIndexer: &memdb.CompoundIndex{\n\t\t\t\t\tIndexes: []memdb.Indexer{\n\t\t\t\t\t\t&memdb.StringFieldIndex{\n\t\t\t\t\t\t\tField: \"JobID\",\n\t\t\t\t\t\t\tLowercase: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&memdb.StringFieldIndex{\n\t\t\t\t\t\t\tField: \"Status\",\n\t\t\t\t\t\t\tLowercase: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ allocTableSchema returns the MemDB schema for the allocation table.\n\/\/ This table is used to store all the task allocations between task groups\n\/\/ and nodes.\nfunc allocTableSchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"allocs\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\/\/ Primary index is a UUID\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: \"id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: true,\n\t\t\t\tIndexer: &memdb.UUIDFieldIndex{\n\t\t\t\t\tField: \"ID\",\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\/\/ Node index is used to lookup allocations by node\n\t\t\t\"node\": &memdb.IndexSchema{\n\t\t\t\tName: \"node\",\n\t\t\t\tAllowMissing: true, \/\/ Missing is allowed for failed allocations\n\t\t\t\tUnique: false,\n\t\t\t\tIndexer: &memdb.CompoundIndex{\n\t\t\t\t\tIndexes: []memdb.Indexer{\n\t\t\t\t\t\t&memdb.StringFieldIndex{\n\t\t\t\t\t\t\tField: \"NodeID\",\n\t\t\t\t\t\t\tLowercase: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\/\/ Conditional indexer on whether the allocation is terminal\n\t\t\t\t\t\t&memdb.ConditionalIndex{\n\t\t\t\t\t\t\tConditional: func(obj interface{}) (bool, error) {\n\t\t\t\t\t\t\t\t\/\/ Cast to allocation\n\t\t\t\t\t\t\t\talloc, ok := obj.(*structs.Allocation)\n\t\t\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\t\t\treturn false, fmt.Errorf(\"wrong type, got %T should be Allocation\", obj)\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\/\/ Check if the 
allocation is terminal\n\t\t\t\t\t\t\t\treturn alloc.TerminalStatus(), nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\/\/ Job index is used to lookup allocations by job\n\t\t\t\"job\": &memdb.IndexSchema{\n\t\t\t\tName: \"job\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: false,\n\t\t\t\tIndexer: &memdb.StringFieldIndex{\n\t\t\t\t\tField: \"JobID\",\n\t\t\t\t\tLowercase: true,\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\/\/ Eval index is used to lookup allocations by eval\n\t\t\t\"eval\": &memdb.IndexSchema{\n\t\t\t\tName: \"eval\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: false,\n\t\t\t\tIndexer: &memdb.UUIDFieldIndex{\n\t\t\t\t\tField: \"EvalID\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ vaultAccessorTableSchema returns the MemDB schema for the Vault Accessor\n\/\/ Table. This table tracks Vault accessors for tokens created on behalf of\n\/\/ allocations requiring Vault tokens.\nfunc vaultAccessorTableSchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"vault_accessors\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\/\/ The primary index is the accessor id\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: \"id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: true,\n\t\t\t\tIndexer: &memdb.StringFieldIndex{\n\t\t\t\t\tField: \"Accessor\",\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"alloc_id\": &memdb.IndexSchema{\n\t\t\t\tName: \"alloc_id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: false,\n\t\t\t\tIndexer: &memdb.StringFieldIndex{\n\t\t\t\t\tField: \"AllocID\",\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"node_id\": &memdb.IndexSchema{\n\t\t\t\tName: \"node_id\",\n\t\t\t\tAllowMissing: false,\n\t\t\t\tUnique: false,\n\t\t\t\tIndexer: &memdb.StringFieldIndex{\n\t\t\t\t\tField: \"NodeID\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package capabilities\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"sort\"\n\n\t\"github.com\/Azure\/azure-sdk-for-go\/services\/containerservice\/mgmt\/2017-09-30\/containerservice\"\n\t\"github.com\/Azure\/go-autorest\/autorest\"\n\t\"github.com\/Azure\/go-autorest\/autorest\/adal\"\n\t\"github.com\/Azure\/go-autorest\/autorest\/azure\"\n\t\"github.com\/mcuadros\/go-version\"\n)\n\nfunc NewAKSVersionsHandler() *AKSVersionHandler {\n\treturn &AKSVersionHandler{}\n}\n\ntype AKSVersionHandler struct {\n}\n\ntype regionCapabilitiesRequestBody struct {\n\t\/\/ BaseURL specifies the Azure Resource management endpoint, it defaults to \"https:\/\/management.azure.com\/\".\n\tBaseURL string `json:\"baseUrl\"`\n\t\/\/ AuthBaseURL specifies the Azure OAuth 2.0 authentication endpoint, it defaults to \"https:\/\/login.microsoftonline.com\/\".\n\tAuthBaseURL string `json:\"authBaseUrl\"`\n\t\/\/ credentials\n\tClientID string `json:\"clientId\"`\n\tClientSecret string `json:\"clientSecret\"`\n\tSubscriptionID string `json:\"subscriptionId\"`\n\tTenantID string `json:\"tenantId\"`\n\n\tRegion string `json:\"region\"`\n}\n\nfunc (g *AKSVersionHandler) ServeHTTP(writer http.ResponseWriter, req *http.Request) {\n\tif req.Method != http.MethodPost {\n\t\twriter.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\twriter.Header().Set(\"Content-Type\", \"application\/json\")\n\n\tvar body regionCapabilitiesRequestBody\n\tif err := extractRequestBody(writer, req, &body); err != nil {\n\t\thandleErr(writer, err)\n\t\treturn\n\t}\n\n\tif err := validateRegionRequestBody(writer, &body); err != nil {\n\t\thandleErr(writer, err)\n\t\treturn\n\t}\n\n\tregion := 
body.Region\n\n\tclientID := body.ClientID\n\tclientSecret := body.ClientSecret\n\tsubscriptionID := body.SubscriptionID\n\ttenantID := body.TenantID\n\n\tbaseURL := body.BaseURL\n\tauthBaseURL := body.AuthBaseURL\n\tif baseURL == \"\" {\n\t\tbaseURL = azure.PublicCloud.ResourceManagerEndpoint\n\t}\n\tif authBaseURL == \"\" {\n\t\tauthBaseURL = azure.PublicCloud.ActiveDirectoryEndpoint\n\t}\n\n\toAuthConfig, err := adal.NewOAuthConfig(authBaseURL, tenantID)\n\tif err != nil {\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\thandleErr(writer, fmt.Errorf(\"failed to configure azure oauth: %v\", err))\n\t\treturn\n\t}\n\n\tspToken, err := adal.NewServicePrincipalToken(*oAuthConfig, clientID, clientSecret, baseURL)\n\tif err != nil {\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\thandleErr(writer, fmt.Errorf(\"failed to create token: %v\", err))\n\t\treturn\n\t}\n\n\tauthorizer := autorest.NewBearerAuthorizer(spToken)\n\n\tclient := containerservice.NewContainerServicesClientWithBaseURI(baseURL, subscriptionID)\n\tclient.Authorizer = authorizer\n\n\torchestrators, err := client.ListOrchestrators(context.Background(), region, \"managedClusters\")\n\tif err != nil {\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\thandleErr(writer, fmt.Errorf(\"failed to get orchestrators: %v\", err))\n\t\treturn\n\t}\n\n\tif orchestrators.Orchestrators == nil {\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\thandleErr(writer, fmt.Errorf(\"no version profiles returned\"))\n\t\treturn\n\t}\n\n\tvar kubernetesVersions []string\n\n\tfor _, profile := range *orchestrators.Orchestrators {\n\t\tif profile.OrchestratorType == nil || profile.OrchestratorVersion == nil {\n\t\t\twriter.WriteHeader(http.StatusInternalServerError)\n\t\t\thandleErr(writer, fmt.Errorf(\"unexpected nil orchestrator type or version\"))\n\t\t\treturn\n\t\t}\n\n\t\tif *profile.OrchestratorType == \"Kubernetes\" {\n\t\t\tkubernetesVersions = append(kubernetesVersions, *profile.OrchestratorVersion)\n\t\t}\n\t}\n\n\tsort.Sort(sortableVersion(kubernetesVersions))\n\n\tserialized, err := json.Marshal(kubernetesVersions)\n\tif err != nil {\n\t\twriter.WriteHeader(http.StatusInternalServerError)\n\t\thandleErr(writer, err)\n\t\treturn\n\t}\n\n\twriter.Write(serialized)\n}\n\ntype sortableVersion []string\n\nfunc (s sortableVersion) Len() int {\n\treturn len(s)\n}\n\nfunc (s sortableVersion) Swap(a, b int) {\n\ts[a], s[b] = s[b], s[a]\n}\n\nfunc (s sortableVersion) Less(a, b int) bool {\n\treturn version.Compare(s[a], s[b], \"<\")\n}\n\nfunc validateRegionRequestBody(writer http.ResponseWriter, body *regionCapabilitiesRequestBody) error {\n\tregion := body.Region\n\n\tclientID := body.ClientID\n\tclientSecret := body.ClientSecret\n\tsubscriptionID := body.SubscriptionID\n\ttenantID := body.TenantID\n\n\tif region == \"\" {\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\treturn fmt.Errorf(\"invalid region\")\n\t}\n\n\tif clientID == \"\" {\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\treturn fmt.Errorf(\"invalid clientID\")\n\t}\n\n\tif clientSecret == \"\" {\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\treturn fmt.Errorf(\"invalid clientSecret\")\n\t}\n\n\tif subscriptionID == \"\" {\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\treturn fmt.Errorf(\"invalid subscriptionID\")\n\t}\n\n\tif tenantID == \"\" {\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\treturn fmt.Errorf(\"invalid tenantID\")\n\t}\n\n\treturn nil\n}\n<commit_msg>Simple refactor validateRegionRequestBody<commit_after>package 
capabilities\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"sort\"\n\n\t\"github.com\/Azure\/azure-sdk-for-go\/services\/containerservice\/mgmt\/2017-09-30\/containerservice\"\n\t\"github.com\/Azure\/go-autorest\/autorest\"\n\t\"github.com\/Azure\/go-autorest\/autorest\/adal\"\n\t\"github.com\/Azure\/go-autorest\/autorest\/azure\"\n\t\"github.com\/mcuadros\/go-version\"\n)\n\nfunc NewAKSVersionsHandler() *AKSVersionHandler {\n\treturn &AKSVersionHandler{}\n}\n\ntype AKSVersionHandler struct {\n}\n\ntype regionCapabilitiesRequestBody struct {\n\t\/\/ BaseURL specifies the Azure Resource management endpoint, it defaults to \"https:\/\/management.azure.com\/\".\n\tBaseURL string `json:\"baseUrl\"`\n\t\/\/ AuthBaseURL specifies the Azure OAuth 2.0 authentication endpoint, it defaults to \"https:\/\/login.microsoftonline.com\/\".\n\tAuthBaseURL string `json:\"authBaseUrl\"`\n\t\/\/ credentials\n\tClientID string `json:\"clientId\"`\n\tClientSecret string `json:\"clientSecret\"`\n\tSubscriptionID string `json:\"subscriptionId\"`\n\tTenantID string `json:\"tenantId\"`\n\n\tRegion string `json:\"region\"`\n}\n\nfunc (g *AKSVersionHandler) ServeHTTP(writer http.ResponseWriter, req *http.Request) {\n\tif req.Method != http.MethodPost {\n\t\twriter.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\twriter.Header().Set(\"Content-Type\", \"application\/json\")\n\n\tvar body regionCapabilitiesRequestBody\n\tif err := extractRequestBody(writer, req, &body); err != nil {\n\t\thandleErr(writer, err)\n\t\treturn\n\t}\n\n\tif err := validateRegionRequestBody(writer, &body); err != nil {\n\t\thandleErr(writer, err)\n\t\treturn\n\t}\n\n\tregion := body.Region\n\n\tclientID := body.ClientID\n\tclientSecret := body.ClientSecret\n\tsubscriptionID := body.SubscriptionID\n\ttenantID := body.TenantID\n\n\tbaseURL := body.BaseURL\n\tauthBaseURL := body.AuthBaseURL\n\tif baseURL == \"\" {\n\t\tbaseURL = azure.PublicCloud.ResourceManagerEndpoint\n\t}\n\tif authBaseURL == \"\" {\n\t\tauthBaseURL = azure.PublicCloud.ActiveDirectoryEndpoint\n\t}\n\n\toAuthConfig, err := adal.NewOAuthConfig(authBaseURL, tenantID)\n\tif err != nil {\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\thandleErr(writer, fmt.Errorf(\"failed to configure azure oauth: %v\", err))\n\t\treturn\n\t}\n\n\tspToken, err := adal.NewServicePrincipalToken(*oAuthConfig, clientID, clientSecret, baseURL)\n\tif err != nil {\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\thandleErr(writer, fmt.Errorf(\"failed to create token: %v\", err))\n\t\treturn\n\t}\n\n\tauthorizer := autorest.NewBearerAuthorizer(spToken)\n\n\tclient := containerservice.NewContainerServicesClientWithBaseURI(baseURL, subscriptionID)\n\tclient.Authorizer = authorizer\n\n\torchestrators, err := client.ListOrchestrators(context.Background(), region, \"managedClusters\")\n\tif err != nil {\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\thandleErr(writer, fmt.Errorf(\"failed to get orchestrators: %v\", err))\n\t\treturn\n\t}\n\n\tif orchestrators.Orchestrators == nil {\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\thandleErr(writer, fmt.Errorf(\"no version profiles returned\"))\n\t\treturn\n\t}\n\n\tvar kubernetesVersions []string\n\n\tfor _, profile := range *orchestrators.Orchestrators {\n\t\tif profile.OrchestratorType == nil || profile.OrchestratorVersion == nil {\n\t\t\twriter.WriteHeader(http.StatusInternalServerError)\n\t\t\thandleErr(writer, fmt.Errorf(\"unexpected nil orchestrator type or 
version\"))\n\t\t\treturn\n\t\t}\n\n\t\tif *profile.OrchestratorType == \"Kubernetes\" {\n\t\t\tkubernetesVersions = append(kubernetesVersions, *profile.OrchestratorVersion)\n\t\t}\n\t}\n\n\tsort.Sort(sortableVersion(kubernetesVersions))\n\n\tserialized, err := json.Marshal(kubernetesVersions)\n\tif err != nil {\n\t\twriter.WriteHeader(http.StatusInternalServerError)\n\t\thandleErr(writer, err)\n\t\treturn\n\t}\n\n\twriter.Write(serialized)\n}\n\ntype sortableVersion []string\n\nfunc (s sortableVersion) Len() int {\n\treturn len(s)\n}\n\nfunc (s sortableVersion) Swap(a, b int) {\n\ts[a], s[b] = s[b], s[a]\n}\n\nfunc (s sortableVersion) Less(a, b int) bool {\n\treturn version.Compare(s[a], s[b], \"<\")\n}\n\nfunc validateRegionRequestBody(writer http.ResponseWriter, body *regionCapabilitiesRequestBody) error {\n\ttoCheck := map[string]string{\n\t\t\"region\": body.Region,\n\t\t\"clientID\": body.ClientID,\n\t\t\"clientSecret\": body.ClientSecret,\n\t\t\"subscriptionID\": body.SubscriptionID,\n\t\t\"tenantID\": body.TenantID,\n\t}\n\tfor k, v := range toCheck {\n\t\tif v == \"\" {\n\t\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\t\treturn fmt.Errorf(\"invalid %s\", k)\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"}
{"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"gopkg.in\/mgo.v2\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"strconv\"\n)\n\nconst (\n\tconnectionString = \"localhost\"\n\thelloWorldString = \"Hello, world!\"\n\tworldRowCount = 10000\n)\n\nvar (\n\tcollection *mgo.Collection\n\tdatabase *mgo.Database\n)\n\ntype Message struct {\n\tMessage string `json:\"message\"`\n}\n\ntype World struct {\n\tId uint16 `json:\"id\"`\n\tRandomNumber uint16 `json:\"randomNumber\"`\n}\n\ntype Fortune struct {\n\tId uint16 `json:\"id\"`\n\tMessage string `json:\"message\"`\n}\n\nfunc main() {\n\tport := \":8228\"\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tif session, err := mgo.Dial(connectionString); err != nil {\n\t\tlog.Fatalf(\"Error opening database: %v\", err)\n\t} else {\n\t\tdefer session.Close()\n\t\tsession.SetPoolLimit(5)\n\t\tdatabase = session.DB(\"hello_world\")\n\t\tcollection = database.C(\"world\")\n\t\thttp.HandleFunc(\"\/json\", jsonHandler)\n\t\thttp.HandleFunc(\"\/db\", dbHandler)\n\t\thttp.HandleFunc(\"\/queries\", queriesHandler)\n\t\thttp.HandleFunc(\"\/update\", updateHandler)\n\t\thttp.HandleFunc(\"\/plaintext\", plaintextHandler)\n\t\tfmt.Println(\"Serving on http:\/\/localhost\" + port)\n\t\thttp.ListenAndServe(port, nil)\n\t}\n}\n\n\/\/ Helper for random numbers\nfunc getRandomNumber() int {\n\treturn rand.Intn(worldRowCount) + 1\n}\n\n\/\/ Test 1: JSON serialization\nfunc jsonHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/javascript\")\n\tjson.NewEncoder(w).Encode(&Message{helloWorldString})\n}\n\nfunc dbHandler(w http.ResponseWriter, r *http.Request) {\n\tvar world World\n\tquery := bson.M{\"id\": getRandomNumber()}\n\tif collection != nil {\n\t\tif err := collection.Find(query).One(&world); err != nil {\n\t\t\tlog.Fatalf(\"Error finding world with id: %s\", err.Error())\n\t\t\treturn\n\t\t} else {\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\t\tjson.NewEncoder(w).Encode(&world)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tlog.Fatal(\"Collection not initialized properly\")\n\t}\n}\n\nfunc queriesHandler(w http.ResponseWriter, r *http.Request) {\n\tn := 1\n\tif nStr := r.URL.Query().Get(\"queries\"); len(nStr) 
> 0 {\n\t\tn, _ = strconv.Atoi(nStr)\n\t}\n\n\tif n <= 1 {\n\t\tdbHandler(w, r)\n\t\treturn\n\t} else if n > 500 {\n\t\tn = 500\n\t}\n\n\tworlds := make([]World, n)\n\tfor _, world := range worlds {\n\t\tquery := bson.M{\"id\": getRandomNumber()}\n\t\tif err := collection.Find(query).One(&world); err != nil {\n\t\t\tlog.Fatalf(\"Error finding world with id: %s\", err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tjson.NewEncoder(w).Encode(worlds)\n}\n\nfunc updateHandler(w http.ResponseWriter, r *http.Request) {\n\tn := 1\n\tif nStr := r.URL.Query().Get(\"queries\"); len(nStr) > 0 {\n\t\tn, _ = strconv.Atoi(nStr)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tencoder := json.NewEncoder(w)\n\n\tif n <= 1 {\n\t\tvar world World\n\t\tcolQuery := bson.M{\"id\": getRandomNumber()}\n\t\tupdate := bson.M{\"$set\": bson.M{\"randomNumber\": getRandomNumber()}}\n\t\tif err := collection.Update(colQuery, update); err != nil {\n\t\t\tlog.Fatalf(\"Error updating world with id: %s\", err.Error())\n\t\t} else {\n\t\t\tworld.Id = colQuery[\"id\"]\n\t\t\tworld.RandomNumber = update[\"$set\"].(bson.M)[\"randomNumber\"]\n\t\t}\n\t\tencoder.Encode(world)\n\t} else {\n\t\tif n > 500 {\n\t\t\tn = 500\n\t\t}\n\t\tworlds := make([]World, n)\n\t\tfor _, world := range worlds {\n\t\t\tcolQuery := bson.M{\"id\": getRandomNumber()}\n\t\t\tupdate := bson.M{\"$set\": bson.M{\"randomNumber\": getRandomNumber()}}\n\t\t\tif err := collection.Update(colQuery, update); err != nil {\n\t\t\t\tlog.Fatalf(\"Error updating world with id: %s\", err.Error())\n\t\t\t} else {\n\t\t\t\tworld.Id = colQuery[\"id\"]\n\t\t\t\tworld.RandomNumber = update[\"$set\"].(bson.M)[\"randomNumber\"]\n\t\t\t}\n\t\t}\n\t\tencoder.Encode(worlds)\n\t}\n}\n\nfunc plaintextHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"text\/plain\")\n\tw.Write([]byte(\"Hello, World!\"))\n}\n<commit_msg>Added uint16 type assertion<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"gopkg.in\/mgo.v2\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"strconv\"\n)\n\nconst (\n\tconnectionString = \"localhost\"\n\thelloWorldString = \"Hello, world!\"\n\tworldRowCount = 10000\n)\n\nvar (\n\tcollection *mgo.Collection\n\tdatabase *mgo.Database\n)\n\ntype Message struct {\n\tMessage string `json:\"message\"`\n}\n\ntype World struct {\n\tId uint16 `json:\"id\"`\n\tRandomNumber uint16 `json:\"randomNumber\"`\n}\n\ntype Fortune struct {\n\tId uint16 `json:\"id\"`\n\tMessage string `json:\"message\"`\n}\n\nfunc main() {\n\tport := \":8228\"\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tif session, err := mgo.Dial(connectionString); err != nil {\n\t\tlog.Fatalf(\"Error opening database: %v\", err)\n\t} else {\n\t\tdefer session.Close()\n\t\tsession.SetPoolLimit(5)\n\t\tdatabase = session.DB(\"hello_world\")\n\t\tcollection = database.C(\"world\")\n\t\thttp.HandleFunc(\"\/json\", jsonHandler)\n\t\thttp.HandleFunc(\"\/db\", dbHandler)\n\t\thttp.HandleFunc(\"\/queries\", queriesHandler)\n\t\thttp.HandleFunc(\"\/update\", updateHandler)\n\t\thttp.HandleFunc(\"\/plaintext\", plaintextHandler)\n\t\tfmt.Println(\"Serving on http:\/\/localhost\" + port)\n\t\thttp.ListenAndServe(port, nil)\n\t}\n}\n\n\/\/ Helper for random numbers\nfunc getRandomNumber() int {\n\treturn rand.Intn(worldRowCount) + 1\n}\n\n\/\/ Test 1: JSON serialization\nfunc jsonHandler(w http.ResponseWriter, r *http.Request) 
{\n\tw.Header().Set(\"Content-Type\", \"application\/javascript\")\n\tjson.NewEncoder(w).Encode(&Message{helloWorldString})\n}\n\nfunc dbHandler(w http.ResponseWriter, r *http.Request) {\n\tvar world World\n\tquery := bson.M{\"id\": getRandomNumber()}\n\tif collection != nil {\n\t\tif err := collection.Find(query).One(&world); err != nil {\n\t\t\tlog.Fatalf(\"Error finding world with id: %s\", err.Error())\n\t\t\treturn\n\t\t} else {\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\t\tjson.NewEncoder(w).Encode(&world)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tlog.Fatal(\"Collection not initialized properly\")\n\t}\n}\n\nfunc queriesHandler(w http.ResponseWriter, r *http.Request) {\n\tn := 1\n\tif nStr := r.URL.Query().Get(\"queries\"); len(nStr) > 0 {\n\t\tn, _ = strconv.Atoi(nStr)\n\t}\n\n\tif n <= 1 {\n\t\tdbHandler(w, r)\n\t\treturn\n\t} else if n > 500 {\n\t\tn = 500\n\t}\n\n\tworlds := make([]World, n)\n\tfor _, world := range worlds {\n\t\tquery := bson.M{\"id\": getRandomNumber()}\n\t\tif err := collection.Find(query).One(&world); err != nil {\n\t\t\tlog.Fatalf(\"Error finding world with id: %s\", err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tjson.NewEncoder(w).Encode(worlds)\n}\n\nfunc updateHandler(w http.ResponseWriter, r *http.Request) {\n\tn := 1\n\tif nStr := r.URL.Query().Get(\"queries\"); len(nStr) > 0 {\n\t\tn, _ = strconv.Atoi(nStr)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tencoder := json.NewEncoder(w)\n\n\tif n <= 1 {\n\t\tvar world World\n\t\tcolQuery := bson.M{\"id\": getRandomNumber()}\n\t\tupdate := bson.M{\"$set\": bson.M{\"randomNumber\": getRandomNumber()}}\n\t\tif err := collection.Update(colQuery, update); err != nil {\n\t\t\tlog.Fatalf(\"Error updating world with id: %s\", err.Error())\n\t\t} else {\n\t\t\tworld.Id = colQuery[\"id\"].(uint16)\n\t\t\tworld.RandomNumber = update[\"$set\"].(bson.M)[\"randomNumber\"].(uint16)\n\t\t}\n\t\tencoder.Encode(world)\n\t} else {\n\t\tif n > 500 {\n\t\t\tn = 500\n\t\t}\n\t\tworlds := make([]World, n)\n\t\tfor _, world := range worlds {\n\t\t\tcolQuery := bson.M{\"id\": getRandomNumber()}\n\t\t\tupdate := bson.M{\"$set\": bson.M{\"randomNumber\": getRandomNumber()}}\n\t\t\tif err := collection.Update(colQuery, update); err != nil {\n\t\t\t\tlog.Fatalf(\"Error updating world with id: %s\", err.Error())\n\t\t\t} else {\n\t\t\t\tworld.Id = colQuery[\"id\"].(uint16)\n\t\t\t\tworld.RandomNumber = update[\"$set\"].(bson.M)[\"randomNumber\"].(uint16)\n\t\t\t}\n\t\t}\n\t\tencoder.Encode(worlds)\n\t}\n}\n\nfunc plaintextHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"text\/plain\")\n\tw.Write([]byte(\"Hello, World!\"))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"gopkg.in\/mgo.v2\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n\t\"html\/template\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n)\n\nconst (\n\tconnectionString = \"localhost\"\n\thelloWorldString = \"Hello, world!\"\n\tworldRowCount = 10000\n)\n\nvar (\n\ttmpl = template.Must(template.ParseFiles(\"templates\/layout.html\", \"templates\/fortune.html\"))\n\n\tdatabase *mgo.Database\n\tfortunes *mgo.Collection\n\tworlds *mgo.Collection\n)\n\ntype Message struct {\n\tMessage string `json:\"message\"`\n}\n\ntype World struct {\n\tId uint16 `json:\"id\"`\n\tRandomNumber uint16 `json:\"randomNumber\"`\n}\n\ntype Fortune struct {\n\tId uint16 
`json:\"id\"`\n\tMessage string `json:\"message\"`\n}\n\ntype Fortunes []Fortune\n\nfunc (s Fortunes) Len() int {\n\treturn len(s)\n}\n\nfunc (s Fortunes) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n\ntype ByMessage struct{ Fortunes }\n\nfunc (s ByMessage) Less(i, j int) bool {\n\treturn s.Fortunes[i].Message < s.Fortunes[j].Message\n}\n\nfunc main() {\n\tport := \":8228\"\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tif session, err := mgo.Dial(connectionString); err != nil {\n\t\tlog.Fatalf(\"Error opening database: %v\", err)\n\t} else {\n\t\tdefer session.Close()\n\t\tsession.SetPoolLimit(5)\n\t\tdatabase = session.DB(\"hello_world\")\n\t\tworlds = database.C(\"world\")\n\t\tfortunes = database.C(\"fortune\")\n\t\thttp.HandleFunc(\"\/json\", jsonHandler)\n\t\thttp.HandleFunc(\"\/db\", dbHandler)\n\t\thttp.HandleFunc(\"\/queries\", queriesHandler)\n\t\thttp.HandleFunc(\"\/update\", updateHandler)\n\t\thttp.HandleFunc(\"\/plaintext\", plaintextHandler)\n\t\tfmt.Println(\"Serving on http:\/\/localhost\" + port)\n\t\thttp.ListenAndServe(port, nil)\n\t}\n}\n\n\/\/ Helper for random numbers\nfunc getRandomNumber() int {\n\treturn rand.Intn(worldRowCount) + 1\n}\n\n\/\/ Test 1: JSON serialization\nfunc jsonHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/javascript\")\n\tjson.NewEncoder(w).Encode(&Message{helloWorldString})\n}\n\nfunc dbHandler(w http.ResponseWriter, r *http.Request) {\n\tvar world World\n\tquery := bson.M{\"id\": getRandomNumber()}\n\tif worlds != nil {\n\t\tif err := worlds.Find(query).One(&world); err != nil {\n\t\t\tlog.Fatalf(\"Error finding world with id: %s\", err.Error())\n\t\t\treturn\n\t\t} else {\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\t\tjson.NewEncoder(w).Encode(&world)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tlog.Fatal(\"Collection not initialized properly\")\n\t}\n}\n\nfunc queriesHandler(w http.ResponseWriter, r *http.Request) {\n\tn := 1\n\tif nStr := r.URL.Query().Get(\"queries\"); len(nStr) > 0 {\n\t\tn, _ = strconv.Atoi(nStr)\n\t}\n\n\tif n <= 1 {\n\t\tdbHandler(w, r)\n\t\treturn\n\t} else if n > 500 {\n\t\tn = 500\n\t}\n\n\tresult := make([]World, n)\n\tfor _, world := range result {\n\t\tquery := bson.M{\"id\": getRandomNumber()}\n\t\tif err := worlds.Find(query).One(&world); err != nil {\n\t\t\tlog.Fatalf(\"Error finding world with id: %s\", err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tjson.NewEncoder(w).Encode(result)\n}\n\nfunc fortuneHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"text\/html\")\n\tf := make(Fortunes, 16)\n\tif err := fortunes.Find(nil).All(&f); err == nil {\n\t\tf = append(f, Fortune{\n\t\t\tMessage: \"Additional fortune added at request time.\",\n\t\t})\n\t\tsort.Sort(ByMessage{f})\n\t\tif err := tmpl.Execute(w, f); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\t}\n\n}\n\nfunc updateHandler(w http.ResponseWriter, r *http.Request) {\n\tn := 1\n\tif nStr := r.URL.Query().Get(\"queries\"); len(nStr) > 0 {\n\t\tn, _ = strconv.Atoi(nStr)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tencoder := json.NewEncoder(w)\n\n\tif n <= 1 {\n\t\tvar world World\n\t\tcolQuery := bson.M{\"id\": getRandomNumber()}\n\t\tupdate := bson.M{\"$set\": bson.M{\"randomNumber\": getRandomNumber()}}\n\t\tif err := worlds.Update(colQuery, update); err != nil {\n\t\t\tlog.Fatalf(\"Error updating world with id: %s\", 
err.Error())\n\t\t} else {\n\t\t\tworld.Id = colQuery[\"id\"].(uint16)\n\t\t\tworld.RandomNumber = update[\"$set\"].(bson.M)[\"randomNumber\"].(uint16)\n\t\t}\n\t\tencoder.Encode(world)\n\t} else {\n\t\tif n > 500 {\n\t\t\tn = 500\n\t\t}\n\t\tworlds := make([]World, n)\n\t\tfor _, world := range worlds {\n\t\t\tcolQuery := bson.M{\"id\": getRandomNumber()}\n\t\t\tupdate := bson.M{\"$set\": bson.M{\"randomNumber\": getRandomNumber()}}\n\t\t\tif err := worlds.Update(colQuery, update); err != nil {\n\t\t\t\tlog.Fatalf(\"Error updating world with id: %s\", err.Error())\n\t\t\t} else {\n\t\t\t\tworld.Id = colQuery[\"id\"].(uint16)\n\t\t\t\tworld.RandomNumber = update[\"$set\"].(bson.M)[\"randomNumber\"].(uint16)\n\t\t\t}\n\t\t}\n\t\tencoder.Encode(worlds)\n\t}\n}\n\nfunc plaintextHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"text\/plain\")\n\tw.Write([]byte(\"Hello, World!\"))\n}\n<commit_msg>Some more cleanup<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"gopkg.in\/mgo.v2\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n\t\"html\/template\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n)\n\nconst (\n\tconnectionString = \"localhost\"\n\thelloWorldString = \"Hello, world!\"\n\tworldRowCount = 10000\n)\n\nvar (\n\ttmpl = template.Must(template.ParseFiles(\"templates\/layout.html\", \"templates\/fortune.html\"))\n\n\tdatabase *mgo.Database\n\tfortunes *mgo.Collection\n\tworlds *mgo.Collection\n)\n\ntype Message struct {\n\tMessage string `json:\"message\"`\n}\n\ntype World struct {\n\tId uint16 `json:\"id\"`\n\tRandomNumber uint16 `json:\"randomNumber\"`\n}\n\ntype Fortune struct {\n\tId uint16 `json:\"id\"`\n\tMessage string `json:\"message\"`\n}\n\ntype Fortunes []Fortune\n\nfunc (s Fortunes) Len() int {\n\treturn len(s)\n}\n\nfunc (s Fortunes) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n\ntype ByMessage struct{ Fortunes }\n\nfunc (s ByMessage) Less(i, j int) bool {\n\treturn s.Fortunes[i].Message < s.Fortunes[j].Message\n}\n\nfunc main() {\n\tport := \":8228\"\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tif session, err := mgo.Dial(connectionString); err != nil {\n\t\tlog.Fatalf(\"Error opening database: %v\", err)\n\t} else {\n\t\tdefer session.Close()\n\t\tsession.SetPoolLimit(5)\n\t\tdatabase = session.DB(\"hello_world\")\n\t\tworlds = database.C(\"world\")\n\t\tfortunes = database.C(\"fortune\")\n\t\thttp.HandleFunc(\"\/json\", jsonHandler)\n\t\thttp.HandleFunc(\"\/db\", dbHandler)\n\t\thttp.HandleFunc(\"\/queries\", queriesHandler)\n\t\thttp.HandleFunc(\"\/update\", updateHandler)\n\t\thttp.HandleFunc(\"\/plaintext\", plaintextHandler)\n\t\tfmt.Println(\"Serving on http:\/\/localhost\" + port)\n\t\thttp.ListenAndServe(port, nil)\n\t}\n}\n\n\/\/ Helper for random numbers\nfunc getRandomNumber() int {\n\treturn rand.Intn(worldRowCount) + 1\n}\n\n\/\/ Test 1: JSON serialization\nfunc jsonHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/javascript\")\n\tjson.NewEncoder(w).Encode(&Message{helloWorldString})\n}\n\nfunc dbHandler(w http.ResponseWriter, r *http.Request) {\n\tvar world World\n\tquery := bson.M{\"id\": getRandomNumber()}\n\tif worlds != nil {\n\t\tif err := worlds.Find(query).One(&world); err != nil {\n\t\t\tlog.Fatalf(\"Error finding world with id: %s\", err.Error())\n\t\t\treturn\n\t\t} else {\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\t\tjson.NewEncoder(w).Encode(&world)\n\t\t\treturn\n\t\t}\n\t} else 
{\n\t\tlog.Fatal(\"Collection not initialized properly\")\n\t}\n}\n\nfunc queriesHandler(w http.ResponseWriter, r *http.Request) {\n\tn := 1\n\tif nStr := r.URL.Query().Get(\"queries\"); len(nStr) > 0 {\n\t\tn, _ = strconv.Atoi(nStr)\n\t}\n\n\tif n <= 1 {\n\t\tdbHandler(w, r)\n\t\treturn\n\t} else if n > 500 {\n\t\tn = 500\n\t}\n\n\tresult := make([]World, n)\n\tfor _, world := range result {\n\t\tquery := bson.M{\"id\": getRandomNumber()}\n\t\tif err := worlds.Find(query).One(&world); err != nil {\n\t\t\tlog.Fatalf(\"Error finding world with id: %s\", err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tjson.NewEncoder(w).Encode(result)\n}\n\nfunc fortuneHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"text\/html\")\n\tf := make(Fortunes, 16)\n\tif err := fortunes.Find(nil).All(&f); err == nil {\n\t\tf = append(f, Fortune{\n\t\t\tMessage: \"Additional fortune added at request time.\",\n\t\t})\n\t\tsort.Sort(ByMessage{f})\n\t\tif err := tmpl.Execute(w, f); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\t}\n\n}\n\nfunc updateHandler(w http.ResponseWriter, r *http.Request) {\n\tn := 1\n\tif nStr := r.URL.Query().Get(\"queries\"); len(nStr) > 0 {\n\t\tn, _ = strconv.Atoi(nStr)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tencoder := json.NewEncoder(w)\n\n\tif n <= 1 {\n\t\tvar world World\n\t\tcolQuery := bson.M{\"id\": getRandomNumber()}\n\t\tupdate := bson.M{\"$set\": bson.M{\"randomNumber\": getRandomNumber()}}\n\t\tif err := worlds.Update(colQuery, update); err != nil {\n\t\t\tlog.Fatalf(\"Error updating world with id: %s\", err.Error())\n\t\t} else {\n\t\t\tworld.Id = colQuery[\"id\"].(uint16)\n\t\t\tworld.RandomNumber = update[\"$set\"].(bson.M)[\"randomNumber\"].(uint16)\n\t\t}\n\t\tencoder.Encode(world)\n\t} else {\n\t\tif n > 500 {\n\t\t\tn = 500\n\t\t}\n\t\tresult := make([]World, n)\n\t\tfor _, world := range result {\n\t\t\tcolQuery := bson.M{\"id\": getRandomNumber()}\n\t\t\tupdate := bson.M{\"$set\": bson.M{\"randomNumber\": getRandomNumber()}}\n\t\t\tif err := worlds.Update(colQuery, update); err != nil {\n\t\t\t\tlog.Fatalf(\"Error updating world with id: %s\", err.Error())\n\t\t\t} else {\n\t\t\t\tworld.Id = colQuery[\"id\"].(uint16)\n\t\t\t\tworld.RandomNumber = update[\"$set\"].(bson.M)[\"randomNumber\"].(uint16)\n\t\t\t}\n\t\t}\n\t\tencoder.Encode(result)\n\t}\n}\n\nfunc plaintextHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"text\/plain\")\n\tw.Write([]byte(helloWorldString))\n}\n<|endoftext|>"} {"text":"<commit_before>package xsdgen\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"testing\"\n)\n\nfunc glob(dir ...string) []string {\n\tfiles, err := filepath.Glob(filepath.Join(dir...))\n\tif err != nil {\n\t\tpanic(\"error in glob util function: \" + err.Error())\n\t}\n\treturn files\n}\n\ntype testLogger testing.T\n\nfunc (t *testLogger) Printf(format string, v ...interface{}) {\n\tt.Logf(format, v...)\n}\n\nfunc TestLibrarySchema(t *testing.T) {\n\ttestGen(t, \"http:\/\/dyomedea.com\/ns\/library\", \"testdata\/library.xsd\")\n}\nfunc TestPurchasOrderSchema(t *testing.T) {\n\ttestGen(t, \"http:\/\/www.example.com\/PO1\", \"testdata\/po1.xsd\")\n}\nfunc TestUSTreasureSDN(t *testing.T) {\n\ttestGen(t, \"http:\/\/tempuri.org\/sdnList.xsd\", \"testdata\/sdn.xsd\")\n}\n\nfunc testGen(t *testing.T, ns string, files ...string) {\n\tfile, err := ioutil.TempFile(\"\", 
\"xsdgen\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.Remove(file.Name())\n\n\tvar cfg Config\n\tcfg.Option(DefaultOptions...)\n\tcfg.Option(LogOutput((*testLogger)(t)))\n\n\targs := []string{\"-v\", \"-o\", file.Name(), \"-ns\", ns}\n\terr = cfg.GenCLI(append(args, files...)...)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif data, err := ioutil.ReadFile(file.Name()); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tt.Logf(\"\\n%s\\n\", data)\n\t}\n}\n<commit_msg>Add test case for SOAP 1.1 schema<commit_after>package xsdgen\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"testing\"\n)\n\nfunc glob(dir ...string) []string {\n\tfiles, err := filepath.Glob(filepath.Join(dir...))\n\tif err != nil {\n\t\tpanic(\"error in glob util function: \" + err.Error())\n\t}\n\treturn files\n}\n\ntype testLogger testing.T\n\nfunc (t *testLogger) Printf(format string, v ...interface{}) {\n\tt.Logf(format, v...)\n}\n\nfunc TestLibrarySchema(t *testing.T) {\n\ttestGen(t, \"http:\/\/dyomedea.com\/ns\/library\", \"testdata\/library.xsd\")\n}\nfunc TestPurchasOrderSchema(t *testing.T) {\n\ttestGen(t, \"http:\/\/www.example.com\/PO1\", \"testdata\/po1.xsd\")\n}\nfunc TestUSTreasureSDN(t *testing.T) {\n\ttestGen(t, \"http:\/\/tempuri.org\/sdnList.xsd\", \"testdata\/sdn.xsd\")\n}\nfunc TestSoap(t *testing.T) {\n\ttestGen(t, \"http:\/\/schemas.xmlsoap.org\/soap\/encoding\/\", \"testdata\/soap11.xsd\")\n}\n\nfunc testGen(t *testing.T, ns string, files ...string) {\n\tfile, err := ioutil.TempFile(\"\", \"xsdgen\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.Remove(file.Name())\n\n\tvar cfg Config\n\tcfg.Option(DefaultOptions...)\n\tcfg.Option(LogOutput((*testLogger)(t)))\n\n\targs := []string{\"-v\", \"-o\", file.Name(), \"-ns\", ns}\n\terr = cfg.GenCLI(append(args, files...)...)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif data, err := ioutil.ReadFile(file.Name()); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tt.Logf(\"\\n%s\\n\", data)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package brightbox\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/brightbox\/gobrightbox\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n)\n\nconst (\n\tmapped = \"mapped\"\n\tunmapped = \"unmapped\"\n)\n\nfunc resourceBrightboxCloudip() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceBrightboxCloudipCreate,\n\t\tRead: resourceBrightboxCloudipRead,\n\t\tUpdate: resourceBrightboxCloudipUpdate,\n\t\tDelete: resourceBrightboxCloudipDelete,\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: schema.ImportStatePassthrough,\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\n\t\t\t\"target\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\n\t\t\t\"status\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"locked\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"public_ip\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"fqdn\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"reverse_dns\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc 
resourceBrightboxCloudipCreate(\n\td *schema.ResourceData,\n\tmeta interface{},\n) error {\n\tclient := meta.(*CompositeClient).ApiClient\n\n\tlog.Printf(\"[INFO] Creating CloudIP\")\n\tcloudip_opts := &brightbox.CloudIPOptions{}\n\terr := addUpdateableCloudipOptions(d, cloudip_opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcloudip, err := client.CreateCloudIP(cloudip_opts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating Cloud IP: %s\", err)\n\t}\n\n\td.SetId(cloudip.Id)\n\n\tif target_id, ok := d.GetOk(\"target\"); ok {\n\t\tcloudip, err = assignCloudIP(client, cloudip.Id, target_id.(string))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tsetCloudipAttributes(d, cloudip)\n\n\treturn nil\n}\n\nfunc assignCloudIP(\n\tclient *brightbox.Client,\n\tcloudip_id string,\n\ttarget_id string,\n) (*brightbox.CloudIP, error) {\n\tlog.Printf(\"[INFO] Assigning Cloud IP %s to target %s\", cloudip_id, target_id)\n\terr := client.MapCloudIP(cloudip_id, target_id)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error assigning Cloud IP %s to target %s: %s\", cloudip_id, target_id, err)\n\t}\n\tcloudip, err := waitForMappedCloudIp(client, cloudip_id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cloudip, err\n}\n\nfunc unmapCloudIP(\n\tclient *brightbox.Client,\n\tcloudip_id string,\n) error {\n\tlog.Printf(\"[INFO] Checking mapping of Cloud IP %s\", cloudip_id)\n\tcloudip, err := client.CloudIP(cloudip_id)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error retrieving details of Cloud IP %s: %s\", cloudip_id, err)\n\t}\n\tif cloudip.Status == mapped {\n\t\tlog.Printf(\"[INFO] Unmapping Cloud IP %s\", cloudip_id)\n\t\terr := client.UnMapCloudIP(cloudip_id)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error unmapping Cloud IP %s: %s\", cloudip_id, err)\n\t\t}\n\t\t_, err = waitForUnmappedCloudIp(client, cloudip_id)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tlog.Printf(\"[DEBUG] Cloud IP %s is already unmapped\", cloudip_id)\n\t}\n\treturn nil\n}\n\nfunc waitForCloudip(\n\tclient *brightbox.Client,\n\tcloudip_id string,\n\tpending string,\n\ttarget string,\n) (*brightbox.CloudIP, error) {\n\tstateConf := resource.StateChangeConf{\n\t\tPending: []string{pending},\n\t\tTarget: []string{target},\n\t\tRefresh: cloudipStateRefresh(client, cloudip_id),\n\t\tTimeout: 5 * time.Minute,\n\t\tMinTimeout: 3 * time.Second,\n\t}\n\n\tactive_cloudip, err := stateConf.WaitForState()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn active_cloudip.(*brightbox.CloudIP), err\n}\n\nfunc waitForMappedCloudIp(\n\tclient *brightbox.Client,\n\tcloudip_id string,\n) (*brightbox.CloudIP, error) {\n\treturn waitForCloudip(client, cloudip_id, unmapped, mapped)\n}\n\nfunc waitForUnmappedCloudIp(\n\tclient *brightbox.Client,\n\tcloudip_id string,\n) (*brightbox.CloudIP, error) {\n\treturn waitForCloudip(client, cloudip_id, mapped, unmapped)\n}\n\nfunc cloudipStateRefresh(client *brightbox.Client, cloudip_id string) resource.StateRefreshFunc {\n\treturn func() (interface{}, string, error) {\n\t\tcloudip, err := client.CloudIP(cloudip_id)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error on Cloud IP State Refresh: %s\", err)\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\treturn cloudip, cloudip.Status, nil\n\t}\n}\n\nfunc setCloudipAttributes(\n\td *schema.ResourceData,\n\tcloudip *brightbox.CloudIP,\n) {\n\td.Set(\"name\", cloudip.Name)\n\td.Set(\"public_ip\", cloudip.PublicIP)\n\td.Set(\"status\", cloudip.Status)\n\td.Set(\"locked\", cloudip.Locked)\n\td.Set(\"reverse_dns\", 
cloudip.ReverseDns)\n\td.Set(\"fqdn\", cloudip.Fqdn)\n\n}\n\nfunc resourceBrightboxCloudipRead(\n\td *schema.ResourceData,\n\tmeta interface{},\n) error {\n\tclient := meta.(*CompositeClient).ApiClient\n\n\tcloudip, err := client.CloudIP(d.Id())\n\tif err != nil {\n\t\tif strings.HasPrefix(err.Error(), \"missing_resource:\") {\n\t\t\tlog.Printf(\"[WARN] CloudIP not found, removing from state: %s\", d.Id())\n\t\t\td.SetId(\"\")\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"Error retrieving Cloud IP details: %s\", err)\n\t}\n\n\tsetCloudipAttributes(d, cloudip)\n\n\treturn nil\n}\n\nfunc resourceBrightboxCloudipDelete(\n\td *schema.ResourceData,\n\tmeta interface{},\n) error {\n\tclient := meta.(*CompositeClient).ApiClient\n\treturn removeCloudIP(client, d.Id())\n}\n\nfunc removeCloudIP(client *brightbox.Client, id string) error {\n\tlog.Printf(\"[DEBUG] Unmapping Cloud IP %s\", id)\n\terr := unmapCloudIP(client, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"[INFO] Deleting Cloud IP %s\", id)\n\terr = client.DestroyCloudIP(id)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error deleting Cloud IP (%s): %s\", id, err)\n\t}\n\treturn nil\n}\n\nfunc resourceBrightboxCloudipUpdate(\n\td *schema.ResourceData,\n\tmeta interface{},\n) error {\n\tclient := meta.(*CompositeClient).ApiClient\n\n\td.Partial(true)\n\n\tif d.HasChange(\"target\") {\n\t\terr := unmapCloudIP(client, d.Id())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif target_id, ok := d.GetOk(\"target\"); ok {\n\t\t\t_, err := assignCloudIP(client, d.Id(), target_id.(string))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\td.SetPartial(\"target\")\n\t}\n\n\tcloudip_opts := &brightbox.CloudIPOptions{\n\t\tId: d.Id(),\n\t}\n\terr := addUpdateableCloudipOptions(d, cloudip_opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"[DEBUG] Cloud IP update configuration: %#v\", cloudip_opts)\n\n\tcloudip, err := client.UpdateCloudIP(cloudip_opts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error updating Cloud IP (%s): %s\", cloudip_opts.Id, err)\n\t}\n\n\tsetCloudipAttributes(d, cloudip)\n\td.Partial(false)\n\treturn nil\n}\n\nfunc addUpdateableCloudipOptions(\n\td *schema.ResourceData,\n\topts *brightbox.CloudIPOptions,\n) error {\n\tassign_string(d, &opts.Name, \"name\")\n\tassign_string(d, &opts.ReverseDns, \"reverse_dns\")\n\treturn nil\n}\n<commit_msg>Refactor Cloud Ip<commit_after>package brightbox\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/brightbox\/gobrightbox\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n)\n\nconst (\n\tmapped = \"mapped\"\n\tunmapped = \"unmapped\"\n)\n\nfunc resourceBrightboxCloudip() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceBrightboxCloudipCreate,\n\t\tRead: resourceBrightboxCloudipRead,\n\t\tUpdate: resourceBrightboxCloudipUpdate,\n\t\tDelete: resourceBrightboxCloudipDelete,\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: schema.ImportStatePassthrough,\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\n\t\t\t\"target\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\n\t\t\t\"status\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"locked\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: 
true,\n\t\t\t},\n\n\t\t\t\"public_ip\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"fqdn\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"reverse_dns\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceBrightboxCloudipCreate(\n\td *schema.ResourceData,\n\tmeta interface{},\n) error {\n\tclient := meta.(*CompositeClient).ApiClient\n\n\tlog.Printf(\"[INFO] Creating CloudIP\")\n\tcloudip_opts := &brightbox.CloudIPOptions{}\n\terr := addUpdateableCloudipOptions(d, cloudip_opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcloudip, err := client.CreateCloudIP(cloudip_opts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating Cloud IP: %s\", err)\n\t}\n\n\td.SetId(cloudip.Id)\n\n\tif target_id, ok := d.GetOk(\"target\"); ok {\n\t\tcloudip, err = assignCloudIP(client, cloudip.Id, target_id.(string))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn setCloudipAttributes(d, cloudip)\n}\n\nfunc resourceBrightboxCloudipRead(\n\td *schema.ResourceData,\n\tmeta interface{},\n) error {\n\tclient := meta.(*CompositeClient).ApiClient\n\n\tcloudip, err := client.CloudIP(d.Id())\n\tif err != nil {\n\t\tif strings.HasPrefix(err.Error(), \"missing_resource:\") {\n\t\t\tlog.Printf(\"[WARN] CloudIP not found, removing from state: %s\", d.Id())\n\t\t\td.SetId(\"\")\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"Error retrieving Cloud IP details: %s\", err)\n\t}\n\n\treturn setCloudipAttributes(d, cloudip)\n}\n\nfunc resourceBrightboxCloudipDelete(\n\td *schema.ResourceData,\n\tmeta interface{},\n) error {\n\tclient := meta.(*CompositeClient).ApiClient\n\treturn removeCloudIP(client, d.Id())\n}\n\nfunc resourceBrightboxCloudipUpdate(\n\td *schema.ResourceData,\n\tmeta interface{},\n) error {\n\tclient := meta.(*CompositeClient).ApiClient\n\n\td.Partial(true)\n\n\tif d.HasChange(\"target\") {\n\t\terr := unmapCloudIP(client, d.Id())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif target_id, ok := d.GetOk(\"target\"); ok {\n\t\t\t_, err := assignCloudIP(client, d.Id(), target_id.(string))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\td.SetPartial(\"target\")\n\t}\n\n\tcloudip_opts := &brightbox.CloudIPOptions{\n\t\tId: d.Id(),\n\t}\n\terr := addUpdateableCloudipOptions(d, cloudip_opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"[DEBUG] Cloud IP update configuration: %#v\", cloudip_opts)\n\n\tcloudip, err := client.UpdateCloudIP(cloudip_opts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error updating Cloud IP (%s): %s\", cloudip_opts.Id, err)\n\t}\n\n\treturn setCloudipAttributes(d, cloudip)\n}\n\nfunc assignCloudIP(\n\tclient *brightbox.Client,\n\tcloudip_id string,\n\ttarget_id string,\n) (*brightbox.CloudIP, error) {\n\tlog.Printf(\"[INFO] Assigning Cloud IP %s to target %s\", cloudip_id, target_id)\n\terr := client.MapCloudIP(cloudip_id, target_id)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error assigning Cloud IP %s to target %s: %s\", cloudip_id, target_id, err)\n\t}\n\tcloudip, err := waitForMappedCloudIp(client, cloudip_id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cloudip, err\n}\n\nfunc unmapCloudIP(\n\tclient *brightbox.Client,\n\tcloudip_id string,\n) error {\n\tlog.Printf(\"[INFO] Checking mapping of Cloud IP %s\", cloudip_id)\n\tcloudip, err := client.CloudIP(cloudip_id)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error retrieving 
details of Cloud IP %s: %s\", cloudip_id, err)\n\t}\n\tif cloudip.Status == mapped {\n\t\tlog.Printf(\"[INFO] Unmapping Cloud IP %s\", cloudip_id)\n\t\terr := client.UnMapCloudIP(cloudip_id)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error unmapping Cloud IP %s: %s\", cloudip_id, err)\n\t\t}\n\t\t_, err = waitForUnmappedCloudIp(client, cloudip_id)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tlog.Printf(\"[DEBUG] Cloud IP %s is already unmapped\", cloudip_id)\n\t}\n\treturn nil\n}\n\nfunc waitForCloudip(\n\tclient *brightbox.Client,\n\tcloudip_id string,\n\tpending string,\n\ttarget string,\n) (*brightbox.CloudIP, error) {\n\tstateConf := resource.StateChangeConf{\n\t\tPending: []string{pending},\n\t\tTarget: []string{target},\n\t\tRefresh: cloudipStateRefresh(client, cloudip_id),\n\t\tTimeout: 5 * time.Minute,\n\t\tMinTimeout: 3 * time.Second,\n\t}\n\n\tactive_cloudip, err := stateConf.WaitForState()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn active_cloudip.(*brightbox.CloudIP), err\n}\n\nfunc waitForMappedCloudIp(\n\tclient *brightbox.Client,\n\tcloudip_id string,\n) (*brightbox.CloudIP, error) {\n\treturn waitForCloudip(client, cloudip_id, unmapped, mapped)\n}\n\nfunc waitForUnmappedCloudIp(\n\tclient *brightbox.Client,\n\tcloudip_id string,\n) (*brightbox.CloudIP, error) {\n\treturn waitForCloudip(client, cloudip_id, mapped, unmapped)\n}\n\nfunc cloudipStateRefresh(client *brightbox.Client, cloudip_id string) resource.StateRefreshFunc {\n\treturn func() (interface{}, string, error) {\n\t\tcloudip, err := client.CloudIP(cloudip_id)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error on Cloud IP State Refresh: %s\", err)\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\treturn cloudip, cloudip.Status, nil\n\t}\n}\n\nfunc setCloudipAttributes(\n\td *schema.ResourceData,\n\tcloudip *brightbox.CloudIP,\n) error {\n\td.Set(\"name\", cloudip.Name)\n\td.Set(\"public_ip\", cloudip.PublicIP)\n\td.Set(\"status\", cloudip.Status)\n\td.Set(\"locked\", cloudip.Locked)\n\td.Set(\"reverse_dns\", cloudip.ReverseDns)\n\td.Set(\"fqdn\", cloudip.Fqdn)\n\td.Partial(false)\n\treturn nil\n}\n\nfunc removeCloudIP(client *brightbox.Client, id string) error {\n\tlog.Printf(\"[DEBUG] Unmapping Cloud IP %s\", id)\n\terr := unmapCloudIP(client, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"[INFO] Deleting Cloud IP %s\", id)\n\terr = client.DestroyCloudIP(id)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error deleting Cloud IP (%s): %s\", id, err)\n\t}\n\treturn nil\n}\n\nfunc addUpdateableCloudipOptions(\n\td *schema.ResourceData,\n\topts *brightbox.CloudIPOptions,\n) error {\n\tassign_string(d, &opts.Name, \"name\")\n\tassign_string(d, &opts.ReverseDns, \"reverse_dns\")\n\treturn nil\n}\n<|endoftext|>"}
{"text":"<commit_before>package fabric\n\n\/\/ Basic imports\nimport (\n\t\"context\"\n\t\"net\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/suite\"\n)\n\n\/\/ TransportTCPTestSuite -\ntype TransportTCPTestSuite struct {\n\tsuite.Suite\n}\n\nfunc (suite *TransportTCPTestSuite) SetupTest() {\n\n}\n\nfunc (suite *TransportTCPTestSuite) TestCanDial() {\n\ttcp := NewTransportTCP(\"127.0.0.1\", 0)\n\n\taddrsValid := map[string]bool{\n\t\t\"http:something.com:80\/ping\": false,\n\t\t\"tcp:some-dns.com:90\/ping\": true,\n\t\t\"tcp:1.1.1.1:100\/ping\": true,\n\t\t\"\/tcp:1.1.1.1:100\/ping\": false,\n\t}\n\n\tfor addr, can := range addrsValid {\n\t\tres, err := tcp.CanDial(NewAddress(addr))\n\t\tsuite.Assert().Equal(can, res, \"Addr %s should return %t\", 
addr, can)\n\t\tsuite.Assert().Nil(err, \"Addr %s should not return error\", addr)\n\t}\n\n\taddrsInvalid := map[string]bool{\n\t\t\"tcp:some-dns.com\/ping\": false,\n\t\t\"tcp\/some-dns.com\/ping\": false,\n\t\t\"tcp::\/some-dns.com\/ping\": false,\n\t\t\"tcp:some-dns.com:9999990\/ping\": false,\n\t\t\"tcp:some-dns.com:0\/ping\": false,\n\t\t\"tcp:some-dns.com:\/ping\": false,\n\t}\n\n\tfor addr, can := range addrsInvalid {\n\t\tres, err := tcp.CanDial(NewAddress(addr))\n\t\tsuite.Assert().Equal(can, res, \"Addr %s should return %t\", addr, can)\n\t\tsuite.Assert().NotNil(err, \"Addr %s should return an error\", addr)\n\t}\n}\n\nfunc (suite *TransportTCPTestSuite) TestListenSuccess() {\n\twg := &sync.WaitGroup{}\n\twg.Add(1)\n\n\thandled := 0\n\n\thandler := func(context.Context, net.Conn) error {\n\t\thandled++\n\t\twg.Done()\n\t\treturn nil\n\t}\n\n\tctx := context.Background()\n\ttcps := NewTransportTCP(\"0.0.0.0\", 0)\n\terr := tcps.Listen(ctx, handler)\n\tsuite.Assert().Nil(err)\n\n\taddrs := tcps.Addresses()\n\tsuite.Assert().Len(addrs, 1)\n\n\ttcpc := NewTransportTCP(\"0.0.0.0\", 0)\n\tconn, err := tcpc.DialContext(ctx, NewAddress(addrs[0]))\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tdefer close(done)\n\t\twg.Wait()\n\t}()\n\n\tselect {\n\tcase <-done:\n\tcase <-time.After(time.Second * 2):\n\t}\n\n\tsuite.Assert().Nil(err)\n\tsuite.Assert().NotNil(conn)\n\tsuite.Assert().Equal(1, handled)\n}\n\nfunc (suite *TransportTCPTestSuite) TestListenMultipleSuccess() {\n\twg := &sync.WaitGroup{}\n\twg.Add(4)\n\n\thandled := 0\n\n\thandler := func(context.Context, net.Conn) error {\n\t\thandled++\n\t\twg.Done()\n\t\treturn nil\n\t}\n\n\tctx := context.Background()\n\ttcps := NewTransportTCP(\"0.0.0.0\", 0)\n\terr := tcps.Listen(ctx, handler)\n\tsuite.Assert().Nil(err)\n\n\taddrs := tcps.Addresses()\n\tsuite.Assert().Len(addrs, 1)\n\n\ttcpc := NewTransportTCP(\"0.0.0.0\", 0)\n\tconn1, err1 := tcpc.DialContext(ctx, NewAddress(addrs[0]))\n\tconn2, err2 := tcpc.DialContext(ctx, NewAddress(addrs[0]))\n\tconn3, err3 := tcpc.DialContext(ctx, NewAddress(addrs[0]))\n\tconn4, err4 := tcpc.DialContext(ctx, NewAddress(addrs[0]))\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tdefer close(done)\n\t\twg.Wait()\n\t}()\n\n\tselect {\n\tcase <-done:\n\tcase <-time.After(time.Second * 2):\n\t}\n\n\tsuite.Assert().Nil(err1)\n\tsuite.Assert().NotNil(conn1)\n\tsuite.Assert().Nil(err2)\n\tsuite.Assert().NotNil(conn2)\n\tsuite.Assert().Nil(err3)\n\tsuite.Assert().NotNil(conn3)\n\tsuite.Assert().Nil(err4)\n\tsuite.Assert().NotNil(conn4)\n\tsuite.Assert().Equal(4, handled)\n}\n\nfunc TestTransportTCPTestSuite(t *testing.T) {\n\tsuite.Run(t, new(TransportTCPTestSuite))\n}\n<commit_msg>Update test for transport tcp<commit_after>package fabric\n\n\/\/ Basic imports\nimport (\n\t\"context\"\n\t\"net\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/suite\"\n)\n\n\/\/ TransportTCPTestSuite -\ntype TransportTCPTestSuite struct {\n\tsuite.Suite\n}\n\nfunc (suite *TransportTCPTestSuite) SetupTest() {\n\n}\n\nfunc (suite *TransportTCPTestSuite) TestCanDial() {\n\ttcp := NewTransportTCP(\"127.0.0.1\", 0)\n\n\taddrsValid := map[string]bool{\n\t\t\"http:something.com:80\/ping\": false,\n\t\t\"tcp:some-dns.com:90\/ping\": true,\n\t\t\"tcp:1.1.1.1:100\/ping\": true,\n\t\t\"\/tcp:1.1.1.1:100\/ping\": false,\n\t}\n\n\tfor addr, can := range addrsValid {\n\t\tres, err := tcp.CanDial(NewAddress(addr))\n\t\tsuite.Assert().Equal(can, res, \"Addr %s should return %t\", addr, 
can)\n\t\tsuite.Assert().Nil(err, \"Addr %s should not return error\", addr)\n\t}\n\n\taddrsInvalid := map[string]bool{\n\t\t\"tcp:some-dns.com\/ping\": false,\n\t\t\"tcp\/some-dns.com\/ping\": false,\n\t\t\"tcp::\/some-dns.com\/ping\": false,\n\t\t\"tcp:some-dns.com:9999990\/ping\": false,\n\t\t\"tcp:some-dns.com:0\/ping\": false,\n\t\t\"tcp:some-dns.com:\/ping\": false,\n\t}\n\n\tfor addr, can := range addrsInvalid {\n\t\tres, err := tcp.CanDial(NewAddress(addr))\n\t\tsuite.Assert().Equal(can, res, \"Addr %s should return %t\", addr, can)\n\t\tsuite.Assert().NotNil(err, \"Addr %s should return an error\", addr)\n\t}\n}\n\nfunc (suite *TransportTCPTestSuite) TestListenSuccess() {\n\twg := &sync.WaitGroup{}\n\twg.Add(1)\n\n\thandled := 0\n\n\thandler := func(context.Context, net.Conn) error {\n\t\thandled++\n\t\twg.Done()\n\t\treturn nil\n\t}\n\n\tctx := context.Background()\n\ttcps := NewTransportTCP(\"0.0.0.0\", 0)\n\terr := tcps.Listen(ctx, handler)\n\tsuite.Assert().Nil(err)\n\n\taddrs := tcps.Addresses()\n\tsuite.Assert().NotEmpty(addrs)\n\n\ttcpc := NewTransportTCP(\"0.0.0.0\", 0)\n\tconn, err := tcpc.DialContext(ctx, NewAddress(addrs[0]))\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tdefer close(done)\n\t\twg.Wait()\n\t}()\n\n\tselect {\n\tcase <-done:\n\tcase <-time.After(time.Second * 2):\n\t}\n\n\tsuite.Assert().Nil(err)\n\tsuite.Assert().NotNil(conn)\n\tsuite.Assert().Equal(1, handled)\n}\n\nfunc (suite *TransportTCPTestSuite) TestListenMultipleSuccess() {\n\twg := &sync.WaitGroup{}\n\twg.Add(4)\n\n\thandled := 0\n\n\thandler := func(context.Context, net.Conn) error {\n\t\thandled++\n\t\twg.Done()\n\t\treturn nil\n\t}\n\n\tctx := context.Background()\n\ttcps := NewTransportTCP(\"0.0.0.0\", 0)\n\terr := tcps.Listen(ctx, handler)\n\tsuite.Assert().Nil(err)\n\n\taddrs := tcps.Addresses()\n\tsuite.Assert().NotEmpty(addrs)\n\n\ttcpc := NewTransportTCP(\"0.0.0.0\", 0)\n\tconn1, err1 := tcpc.DialContext(ctx, NewAddress(addrs[0]))\n\tconn2, err2 := tcpc.DialContext(ctx, NewAddress(addrs[0]))\n\tconn3, err3 := tcpc.DialContext(ctx, NewAddress(addrs[0]))\n\tconn4, err4 := tcpc.DialContext(ctx, NewAddress(addrs[0]))\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tdefer close(done)\n\t\twg.Wait()\n\t}()\n\n\tselect {\n\tcase <-done:\n\tcase <-time.After(time.Second * 2):\n\t}\n\n\tsuite.Assert().Nil(err1)\n\tsuite.Assert().NotNil(conn1)\n\tsuite.Assert().Nil(err2)\n\tsuite.Assert().NotNil(conn2)\n\tsuite.Assert().Nil(err3)\n\tsuite.Assert().NotNil(conn3)\n\tsuite.Assert().Nil(err4)\n\tsuite.Assert().NotNil(conn4)\n\tsuite.Assert().Equal(4, handled)\n}\n\nfunc TestTransportTCPTestSuite(t *testing.T) {\n\tsuite.Run(t, new(TransportTCPTestSuite))\n}\n<|endoftext|>"}
{"text":"<commit_before>
kite.New(Name, Version)\n\tk.Config = kiteconfig.MustGet()\n\tk.Config.Port = conf.Port\n\n\tif conf.Region != \"\" {\n\t\tk.Config.Region = conf.Region\n\t}\n\n\tif conf.Environment != \"\" {\n\t\tk.Config.Environment = conf.Environment\n\t}\n\n\tstats, err := metrics.NewDogStatsD(\"terraformer\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tt := terraformer.New()\n\tt.Metrics = stats\n\tt.Log = common.NewLogger(Name, conf.Debug)\n\n\t\/\/ track every kind of call\n\tk.PreHandleFunc(createTracker(stats))\n\n\t\/\/ Terraformer handling methods\n\tk.HandleFunc(\"apply\", t.Apply)\n\tk.HandleFunc(\"destroy\", t.Destroy)\n\tk.HandleFunc(\"plan\", t.Plan)\n\n\tk.HandleHTTPFunc(\"\/healthCheck\", artifact.HealthCheckHandler(Name))\n\tk.HandleHTTPFunc(\"\/version\", artifact.VersionHandler())\n\n\treturn k\n}\n\nfunc createTracker(metrics *metrics.DogStatsD) kite.HandlerFunc {\n\treturn func(r *kite.Request) (interface{}, error) {\n\t\tmetrics.Count(\n\t\t\t\"functionCallCount\", \/\/ metric name\n\t\t\t1, \/\/ count\n\t\t\t[]string{\"funcName:\" + r.Method}, \/\/ tags for metric call\n\t\t\t1.0, \/\/ rate\n\t\t)\n\n\t\treturn true, nil\n\t}\n}\n<commit_msg>Terraformer: use worker name for metrics<commit_after>package main\n\nimport (\n\t\"koding\/artifact\"\n\t\"koding\/kites\/common\"\n\t\"koding\/kites\/terraformer\"\n\n\t\"github.com\/koding\/kite\"\n\tkiteconfig \"github.com\/koding\/kite\/config\"\n\t\"github.com\/koding\/metrics\"\n\t\"github.com\/koding\/multiconfig\"\n)\n\nvar (\n\tName = \"terraformer\"\n\tVersion = \"0.0.1\"\n)\n\nfunc main() {\n\tconf := &terraformer.Config{}\n\n\t\/\/ Load the config, it's reads environment variables or from flags\n\tmulticonfig.New().MustLoad(conf)\n\n\tk := newKite(conf)\n\n\tif conf.Debug {\n\t\tk.Log.Info(\"Debug mode enabled\")\n\t}\n\n\tregisterURL := k.RegisterURL(true)\n\n\tif err := k.RegisterForever(registerURL); err != nil {\n\t\tk.Log.Fatal(err.Error())\n\t}\n\n\tk.Run()\n}\n\nfunc newKite(conf *terraformer.Config) *kite.Kite {\n\tk := kite.New(Name, Version)\n\tk.Config = kiteconfig.MustGet()\n\tk.Config.Port = conf.Port\n\n\tif conf.Region != \"\" {\n\t\tk.Config.Region = conf.Region\n\t}\n\n\tif conf.Environment != \"\" {\n\t\tk.Config.Environment = conf.Environment\n\t}\n\n\tstats, err := metrics.NewDogStatsD(Name)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tt := terraformer.New()\n\tt.Metrics = stats\n\tt.Log = common.NewLogger(Name, conf.Debug)\n\n\t\/\/ track every kind of call\n\tk.PreHandleFunc(createTracker(stats))\n\n\t\/\/ Terraformer handling methods\n\tk.HandleFunc(\"apply\", t.Apply)\n\tk.HandleFunc(\"destroy\", t.Destroy)\n\tk.HandleFunc(\"plan\", t.Plan)\n\n\tk.HandleHTTPFunc(\"\/healthCheck\", artifact.HealthCheckHandler(Name))\n\tk.HandleHTTPFunc(\"\/version\", artifact.VersionHandler())\n\n\treturn k\n}\n\nfunc createTracker(metrics *metrics.DogStatsD) kite.HandlerFunc {\n\treturn func(r *kite.Request) (interface{}, error) {\n\t\tmetrics.Count(\n\t\t\t\"functionCallCount\", \/\/ metric name\n\t\t\t1, \/\/ count\n\t\t\t[]string{\"funcName:\" + r.Method}, \/\/ tags for metric call\n\t\t\t1.0, \/\/ rate\n\t\t)\n\n\t\treturn true, nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ Copyright (c) 2014, Anupam Kapoor. All rights reserved.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions are met:\n\n\/\/ 1. 
Redistributions of source code must retain the above copyright notice, this\n\/\/ list of conditions and the following disclaimer.\n\/\/ 2. Redistributions in binary form must reproduce the above copyright notice,\n\/\/ this list of conditions and the following disclaimer in the documentation\n\/\/ and\/or other materials provided with the distribution.\n\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n\/\/ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n\/\/ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n\/\/ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n\/\/ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n\/\/ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n\/\/ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n\/\/ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n\/\/ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n\/\/ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\/\/\n\/\/ Author: anupam.kapoor@gmail.com (Anupam Kapoor)\n\/\/\n\/\/ this file implements the shortest path from source to destination\n\/\/ vertices in symbol graphs\n\/\/\npackage main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/anupamk\/common-utilz\/graph\/symbol_graph\"\n\t\"github.com\/anupamk\/common-utilz\/graph\/traversal\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n)\n\n\/\/\n\/\/ program input control\n\/\/\nvar (\n\tdata_file string \/\/ data-file\n\tseparator string \/\/ field-separator\n\tsource string \/\/ source-vertex\n\tverbose_debug bool \/\/ just-what-it-sez\n)\n\n\/\/ setup various command line parameters\nfunc init() {\n\t\/\/ application parameterz\n\tflag.StringVar(&data_file, \"input-file\", \"\", \"symbol-graph data file name\")\n\tflag.StringVar(&separator, \"separator\", \"\", \"field separator in the input\")\n\tflag.StringVar(&source, \"source\", \"\", \"source vertex\")\n\n\t\/\/ debugging stuff\n\tflag.BoolVar(&verbose_debug, \"debug\", true, \"generate verbose debugging\")\n\n}\n\n\/\/ rudimentary checks and parameter cleanups\nfunc sanitize_and_validate_cmdline_params() {\n\tif data_file = strings.TrimSpace(data_file); len(data_file) == 0 {\n\t\tfmt.Fprintf(os.Stderr, \"usage-error: bad data-file '%s'\\n\", data_file)\n\t\tflag.Usage()\n\n\t\tos.Exit(255)\n\t}\n\n\t\/\/ don't mess with the separator\n\tif len(separator) == 0 {\n\t\tfmt.Fprintf(os.Stderr, \"usage-error: bad separator '%s'\\n\", separator)\n\t\tflag.Usage()\n\n\t\tos.Exit(254)\n\t}\n\n\tif source = strings.TrimSpace(source); len(source) == 0 {\n\t\tfmt.Fprintf(os.Stderr, \"usage-error: bad source '%s'\\n\", source)\n\t\tflag.Usage()\n\n\t\tos.Exit(253)\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\tsanitize_and_validate_cmdline_params()\n\n\tif verbose_debug {\n\t\tlog.Printf(\"-- degree-of-separation parameter dump: input-file: '%s', separator: '%s', source-vertex: '%s' --\\n\",\n\t\t\tdata_file, separator, source)\n\t}\n\n\t\/\/ parse the file-name and create a symbol graph\n\tsg, err := symbol_graph.LoadFromFile(data_file, separator)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif verbose_debug {\n\t\tsg_g := sg.G()\n\t\tlog.Printf(\"-- symbol-graph created. 
vertices: '%d', edges: '%d' --\\n\", sg_g.V(), sg_g.E())\n\t}\n\n\tif !sg.Contains(source) {\n\t\tfmt.Fprintf(os.Stderr, \"fatal-error: '%s' not found in data-base\\n\", source)\n\t\tflag.Usage()\n\n\t\tos.Exit(252)\n\t}\n\n\t\/\/\n\t\/\/ ok, so by now, we have a valid graph, and a valid source,\n\t\/\/ let's answer some questions...\n\t\/\/\n\n\t\/\/ determine all paths from source -> other vertices on the\n\t\/\/ graph\n\tall_paths, err := traversal.SingleSourceShortestPaths(sg.G(), sg.Index(source))\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"fatal-error: invalid source '%s'\\n\", source)\n\t\tos.Exit(128)\n\t}\n\n\t\/\/ query-rsp loop\n\tfor stdin_reader := bufio.NewReader(os.Stdin); ; {\n\t\tfmt.Fprintf(os.Stdout, \"degree-of-separation --> \")\n\n\t\t\/\/ read and sanitize input\n\t\tdest_name, err := stdin_reader.ReadString('\\n')\n\t\tif err != nil || err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tif dest_name = strings.TrimSpace(dest_name); len(dest_name) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif verbose_debug {\n\t\t\tlog.Printf(\"-- user-input: '%s' --\\n\", dest_name)\n\t\t}\n\n\t\t\/\/ got something useful, let's see do we know it ?\n\t\tif !sg.Contains(dest_name) {\n\t\t\tlog.Printf(\"error: '%s' doesn't exist\\n\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ is there a path to destination ?\n\t\tdst_vertex := sg.Index(dest_name)\n\n\t\tif !all_paths.PathExists(dst_vertex) {\n\t\t\tlog.Printf(\"no path to: '%s'\\n\", dest_name)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ yes there is a path, enumerate it...\n\t\tsrc_dst_path := all_paths.PathTo(dst_vertex)\n\n\t\tfmt.Fprintf(os.Stdout, \"%s\\n\", source)\n\t\tfor _, v := range src_dst_path[1:] {\n\t\t\tvertex_name, _ := sg.Name(v)\n\t\t\tfmt.Fprintf(os.Stdout, \" %s\\n\", vertex_name)\n\t\t}\n\t}\n}\n<commit_msg>display path-length during traversal, and some minor corrections<commit_after>\/\/\n\/\/ Copyright (c) 2014, Anupam Kapoor. All rights reserved.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions are met:\n\n\/\/ 1. Redistributions of source code must retain the above copyright notice, this\n\/\/ list of conditions and the following disclaimer.\n\/\/ 2. Redistributions in binary form must reproduce the above copyright notice,\n\/\/ this list of conditions and the following disclaimer in the documentation\n\/\/ and\/or other materials provided with the distribution.\n\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n\/\/ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n\/\/ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n\/\/ DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n\/\/ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n\/\/ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n\/\/ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n\/\/ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n\/\/ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n\/\/ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\/\/\n\/\/ Author: anupam.kapoor@gmail.com (Anupam Kapoor)\n\/\/\n\/\/ this file implements the shortest path from source to destination\n\/\/ vertices in symbol graphs\n\/\/\npackage main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/anupamk\/common-utilz\/graph\/symbol_graph\"\n\t\"github.com\/anupamk\/common-utilz\/graph\/traversal\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n)\n\n\/\/\n\/\/ program input control\n\/\/\nvar (\n\tdata_file string \/\/ data-file\n\tseparator string \/\/ field-separator\n\tsource string \/\/ source-vertex\n\tverbose_debug bool \/\/ just-what-it-sez\n)\n\n\/\/ setup various command line parameters\nfunc init() {\n\t\/\/ application parameterz\n\tflag.StringVar(&data_file, \"input-file\", \"\", \"symbol-graph data file name\")\n\tflag.StringVar(&separator, \"separator\", \"\", \"field separator in the input\")\n\tflag.StringVar(&source, \"source\", \"\", \"source vertex\")\n\n\t\/\/ debugging stuff\n\tflag.BoolVar(&verbose_debug, \"debug\", true, \"generate verbose debugging\")\n\n}\n\n\/\/ rudimentary checks and parameter cleanups\nfunc sanitize_and_validate_cmdline_params() {\n\tif data_file = strings.TrimSpace(data_file); len(data_file) == 0 {\n\t\tfmt.Fprintf(os.Stderr, \"usage-error: bad data-file '%s'\\n\", data_file)\n\t\tflag.Usage()\n\n\t\tos.Exit(255)\n\t}\n\n\t\/\/ don't mess with the separator\n\tif len(separator) == 0 {\n\t\tfmt.Fprintf(os.Stderr, \"usage-error: bad separator '%s'\\n\", separator)\n\t\tflag.Usage()\n\n\t\tos.Exit(254)\n\t}\n\n\tif source = strings.TrimSpace(source); len(source) == 0 {\n\t\tfmt.Fprintf(os.Stderr, \"usage-error: bad source '%s'\\n\", source)\n\t\tflag.Usage()\n\n\t\tos.Exit(253)\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\tsanitize_and_validate_cmdline_params()\n\n\tif verbose_debug {\n\t\tlog.Printf(\"-- degree-of-separation parameter dump: input-file: '%s', separator: '%s', source-vertex: '%s' --\\n\",\n\t\t\tdata_file, separator, source)\n\t}\n\n\t\/\/ parse the file-name and create a symbol graph\n\tsg, err := symbol_graph.LoadFromFile(data_file, separator)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif verbose_debug {\n\t\tsg_g := sg.G()\n\t\tlog.Printf(\"-- symbol-graph created. 
vertices: '%d', edges: '%d' --\\n\", sg_g.V(), sg_g.E())\n\t}\n\n\tif !sg.Contains(source) {\n\t\tfmt.Fprintf(os.Stderr, \"fatal-error: '%s' not found in data-base\\n\", source)\n\t\tflag.Usage()\n\n\t\tos.Exit(252)\n\t}\n\n\t\/\/\n\t\/\/ ok, so by now, we have a valid graph, and a valid source,\n\t\/\/ let's answer some questions...\n\t\/\/\n\n\t\/\/ determine all paths from source -> other vertices on the\n\t\/\/ graph\n\tall_paths, err := traversal.SingleSourceShortestPaths(sg.G(), sg.Index(source))\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"fatal-error: invalid source '%s'\\n\", source)\n\t\tos.Exit(128)\n\t}\n\n\t\/\/ query-rsp loop\n\tfor stdin_reader := bufio.NewReader(os.Stdin); ; {\n\t\tfmt.Fprintf(os.Stdout, \"degree-of-separation --> \")\n\n\t\t\/\/ read and sanitize input\n\t\tdest_name, err := stdin_reader.ReadString('\\n')\n\t\tif err != nil || err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tif dest_name = strings.TrimSpace(dest_name); len(dest_name) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif verbose_debug {\n\t\t\tlog.Printf(\"-- user-input: '%s' --\\n\", dest_name)\n\t\t}\n\n\t\t\/\/ got something useful, let's see do we know it ?\n\t\tif !sg.Contains(dest_name) {\n\t\t\tlog.Printf(\"error: '%s' doesn't exist\\n\", dest_name)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ is there a path to destination ?\n\t\tdst_vertex := sg.Index(dest_name)\n\n\t\tif !all_paths.PathExists(dst_vertex) {\n\t\t\tlog.Printf(\"no path to: '%s'\\n\", dest_name)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ yes there is a path, enumerate it...\n\t\tsrc_dst_path := all_paths.PathTo(dst_vertex)\n\n\t\tfmt.Fprintf(os.Stdout, \"source: %s, path-length: %d\\npath:\\n\", source, len(src_dst_path)-1)\n\t\tfor _, v := range src_dst_path[1:] {\n\t\t\tvertex_name, _ := sg.Name(v)\n\t\t\tfmt.Fprintf(os.Stdout, \" %s\\n\", vertex_name)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package classic\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/go-oracle-terraform\/compute\"\n\t\"github.com\/hashicorp\/packer\/common\/uuid\"\n\t\"github.com\/hashicorp\/packer\/helper\/multistep\"\n\t\"github.com\/hashicorp\/packer\/packer\"\n)\n\ntype stepSecurity struct{}\n\nfunc (s *stepSecurity) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {\n\tui := state.Get(\"ui\").(packer.Ui)\n\tconfig := state.Get(\"config\").(*Config)\n\n\tcommType := \"\"\n\tif config.Comm.Type == \"ssh\" {\n\t\tcommType = \"SSH\"\n\t} else if config.Comm.Type == \"winrm\" {\n\t\tcommType = \"WINRM\"\n\t}\n\n\tui.Say(fmt.Sprintf(\"Configuring security lists and rules to enable %s access...\", commType))\n\n\tclient := state.Get(\"client\").(*compute.ComputeClient)\n\trunUUID := uuid.TimeOrderedUUID()\n\n\tnamePrefix := fmt.Sprintf(\"\/Compute-%s\/%s\/\", config.IdentityDomain, config.Username)\n\tsecListName := fmt.Sprintf(\"Packer_%s_Allow_%s_%s\", commType, config.ImageName, runUUID)\n\tsecListClient := client.SecurityLists()\n\tsecListInput := compute.CreateSecurityListInput{\n\t\tDescription: fmt.Sprintf(\"Packer-generated security list to give packer %s access\", commType),\n\t\tName: namePrefix + secListName,\n\t}\n\t_, err := secListClient.CreateSecurityList(&secListInput)\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), \"already exists\") {\n\t\t\terr = fmt.Errorf(\"Error creating security List to\"+\n\t\t\t\t\" allow Packer to connect to Oracle instance via %s: %s\", commType, err)\n\t\t\tui.Error(err.Error())\n\t\t\tstate.Put(\"error\", err)\n\t\t\treturn 
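\/* halting the step aborts the build when the security list cannot be created *\/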
multistep.ActionHalt\n\t\t}\n\t}\n\t\/\/ DOCS NOTE: user must have Compute_Operations role\n\t\/\/ Create security rule that allows Packer to connect via SSH or winRM\n\tvar application string\n\tif commType == \"SSH\" {\n\t\tapplication = \"\/oracle\/public\/ssh\"\n\t} else if commType == \"WINRM\" {\n\t\t\/\/ Check to see whether a winRM security protocol is already defined;\n\t\t\/\/ don't need to do this for SSH because it is built into the Oracle API.\n\t\tprotocolClient := client.SecurityProtocols()\n\t\twinrmProtocol := fmt.Sprintf(\"WINRM_%s\", runUUID)\n\t\tinput := compute.CreateSecurityProtocolInput{\n\t\t\tName: winrmProtocol,\n\t\t\tDescription: \"packer-generated protocol to allow winRM communicator\",\n\t\t\tDstPortSet: []string{\"5985\", \"5986\", \"443\"}, \/\/ TODO make configurable\n\t\t\tIPProtocol: \"tcp\",\n\t\t}\n\t\t_, err = protocolClient.CreateSecurityProtocol(&input)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Error creating security protocol to\"+\n\t\t\t\t\" allow Packer to connect to Oracle instance via %s: %s\", commType, err)\n\t\t\tui.Error(err.Error())\n\t\t\tstate.Put(\"error\", err)\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\t\tstate.Put(\"winrm_protocol\", winrmProtocol)\n\n\t\t\/\/ Check to see whether a winRM security application is already defined\n\t\tapplicationClient := client.SecurityApplications()\n\t\tapplication = fmt.Sprintf(\"packer_winRM_%s\", runUUID)\n\t\tapplicationInput := compute.CreateSecurityApplicationInput{\n\t\t\tDescription: \"Allows Packer to connect to instance via winRM\",\n\t\t\tDPort: \"5985-5986\",\n\t\t\tName: application,\n\t\t\tProtocol: \"TCP\",\n\t\t}\n\t\t_, err = applicationClient.CreateSecurityApplication(&applicationInput)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Error creating security application to\"+\n\t\t\t\t\" allow Packer to connect to Oracle instance via %s: %s\", commType, err)\n\t\t\tui.Error(err.Error())\n\t\t\tstate.Put(\"error\", err)\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\t\tstate.Put(\"winrm_application\", application)\n\t}\n\tsecRulesClient := client.SecRules()\n\tsecRuleName := fmt.Sprintf(\"Packer-allow-%s-Rule_%s_%s\", commType,\n\t\tconfig.ImageName, runUUID)\n\tsecRulesInput := compute.CreateSecRuleInput{\n\t\tAction: \"PERMIT\",\n\t\tApplication: application,\n\t\tDescription: \"Packer-generated security rule to allow ssh\/winrm\",\n\t\tDestinationList: \"seclist:\" + namePrefix + secListName,\n\t\tName: namePrefix + secRuleName,\n\t\tSourceList: config.SSHSourceList,\n\t}\n\n\t_, err = secRulesClient.CreateSecRule(&secRulesInput)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error creating security rule to\"+\n\t\t\t\" allow Packer to connect to Oracle instance: %s\", err)\n\t\tui.Error(err.Error())\n\t\tstate.Put(\"error\", err)\n\t\treturn multistep.ActionHalt\n\t}\n\tstate.Put(\"security_rule_name\", secRuleName)\n\tstate.Put(\"security_list\", secListName)\n\treturn multistep.ActionContinue\n}\n\nfunc (s *stepSecurity) Cleanup(state multistep.StateBag) {\n\tclient := state.Get(\"client\").(*compute.ComputeClient)\n\tui := state.Get(\"ui\").(packer.Ui)\n\tconfig := state.Get(\"config\").(*Config)\n\n\tui.Say(\"Deleting temporary rules and lists...\")\n\n\tnamePrefix := fmt.Sprintf(\"\/Compute-%s\/%s\/\", config.IdentityDomain, config.Username)\n\t\/\/ delete security rules that Packer generated\n\tsecRuleName := state.Get(\"security_rule_name\").(string)\n\tsecRulesClient := client.SecRules()\n\truleInput := compute.DeleteSecRuleInput{Name: namePrefix + secRuleName}\n\terr := 
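\/* cleanup is best-effort: delete failures below are reported to the user but do not abort *\/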
secRulesClient.DeleteSecRule(&ruleInput)\n\tif err != nil {\n\t\tui.Say(fmt.Sprintf(\"Error deleting the packer-generated security rule %s; \"+\n\t\t\t\"please delete manually. (error: %s)\", secRuleName, err.Error()))\n\t}\n\n\t\/\/ delete security list that Packer generated\n\tsecListName := state.Get(\"security_list\").(string)\n\tsecListClient := client.SecurityLists()\n\tinput := compute.DeleteSecurityListInput{Name: namePrefix + secListName}\n\terr = secListClient.DeleteSecurityList(&input)\n\tif err != nil {\n\t\tui.Say(fmt.Sprintf(\"Error deleting the packer-generated security list %s; \"+\n\t\t\t\"please delete manually. (error : %s)\", secListName, err.Error()))\n\t}\n\n\t\/\/ Some extra cleanup if we used the winRM communicator\n\tif config.Comm.Type == \"winrm\" {\n\t\t\/\/ Delete the packer-generated protocol\n\t\tprotocol := state.Get(\"winrm_protocol\").(string)\n\t\tprotocolClient := client.SecurityProtocols()\n\t\tdeleteProtocolInput := compute.DeleteSecurityProtocolInput{\n\t\t\tName: namePrefix + protocol,\n\t\t}\n\t\terr = protocolClient.DeleteSecurityProtocol(&deleteProtocolInput)\n\t\tif err != nil {\n\t\t\tui.Say(fmt.Sprintf(\"Error deleting the packer-generated winrm security protocol %s; \"+\n\t\t\t\t\"please delete manually. (error : %s)\", protocol, err.Error()))\n\t\t}\n\n\t\t\/\/ Delete the packer-generated application\n\t\tapplication := state.Get(\"winrm_application\").(string)\n\t\tapplicationClient := client.SecurityApplications()\n\t\tdeleteApplicationInput := compute.DeleteSecurityApplicationInput{\n\t\t\tName: namePrefix + application,\n\t\t}\n\t\terr = applicationClient.DeleteSecurityApplication(&deleteApplicationInput)\n\t\tif err != nil {\n\t\t\tui.Say(fmt.Sprintf(\"Error deleting the packer-generated winrm security application %s; \"+\n\t\t\t\t\"please delete manually. 
(error : %s)\", application, err.Error()))\n\t\t}\n\t}\n\n}\n<commit_msg>remove redundant security_protocol code from the winrm implementation of the oracle-classic provisioner<commit_after>package classic\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/go-oracle-terraform\/compute\"\n\t\"github.com\/hashicorp\/packer\/common\/uuid\"\n\t\"github.com\/hashicorp\/packer\/helper\/multistep\"\n\t\"github.com\/hashicorp\/packer\/packer\"\n)\n\ntype stepSecurity struct{}\n\nfunc (s *stepSecurity) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {\n\tui := state.Get(\"ui\").(packer.Ui)\n\tconfig := state.Get(\"config\").(*Config)\n\n\tcommType := \"\"\n\tif config.Comm.Type == \"ssh\" {\n\t\tcommType = \"SSH\"\n\t} else if config.Comm.Type == \"winrm\" {\n\t\tcommType = \"WINRM\"\n\t}\n\n\tui.Say(fmt.Sprintf(\"Configuring security lists and rules to enable %s access...\", commType))\n\n\tclient := state.Get(\"client\").(*compute.ComputeClient)\n\trunUUID := uuid.TimeOrderedUUID()\n\n\tnamePrefix := fmt.Sprintf(\"\/Compute-%s\/%s\/\", config.IdentityDomain, config.Username)\n\tsecListName := fmt.Sprintf(\"Packer_%s_Allow_%s_%s\", commType, config.ImageName, runUUID)\n\tsecListClient := client.SecurityLists()\n\tsecListInput := compute.CreateSecurityListInput{\n\t\tDescription: fmt.Sprintf(\"Packer-generated security list to give packer %s access\", commType),\n\t\tName: namePrefix + secListName,\n\t}\n\t_, err := secListClient.CreateSecurityList(&secListInput)\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), \"already exists\") {\n\t\t\terr = fmt.Errorf(\"Error creating security List to\"+\n\t\t\t\t\" allow Packer to connect to Oracle instance via %s: %s\", commType, err)\n\t\t\tui.Error(err.Error())\n\t\t\tstate.Put(\"error\", err)\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\t}\n\t\/\/ DOCS NOTE: user must have Compute_Operations role\n\t\/\/ Create security rule that allows Packer to connect via SSH or winRM\n\tvar application string\n\tif commType == \"SSH\" {\n\t\tapplication = \"\/oracle\/public\/ssh\"\n\t} else if commType == \"WINRM\" {\n\t\t\/\/ Check to see whether a winRM security application is already defined\n\t\tapplicationClient := client.SecurityApplications()\n\t\tapplication = fmt.Sprintf(\"packer_winRM_%s\", runUUID)\n\t\tapplicationInput := compute.CreateSecurityApplicationInput{\n\t\t\tDescription: \"Allows Packer to connect to instance via winRM\",\n\t\t\tDPort: \"5985-5986\",\n\t\t\tName: application,\n\t\t\tProtocol: \"TCP\",\n\t\t}\n\t\t_, err = applicationClient.CreateSecurityApplication(&applicationInput)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Error creating security application to\"+\n\t\t\t\t\" allow Packer to connect to Oracle instance via %s: %s\", commType, err)\n\t\t\tui.Error(err.Error())\n\t\t\tstate.Put(\"error\", err)\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\t\tstate.Put(\"winrm_application\", application)\n\t}\n\tsecRulesClient := client.SecRules()\n\tsecRuleName := fmt.Sprintf(\"Packer-allow-%s-Rule_%s_%s\", commType,\n\t\tconfig.ImageName, runUUID)\n\tsecRulesInput := compute.CreateSecRuleInput{\n\t\tAction: \"PERMIT\",\n\t\tApplication: application,\n\t\tDescription: \"Packer-generated security rule to allow ssh\/winrm\",\n\t\tDestinationList: \"seclist:\" + namePrefix + secListName,\n\t\tName: namePrefix + secRuleName,\n\t\tSourceList: config.SSHSourceList,\n\t}\n\n\t_, err = secRulesClient.CreateSecRule(&secRulesInput)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error creating 
security rule to\"+\n\t\t\t\" allow Packer to connect to Oracle instance: %s\", err)\n\t\tui.Error(err.Error())\n\t\tstate.Put(\"error\", err)\n\t\treturn multistep.ActionHalt\n\t}\n\tstate.Put(\"security_rule_name\", secRuleName)\n\tstate.Put(\"security_list\", secListName)\n\treturn multistep.ActionContinue\n}\n\nfunc (s *stepSecurity) Cleanup(state multistep.StateBag) {\n\tclient := state.Get(\"client\").(*compute.ComputeClient)\n\tui := state.Get(\"ui\").(packer.Ui)\n\tconfig := state.Get(\"config\").(*Config)\n\n\tui.Say(\"Deleting temporary rules and lists...\")\n\n\tnamePrefix := fmt.Sprintf(\"\/Compute-%s\/%s\/\", config.IdentityDomain, config.Username)\n\t\/\/ delete security rules that Packer generated\n\tsecRuleName := state.Get(\"security_rule_name\").(string)\n\tsecRulesClient := client.SecRules()\n\truleInput := compute.DeleteSecRuleInput{Name: namePrefix + secRuleName}\n\terr := secRulesClient.DeleteSecRule(&ruleInput)\n\tif err != nil {\n\t\tui.Say(fmt.Sprintf(\"Error deleting the packer-generated security rule %s; \"+\n\t\t\t\"please delete manually. (error: %s)\", secRuleName, err.Error()))\n\t}\n\n\t\/\/ delete security list that Packer generated\n\tsecListName := state.Get(\"security_list\").(string)\n\tsecListClient := client.SecurityLists()\n\tinput := compute.DeleteSecurityListInput{Name: namePrefix + secListName}\n\terr = secListClient.DeleteSecurityList(&input)\n\tif err != nil {\n\t\tui.Say(fmt.Sprintf(\"Error deleting the packer-generated security list %s; \"+\n\t\t\t\"please delete manually. (error : %s)\", secListName, err.Error()))\n\t}\n\n\t\/\/ Some extra cleanup if we used the winRM communicator\n\tif config.Comm.Type == \"winrm\" {\n\t\t\/\/ Delete the packer-generated application\n\t\tapplication := state.Get(\"winrm_application\").(string)\n\t\tapplicationClient := client.SecurityApplications()\n\t\tdeleteApplicationInput := compute.DeleteSecurityApplicationInput{\n\t\t\tName: namePrefix + application,\n\t\t}\n\t\terr = applicationClient.DeleteSecurityApplication(&deleteApplicationInput)\n\t\tif err != nil {\n\t\t\tui.Say(fmt.Sprintf(\"Error deleting the packer-generated winrm security application %s; \"+\n\t\t\t\t\"please delete manually. (error : %s)\", application, err.Error()))\n\t\t}\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/go:generate struct-markdown\n\/\/go:generate mapstructure-to-hcl2 -type MachineImageFilter\n\npackage triton\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/hashicorp\/packer\/hcl2template\"\n\t\"github.com\/hashicorp\/packer\/template\/interpolate\"\n)\n\n\/\/ SourceMachineConfig represents the configuration to run a machine using\n\/\/ the SDC API in order for provisioning to take place.\ntype SourceMachineConfig struct {\n\t\/\/ Name of the VM used for building the\n\t\/\/ image. Does not affect (and does not have to be the same) as the name for a\n\t\/\/ VM instance running this image. Maximum 512 characters but should in\n\t\/\/ practice be much shorter (think between 5 and 20 characters). For example\n\t\/\/ mysql-64-server-image-builder. When omitted defaults to\n\t\/\/ packer-builder-[image_name].\n\tMachineName string `mapstructure:\"source_machine_name\" required:\"false\"`\n\t\/\/ The Triton package to use while\n\t\/\/ building the image. Does not affect (and does not have to be the same) as\n\t\/\/ the package which will be used for a VM instance running this image. 
On the\n\t\/\/ Joyent public cloud this could for example be g3-standard-0.5-smartos.\n\tMachinePackage string `mapstructure:\"source_machine_package\" required:\"true\"`\n\t\/\/ The UUID of the image to base the new\n\t\/\/ image on. Triton supports multiple types of images, called 'brands' in\n\t\/\/ Triton \/ Joyent lingo, for contains and VM's. See the chapter Containers\n\t\/\/ and virtual machines in\n\t\/\/ the Joyent Triton documentation for detailed information. The following\n\t\/\/ brands are currently supported by this builder:joyent andkvm. The\n\t\/\/ choice of base image automatically decides the brand. On the Joyent public\n\t\/\/ cloud a valid source_machine_image could for example be\n\t\/\/ 70e3ae72-96b6-11e6-9056-9737fd4d0764 for version 16.3.1 of the 64bit\n\t\/\/ SmartOS base image (a 'joyent' brand image). source_machine_image_filter\n\t\/\/ can be used to populate this UUID.\n\tMachineImage string `mapstructure:\"source_machine_image\" required:\"true\"`\n\t\/\/ The UUID's of Triton\n\t\/\/ networks added to the source machine used for creating the image. For\n\t\/\/ example if any of the provisioners which are run need Internet access you\n\t\/\/ will need to add the UUID's of the appropriate networks here. If this is\n\t\/\/ not specified, instances will be placed into the default Triton public and\n\t\/\/ internal networks.\n\tMachineNetworks []string `mapstructure:\"source_machine_networks\" required:\"false\"`\n\t\/\/ Triton metadata\n\t\/\/ applied to the VM used to create the image. Metadata can be used to pass\n\t\/\/ configuration information to the VM without the need for networking. See\n\t\/\/ Using the metadata\n\t\/\/ API in the\n\t\/\/ Joyent documentation for more information. This can for example be used to\n\t\/\/ set the user-script metadata key to have Triton start a user supplied\n\t\/\/ script after the VM has booted.\n\tMachineMetadata map[string]string `mapstructure:\"source_machine_metadata\" required:\"false\"`\n\t\/\/ Tags applied to the VM used to create the image.\n\tMachineTags map[string]string `mapstructure:\"source_machine_tags\" required:\"false\"`\n\t\/\/ Same as [`source_machine_tags`](#source_machine_tags) but defined as a\n\t\/\/ singular block containing a key and a value field. In HCL2 mode the\n\t\/\/ [`dynamic_block`](https:\/\/packer.io\/docs\/configuration\/from-1.5\/expressions.html#dynamic-blocks)\n\t\/\/ will allow you to create those programatically.\n\tMachineTag hcl2template.KeyValues `mapstructure:\"source_machine_tag\" required:\"false\"`\n\t\/\/ Whether or not the firewall\n\t\/\/ of the VM used to create an image of is enabled. The Triton firewall only\n\t\/\/ filters inbound traffic to the VM. All outbound traffic is always allowed.\n\t\/\/ Currently this builder does not provide an interface to add specific\n\t\/\/ firewall rules. Unless you have a global rule defined in Triton which\n\t\/\/ allows SSH traffic enabling the firewall will interfere with the SSH\n\t\/\/ provisioner. The default is false.\n\tMachineFirewallEnabled bool `mapstructure:\"source_machine_firewall_enabled\" required:\"false\"`\n\t\/\/ Filters used to populate the\n\t\/\/ source_machine_image field. 
Example:\n\tMachineImageFilters MachineImageFilter `mapstructure:\"source_machine_image_filter\" required:\"false\"`\n}\n\ntype MachineImageFilter struct {\n\tMostRecent bool `mapstructure:\"most_recent\"`\n\tName string\n\tOS string\n\tVersion string\n\tPublic bool\n\tState string\n\tOwner string\n\tType string\n}\n\nfunc (m *MachineImageFilter) Empty() bool {\n\treturn m.Name == \"\" && m.OS == \"\" && m.Version == \"\" && m.State == \"\" && m.Owner == \"\" && m.Type == \"\"\n}\n\n\/\/ Prepare performs basic validation on a SourceMachineConfig struct.\nfunc (c *SourceMachineConfig) Prepare(ctx *interpolate.Context) []error {\n\tvar errs []error\n\n\tif c.MachinePackage == \"\" {\n\t\terrs = append(errs, fmt.Errorf(\"A source_machine_package must be specified\"))\n\t}\n\n\tif c.MachineImage != \"\" && c.MachineImageFilters.Name != \"\" {\n\t\terrs = append(errs, fmt.Errorf(\"You cannot specify a Machine Image and also Machine Name filter\"))\n\t}\n\n\tif c.MachineNetworks == nil {\n\t\tc.MachineNetworks = []string{}\n\t}\n\n\tif c.MachineMetadata == nil {\n\t\tc.MachineMetadata = make(map[string]string)\n\t}\n\n\tif c.MachineTags == nil {\n\t\tc.MachineTags = make(map[string]string)\n\t}\n\n\tif len(errs) > 0 {\n\t\treturn errs\n\t}\n\n\treturn nil\n}\n<commit_msg>Update source_machine_config.go<commit_after>\/\/go:generate struct-markdown\n\/\/go:generate mapstructure-to-hcl2 -type MachineImageFilter\n\npackage triton\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/hashicorp\/packer\/hcl2template\"\n\t\"github.com\/hashicorp\/packer\/template\/interpolate\"\n)\n\n\/\/ SourceMachineConfig represents the configuration to run a machine using\n\/\/ the SDC API in order for provisioning to take place.\ntype SourceMachineConfig struct {\n\t\/\/ Name of the VM used for building the\n\t\/\/ image. Does not affect (and does not have to be the same) as the name for a\n\t\/\/ VM instance running this image. Maximum 512 characters but should in\n\t\/\/ practice be much shorter (think between 5 and 20 characters). For example\n\t\/\/ mysql-64-server-image-builder. When omitted defaults to\n\t\/\/ packer-builder-[image_name].\n\tMachineName string `mapstructure:\"source_machine_name\" required:\"false\"`\n\t\/\/ The Triton package to use while\n\t\/\/ building the image. Does not affect (and does not have to be the same) as\n\t\/\/ the package which will be used for a VM instance running this image. On the\n\t\/\/ Joyent public cloud this could for example be g3-standard-0.5-smartos.\n\tMachinePackage string `mapstructure:\"source_machine_package\" required:\"true\"`\n\t\/\/ The UUID of the image to base the new\n\t\/\/ image on. Triton supports multiple types of images, called 'brands' in\n\t\/\/ Triton \/ Joyent lingo, for contains and VM's. See the chapter Containers\n\t\/\/ and virtual machines in\n\t\/\/ the Joyent Triton documentation for detailed information. The following\n\t\/\/ brands are currently supported by this builder:joyent andkvm. The\n\t\/\/ choice of base image automatically decides the brand. On the Joyent public\n\t\/\/ cloud a valid source_machine_image could for example be\n\t\/\/ 70e3ae72-96b6-11e6-9056-9737fd4d0764 for version 16.3.1 of the 64bit\n\t\/\/ SmartOS base image (a 'joyent' brand image). source_machine_image_filter\n\t\/\/ can be used to populate this UUID.\n\tMachineImage string `mapstructure:\"source_machine_image\" required:\"true\"`\n\t\/\/ The UUID's of Triton\n\t\/\/ networks added to the source machine used for creating the image. 
For\n\t\/\/ example if any of the provisioners which are run need Internet access you\n\t\/\/ will need to add the UUID's of the appropriate networks here. If this is\n\t\/\/ not specified, instances will be placed into the default Triton public and\n\t\/\/ internal networks.\n\tMachineNetworks []string `mapstructure:\"source_machine_networks\" required:\"false\"`\n\t\/\/ Triton metadata\n\t\/\/ applied to the VM used to create the image. Metadata can be used to pass\n\t\/\/ configuration information to the VM without the need for networking. See\n\t\/\/ Using the metadata\n\t\/\/ API in the\n\t\/\/ Joyent documentation for more information. This can for example be used to\n\t\/\/ set the user-script metadata key to have Triton start a user supplied\n\t\/\/ script after the VM has booted.\n\tMachineMetadata map[string]string `mapstructure:\"source_machine_metadata\" required:\"false\"`\n\t\/\/ Tags applied to the VM used to create the image.\n\tMachineTags map[string]string `mapstructure:\"source_machine_tags\" required:\"false\"`\n\t\/\/ Same as [`source_machine_tags`](#source_machine_tags) but defined as a\n\t\/\/ singular block containing a key and a value field. In HCL2 mode the\n\t\/\/ [`dynamic_block`](https:\/\/packer.io\/docs\/configuration\/from-1.5\/expressions.html#dynamic-blocks)\n\t\/\/ will allow you to create those programatically.\n\tMachineTag hcl2template.KeyValues `mapstructure:\"source_machine_tag\" required:\"false\"`\n\t\/\/ Whether or not the firewall\n\t\/\/ of the VM used to create an image of is enabled. The Triton firewall only\n\t\/\/ filters inbound traffic to the VM. All outbound traffic is always allowed.\n\t\/\/ Currently this builder does not provide an interface to add specific\n\t\/\/ firewall rules. Unless you have a global rule defined in Triton which\n\t\/\/ allows SSH traffic enabling the firewall will interfere with the SSH\n\t\/\/ provisioner. The default is false.\n\tMachineFirewallEnabled bool `mapstructure:\"source_machine_firewall_enabled\" required:\"false\"`\n\t\/\/ Filters used to populate the\n\t\/\/ source_machine_image field. 
Example:\n\tMachineImageFilters MachineImageFilter `mapstructure:\"source_machine_image_filter\" required:\"false\"`\n}\n\ntype MachineImageFilter struct {\n\tMostRecent bool `mapstructure:\"most_recent\"`\n\tName string\n\tOS string\n\tVersion string\n\tPublic bool\n\tState string\n\tOwner string\n\tType string\n}\n\nfunc (m *MachineImageFilter) Empty() bool {\n\treturn m.Name == \"\" && m.OS == \"\" && m.Version == \"\" && m.State == \"\" && m.Owner == \"\" && m.Type == \"\"\n}\n\n\/\/ Prepare performs basic validation on a SourceMachineConfig struct.\nfunc (c *SourceMachineConfig) Prepare(ctx *interpolate.Context) []error {\n\tvar errs []error\n\n\tif c.MachinePackage == \"\" {\n\t\terrs = append(errs, fmt.Errorf(\"A source_machine_package must be specified\"))\n\t}\n\n\tif c.MachineImage != \"\" && c.MachineImageFilters.Name != \"\" {\n\t\terrs = append(errs, fmt.Errorf(\"You cannot specify a Machine Image and also Machine Name filter\"))\n\t}\n\n\tif c.MachineNetworks == nil {\n\t\tc.MachineNetworks = []string{}\n\t}\n\n\tif c.MachineMetadata == nil {\n\t\tc.MachineMetadata = make(map[string]string)\n\t}\n\n\tif c.MachineTags == nil {\n\t\tc.MachineTags = make(map[string]string)\n\t}\n\n\terrs = append(errs, c.MachineTag.CopyOn(c.MachineTags)...)\n\n\treturn errs\n}\n<|endoftext|>"} {"text":"<commit_before>package operators\n\nimport (\n\t\"math\"\n)\n\nvar (\n\tadd = &Operator{\n\t\tName: \"+\",\n\t\tPrecedence: 1,\n\t\tAssociativity: L,\n\t\tArgs: 2,\n\t\tOperation: func(args []float64) float64 {\n\t\t\treturn args[0] + args[1]\n\t\t},\n\t}\n\tsub = &Operator{\n\t\tName: \"-\",\n\t\tPrecedence: 1,\n\t\tAssociativity: L,\n\t\tArgs: 2,\n\t\tOperation: func(args []float64) float64 {\n\t\t\treturn args[0] - args[1]\n\t\t},\n\t}\n\tneg = &Operator{\n\t\tName: \"neg\",\n\t\tPrecedence: 2,\n\t\tAssociativity: L,\n\t\tArgs: 1,\n\t\tOperation: func(args []float64) float64 {\n\t\t\treturn 0 - args[0]\n\t\t},\n\t}\n\tmul = &Operator{\n\t\tName: \"*\",\n\t\tPrecedence: 2,\n\t\tAssociativity: L,\n\t\tArgs: 2,\n\t\tOperation: func(args []float64) float64 {\n\t\t\treturn args[0] * args[1]\n\t\t},\n\t}\n\tdiv = &Operator{\n\t\tName: \"\/\",\n\t\tPrecedence: 2,\n\t\tAssociativity: L,\n\t\tArgs: 2,\n\t\tOperation: func(args []float64) float64 {\n\t\t\treturn args[0] \/ args[1]\n\t\t},\n\t}\n\tmod = &Operator{\n\t\tName: \"%\",\n\t\tPrecedence: 2,\n\t\tAssociativity: L,\n\t\tArgs: 2,\n\t\tOperation: func(args []float64) float64 {\n\t\t\treturn math.Mod(args[0], args[1])\n\t\t},\n\t}\n\tpow = &Operator{\n\t\tName: \"^\",\n\t\tPrecedence: 3,\n\t\tAssociativity: R,\n\t\tArgs: 2,\n\t\tOperation: func(args []float64) float64 {\n\t\t\treturn math.Pow(args[0], args[1])\n\t\t},\n\t}\n\n)\n\nfunc init() {\n\tRegister(add)\n\tRegister(sub)\n\tRegister(neg)\n\tRegister(pow)\n\tRegister(mul)\n Register(mod)\n\tRegister(div)\n}\n<commit_msg>Fix up formatting for modulo operator<commit_after>package operators\n\nimport (\n\t\"math\"\n)\n\nvar (\n\tadd = &Operator{\n\t\tName: \"+\",\n\t\tPrecedence: 1,\n\t\tAssociativity: L,\n\t\tArgs: 2,\n\t\tOperation: func(args []float64) float64 {\n\t\t\treturn args[0] + args[1]\n\t\t},\n\t}\n\tsub = &Operator{\n\t\tName: \"-\",\n\t\tPrecedence: 1,\n\t\tAssociativity: L,\n\t\tArgs: 2,\n\t\tOperation: func(args []float64) float64 {\n\t\t\treturn args[0] - args[1]\n\t\t},\n\t}\n\tneg = &Operator{\n\t\tName: \"neg\",\n\t\tPrecedence: 2,\n\t\tAssociativity: L,\n\t\tArgs: 1,\n\t\tOperation: func(args []float64) float64 {\n\t\t\treturn 0 - args[0]\n\t\t},\n\t}\n\tmul = 
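\/* multiplication sits in the same precedence tier (2) as division and modulo *\/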
&Operator{\n\t\tName: \"*\",\n\t\tPrecedence: 2,\n\t\tAssociativity: L,\n\t\tArgs: 2,\n\t\tOperation: func(args []float64) float64 {\n\t\t\treturn args[0] * args[1]\n\t\t},\n\t}\n\tdiv = &Operator{\n\t\tName: \"\/\",\n\t\tPrecedence: 2,\n\t\tAssociativity: L,\n\t\tArgs: 2,\n\t\tOperation: func(args []float64) float64 {\n\t\t\treturn args[0] \/ args[1]\n\t\t},\n\t}\n\tmod = &Operator{\n\t\tName: \"%\",\n\t\tPrecedence: 2,\n\t\tAssociativity: L,\n\t\tArgs: 2,\n\t\tOperation: func(args []float64) float64 {\n\t\t\treturn math.Mod(args[0], args[1])\n\t\t},\n\t}\n\tpow = &Operator{\n\t\tName: \"^\",\n\t\tPrecedence: 3,\n\t\tAssociativity: R,\n\t\tArgs: 2,\n\t\tOperation: func(args []float64) float64 {\n\t\t\treturn math.Pow(args[0], args[1])\n\t\t},\n\t}\n)\n\nfunc init() {\n\tRegister(add)\n\tRegister(sub)\n\tRegister(neg)\n\tRegister(pow)\n\tRegister(mul)\n\tRegister(mod)\n\tRegister(div)\n}\n<|endoftext|>"} {"text":"<commit_before>package application_test\n\nimport (\n\t\"errors\"\n\n\t\"code.cloudfoundry.org\/cli\/cf\/api\/applications\/applicationsfakes\"\n\t\"code.cloudfoundry.org\/cli\/cf\/commandregistry\"\n\t\"code.cloudfoundry.org\/cli\/cf\/configuration\/coreconfig\"\n\t\"code.cloudfoundry.org\/cli\/cf\/models\"\n\t\"code.cloudfoundry.org\/cli\/cf\/requirements\"\n\t\"code.cloudfoundry.org\/cli\/cf\/requirements\/requirementsfakes\"\n\ttestcmd \"code.cloudfoundry.org\/cli\/cf\/util\/testhelpers\/commands\"\n\ttestconfig \"code.cloudfoundry.org\/cli\/cf\/util\/testhelpers\/configuration\"\n\ttestterm \"code.cloudfoundry.org\/cli\/cf\/util\/testhelpers\/terminal\"\n\n\t. \"code.cloudfoundry.org\/cli\/cf\/util\/testhelpers\/matchers\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"set-env command\", func() {\n\tvar (\n\t\tui *testterm.FakeUI\n\t\tconfigRepo coreconfig.Repository\n\t\tapp models.Application\n\t\tappRepo *applicationsfakes.FakeRepository\n\t\trequirementsFactory *requirementsfakes.FakeFactory\n\t\tdeps commandregistry.Dependency\n\t)\n\n\tupdateCommandDependency := func(pluginCall bool) {\n\t\tdeps.UI = ui\n\t\tdeps.Config = configRepo\n\t\tdeps.RepoLocator = deps.RepoLocator.SetApplicationRepository(appRepo)\n\t\tcommandregistry.Commands.SetCommand(commandregistry.Commands.FindCommand(\"set-env\").SetDependency(deps, pluginCall))\n\t}\n\n\tBeforeEach(func() {\n\t\tui = &testterm.FakeUI{}\n\t\tapp = models.Application{}\n\t\tapp.Name = \"my-app\"\n\t\tapp.GUID = \"my-app-guid\"\n\t\tappRepo = new(applicationsfakes.FakeRepository)\n\t\trequirementsFactory = new(requirementsfakes.FakeFactory)\n\t\tconfigRepo = testconfig.NewRepositoryWithDefaults()\n\t})\n\n\trunCommand := func(args ...string) bool {\n\t\treturn testcmd.RunCLICommand(\"set-env\", args, requirementsFactory, updateCommandDependency, false, ui)\n\t}\n\n\tDescribe(\"requirements\", func() {\n\t\tBeforeEach(func() {\n\t\t\tapplicationReq := new(requirementsfakes.FakeApplicationRequirement)\n\t\t\tapplicationReq.GetApplicationReturns(app)\n\t\t\trequirementsFactory.NewApplicationRequirementReturns(applicationReq)\n\t\t})\n\n\t\tIt(\"fails when login is not successful\", func() {\n\t\t\trequirementsFactory.NewLoginRequirementReturns(requirements.Failing{Message: \"not logged in\"})\n\n\t\t\tExpect(runCommand(\"hey\", \"gabba\", \"gabba\")).To(BeFalse())\n\t\t})\n\n\t\tIt(\"fails when a space is not targeted\", func() 
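\/* login passes but space targeting fails, so the requirement check must reject the command *\/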
{\n\t\t\trequirementsFactory.NewLoginRequirementReturns(requirements.Passing{})\n\t\t\trequirementsFactory.NewTargetedSpaceRequirementReturns(requirements.Failing{Message: \"not targeting space\"})\n\n\t\t\tExpect(runCommand(\"hey\", \"gabba\", \"gabba\")).To(BeFalse())\n\t\t})\n\n\t\tIt(\"fails with usage when not provided with exactly three args\", func() {\n\t\t\trequirementsFactory.NewLoginRequirementReturns(requirements.Passing{})\n\t\t\trequirementsFactory.NewTargetedSpaceRequirementReturns(requirements.Passing{})\n\n\t\t\trunCommand(\"zomg\", \"too\", \"many\", \"args\")\n\t\t\tExpect(ui.Outputs()).To(ContainSubstrings(\n\t\t\t\t[]string{\"Incorrect Usage\", \"Requires\", \"arguments\"},\n\t\t\t))\n\t\t})\n\t})\n\n\tContext(\"when logged in, a space is targeted and given enough args\", func() {\n\t\tBeforeEach(func() {\n\t\t\tapp.EnvironmentVars = map[string]interface{}{\"foo\": \"bar\"}\n\t\t\tapplicationReq := new(requirementsfakes.FakeApplicationRequirement)\n\t\t\tapplicationReq.GetApplicationReturns(app)\n\t\t\trequirementsFactory.NewApplicationRequirementReturns(applicationReq)\n\t\t\trequirementsFactory.NewLoginRequirementReturns(requirements.Passing{})\n\t\t\trequirementsFactory.NewTargetedSpaceRequirementReturns(requirements.Passing{})\n\t\t})\n\n\t\tContext(\"when it is new\", func() {\n\t\t\tIt(\"is created\", func() {\n\t\t\t\trunCommand(\"my-app\", \"DATABASE_URL\", \"mysql:\/\/new-example.com\/my-db\")\n\n\t\t\t\tExpect(ui.Outputs()).To(ContainSubstrings(\n\t\t\t\t\t[]string{\n\t\t\t\t\t\t\"Setting env variable\",\n\t\t\t\t\t\t\"DATABASE_URL\",\n\t\t\t\t\t\t\"my-app\",\n\t\t\t\t\t\t\"my-org\",\n\t\t\t\t\t\t\"my-space\",\n\t\t\t\t\t\t\"my-user\",\n\t\t\t\t\t},\n\t\t\t\t\t[]string{\"OK\"},\n\t\t\t\t\t[]string{\"TIP\"},\n\t\t\t\t))\n\n\t\t\t\tExpect(ui.Outputs()).ToNot(ContainSubstrings([]string{\"mysql:\/\/new-example.com\/my-db\"}))\n\n\t\t\t\tappGUID, params := appRepo.UpdateArgsForCall(0)\n\t\t\t\tExpect(appGUID).To(Equal(app.GUID))\n\t\t\t\tExpect(*params.EnvironmentVars).To(Equal(map[string]interface{}{\n\t\t\t\t\t\"DATABASE_URL\": \"mysql:\/\/new-example.com\/my-db\",\n\t\t\t\t\t\"foo\": \"bar\",\n\t\t\t\t}))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when it already exists\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tapp.EnvironmentVars[\"DATABASE_URL\"] = \"mysql:\/\/old-example.com\/my-db\"\n\t\t\t})\n\n\t\t\tIt(\"is updated\", func() {\n\t\t\t\trunCommand(\"my-app\", \"DATABASE_URL\", \"mysql:\/\/new-example.com\/my-db\")\n\n\t\t\t\tExpect(ui.Outputs()).To(ContainSubstrings(\n\t\t\t\t\t[]string{\n\t\t\t\t\t\t\"Setting env variable\",\n\t\t\t\t\t\t\"DATABASE_URL\",\n\t\t\t\t\t\t\"mysql:\/\/new-example.com\/my-db\",\n\t\t\t\t\t\t\"my-app\",\n\t\t\t\t\t\t\"my-org\",\n\t\t\t\t\t\t\"my-space\",\n\t\t\t\t\t\t\"my-user\",\n\t\t\t\t\t},\n\t\t\t\t\t[]string{\"OK\"},\n\t\t\t\t\t[]string{\"TIP\"},\n\t\t\t\t))\n\t\t\t})\n\t\t})\n\n\t\tIt(\"allows the variable value to begin with a hyphen\", func() {\n\t\t\trunCommand(\"my-app\", \"MY_VAR\", \"--has-a-cool-value\")\n\n\t\t\tExpect(ui.Outputs()).To(ContainSubstrings(\n\t\t\t\t[]string{\n\t\t\t\t\t\"Setting env variable\",\n\t\t\t\t\t\"MY_VAR\",\n\t\t\t\t\t\"--has-a-cool-value\",\n\t\t\t\t},\n\t\t\t\t[]string{\"OK\"},\n\t\t\t\t[]string{\"TIP\"},\n\t\t\t))\n\t\t\t_, params := appRepo.UpdateArgsForCall(0)\n\t\t\tExpect(*params.EnvironmentVars).To(Equal(map[string]interface{}{\n\t\t\t\t\"MY_VAR\": \"--has-a-cool-value\",\n\t\t\t\t\"foo\": \"bar\",\n\t\t\t}))\n\t\t})\n\n\t\tContext(\"when setting fails\", func() 
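\/* the fake application repo is stubbed in the BeforeEach below to fail every update *\/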
{\n\t\t\tBeforeEach(func() {\n\t\t\t\tappRepo.UpdateReturns(models.Application{}, errors.New(\"Error updating app.\"))\n\t\t\t})\n\n\t\t\tIt(\"tells the user\", func() {\n\t\t\t\trunCommand(\"please\", \"dont\", \"fail\")\n\n\t\t\t\tExpect(ui.Outputs()).To(ContainSubstrings(\n\t\t\t\t\t[]string{\"Setting env variable\"},\n\t\t\t\t\t[]string{\"FAILED\"},\n\t\t\t\t\t[]string{\"Error updating app.\"},\n\t\t\t\t))\n\t\t\t})\n\t\t})\n\n\t\tIt(\"gives the appropriate tip\", func() {\n\t\t\trunCommand(\"my-app\", \"DATABASE_URL\", \"mysql:\/\/new-example.com\/my-db\")\n\t\t\tExpect(ui.Outputs()).To(ContainSubstrings(\n\t\t\t\t[]string{\"TIP: Use 'cf restage my-app' to ensure your env variable changes take effect\"},\n\t\t\t))\n\t\t})\n\t})\n})\n<commit_msg>Fixed failing unit tests on legacy set-env<commit_after>package application_test\n\nimport (\n\t\"errors\"\n\n\t\"code.cloudfoundry.org\/cli\/cf\/api\/applications\/applicationsfakes\"\n\t\"code.cloudfoundry.org\/cli\/cf\/commandregistry\"\n\t\"code.cloudfoundry.org\/cli\/cf\/configuration\/coreconfig\"\n\t\"code.cloudfoundry.org\/cli\/cf\/models\"\n\t\"code.cloudfoundry.org\/cli\/cf\/requirements\"\n\t\"code.cloudfoundry.org\/cli\/cf\/requirements\/requirementsfakes\"\n\ttestcmd \"code.cloudfoundry.org\/cli\/cf\/util\/testhelpers\/commands\"\n\ttestconfig \"code.cloudfoundry.org\/cli\/cf\/util\/testhelpers\/configuration\"\n\ttestterm \"code.cloudfoundry.org\/cli\/cf\/util\/testhelpers\/terminal\"\n\n\t. \"code.cloudfoundry.org\/cli\/cf\/util\/testhelpers\/matchers\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"set-env command\", func() {\n\tvar (\n\t\tui *testterm.FakeUI\n\t\tconfigRepo coreconfig.Repository\n\t\tapp models.Application\n\t\tappRepo *applicationsfakes.FakeRepository\n\t\trequirementsFactory *requirementsfakes.FakeFactory\n\t\tdeps commandregistry.Dependency\n\t)\n\n\tupdateCommandDependency := func(pluginCall bool) {\n\t\tdeps.UI = ui\n\t\tdeps.Config = configRepo\n\t\tdeps.RepoLocator = deps.RepoLocator.SetApplicationRepository(appRepo)\n\t\tcommandregistry.Commands.SetCommand(commandregistry.Commands.FindCommand(\"set-env\").SetDependency(deps, pluginCall))\n\t}\n\n\tBeforeEach(func() {\n\t\tui = &testterm.FakeUI{}\n\t\tapp = models.Application{}\n\t\tapp.Name = \"my-app\"\n\t\tapp.GUID = \"my-app-guid\"\n\t\tappRepo = new(applicationsfakes.FakeRepository)\n\t\trequirementsFactory = new(requirementsfakes.FakeFactory)\n\t\tconfigRepo = testconfig.NewRepositoryWithDefaults()\n\t})\n\n\trunCommand := func(args ...string) bool {\n\t\treturn testcmd.RunCLICommand(\"set-env\", args, requirementsFactory, updateCommandDependency, false, ui)\n\t}\n\n\tDescribe(\"requirements\", func() {\n\t\tBeforeEach(func() {\n\t\t\tapplicationReq := new(requirementsfakes.FakeApplicationRequirement)\n\t\t\tapplicationReq.GetApplicationReturns(app)\n\t\t\trequirementsFactory.NewApplicationRequirementReturns(applicationReq)\n\t\t})\n\n\t\tIt(\"fails when login is not successful\", func() {\n\t\t\trequirementsFactory.NewLoginRequirementReturns(requirements.Failing{Message: \"not logged in\"})\n\n\t\t\tExpect(runCommand(\"hey\", \"gabba\", \"gabba\")).To(BeFalse())\n\t\t})\n\n\t\tIt(\"fails when a space is not targeted\", func() {\n\t\t\trequirementsFactory.NewLoginRequirementReturns(requirements.Passing{})\n\t\t\trequirementsFactory.NewTargetedSpaceRequirementReturns(requirements.Failing{Message: \"not targeting space\"})\n\n\t\t\tExpect(runCommand(\"hey\", \"gabba\", 
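\/* a failing requirement makes runCommand report false without executing the command *\/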
\"gabba\")).To(BeFalse())\n\t\t})\n\n\t\tIt(\"fails with usage when not provided with exactly three args\", func() {\n\t\t\trequirementsFactory.NewLoginRequirementReturns(requirements.Passing{})\n\t\t\trequirementsFactory.NewTargetedSpaceRequirementReturns(requirements.Passing{})\n\n\t\t\trunCommand(\"zomg\", \"too\", \"many\", \"args\")\n\t\t\tExpect(ui.Outputs()).To(ContainSubstrings(\n\t\t\t\t[]string{\"Incorrect Usage\", \"Requires\", \"arguments\"},\n\t\t\t))\n\t\t})\n\t})\n\n\tContext(\"when logged in, a space is targeted and given enough args\", func() {\n\t\tBeforeEach(func() {\n\t\t\tapp.EnvironmentVars = map[string]interface{}{\"foo\": \"bar\"}\n\t\t\tapplicationReq := new(requirementsfakes.FakeApplicationRequirement)\n\t\t\tapplicationReq.GetApplicationReturns(app)\n\t\t\trequirementsFactory.NewApplicationRequirementReturns(applicationReq)\n\t\t\trequirementsFactory.NewLoginRequirementReturns(requirements.Passing{})\n\t\t\trequirementsFactory.NewTargetedSpaceRequirementReturns(requirements.Passing{})\n\t\t})\n\n\t\tContext(\"when it is new\", func() {\n\t\t\tIt(\"is created\", func() {\n\t\t\t\trunCommand(\"my-app\", \"DATABASE_URL\", \"mysql:\/\/new-example.com\/my-db\")\n\n\t\t\t\tExpect(ui.Outputs()).To(ContainSubstrings(\n\t\t\t\t\t[]string{\n\t\t\t\t\t\t\"Setting env variable\",\n\t\t\t\t\t\t\"DATABASE_URL\",\n\t\t\t\t\t\t\"my-app\",\n\t\t\t\t\t\t\"my-org\",\n\t\t\t\t\t\t\"my-space\",\n\t\t\t\t\t\t\"my-user\",\n\t\t\t\t\t},\n\t\t\t\t\t[]string{\"OK\"},\n\t\t\t\t\t[]string{\"TIP\"},\n\t\t\t\t))\n\n\t\t\t\tExpect(ui.Outputs()).ToNot(ContainSubstrings([]string{\"mysql:\/\/new-example.com\/my-db\"}))\n\n\t\t\t\tappGUID, params := appRepo.UpdateArgsForCall(0)\n\t\t\t\tExpect(appGUID).To(Equal(app.GUID))\n\t\t\t\tExpect(*params.EnvironmentVars).To(Equal(map[string]interface{}{\n\t\t\t\t\t\"DATABASE_URL\": \"mysql:\/\/new-example.com\/my-db\",\n\t\t\t\t\t\"foo\": \"bar\",\n\t\t\t\t}))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when it already exists\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tapp.EnvironmentVars[\"DATABASE_URL\"] = \"mysql:\/\/old-example.com\/my-db\"\n\t\t\t})\n\n\t\t\tIt(\"is updated\", func() {\n\t\t\t\trunCommand(\"my-app\", \"DATABASE_URL\", \"mysql:\/\/new-example.com\/my-db\")\n\n\t\t\t\tExpect(ui.Outputs()).To(ContainSubstrings(\n\t\t\t\t\t[]string{\n\t\t\t\t\t\t\"Setting env variable\",\n\t\t\t\t\t\t\"DATABASE_URL\",\n\t\t\t\t\t\t\"my-app\",\n\t\t\t\t\t\t\"my-org\",\n\t\t\t\t\t\t\"my-space\",\n\t\t\t\t\t\t\"my-user\",\n\t\t\t\t\t},\n\t\t\t\t\t[]string{\"OK\"},\n\t\t\t\t\t[]string{\"TIP\"},\n\t\t\t\t))\n\n\t\t\t\tExpect(ui.Outputs()).ToNot(ContainSubstrings([]string{\"mysql:\/\/new-example.com\/my-db\"}))\n\t\t\t})\n\t\t})\n\n\t\tIt(\"allows the variable value to begin with a hyphen\", func() {\n\t\t\trunCommand(\"my-app\", \"MY_VAR\", \"--has-a-cool-value\")\n\n\t\t\tExpect(ui.Outputs()).To(ContainSubstrings(\n\t\t\t\t[]string{\n\t\t\t\t\t\"Setting env variable\",\n\t\t\t\t\t\"MY_VAR\",\n\t\t\t\t},\n\t\t\t\t[]string{\"OK\"},\n\t\t\t\t[]string{\"TIP\"},\n\t\t\t))\n\t\t\t_, params := appRepo.UpdateArgsForCall(0)\n\t\t\tExpect(*params.EnvironmentVars).To(Equal(map[string]interface{}{\n\t\t\t\t\"MY_VAR\": \"--has-a-cool-value\",\n\t\t\t\t\"foo\": \"bar\",\n\t\t\t}))\n\t\t})\n\n\t\tContext(\"when setting fails\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tappRepo.UpdateReturns(models.Application{}, errors.New(\"Error updating app.\"))\n\t\t\t})\n\n\t\t\tIt(\"tells the user\", func() {\n\t\t\t\trunCommand(\"please\", \"dont\", 
\"fail\")\n\n\t\t\t\tExpect(ui.Outputs()).To(ContainSubstrings(\n\t\t\t\t\t[]string{\"Setting env variable\"},\n\t\t\t\t\t[]string{\"FAILED\"},\n\t\t\t\t\t[]string{\"Error updating app.\"},\n\t\t\t\t))\n\t\t\t})\n\t\t})\n\n\t\tIt(\"gives the appropriate tip\", func() {\n\t\t\trunCommand(\"my-app\", \"DATABASE_URL\", \"mysql:\/\/new-example.com\/my-db\")\n\t\t\tExpect(ui.Outputs()).To(ContainSubstrings(\n\t\t\t\t[]string{\"TIP: Use 'cf restage my-app' to ensure your env variable changes take effect\"},\n\t\t\t))\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>\/*\n WARNING WARNING WARNING\n\n Attention all potential contributors\n\n This testfile is not in the best state. We've been slowly transitioning\n from the built in \"testing\" package to using Ginkgo. As you can see, we've\n changed the format, but a lot of the setup, test body, descriptions, etc\n are either hardcoded, completely lacking, or misleading.\n\n For example:\n\n Describe(\"Testing with ginkgo\"...) \/\/ This is not a great description\n It(\"TestDoesSoemthing\"...) \/\/ This is a horrible description\n\n Describe(\"create-user command\"... \/\/ Describe the actual object under test\n It(\"creates a user when provided ...\" \/\/ this is more descriptive\n\n For good examples of writing Ginkgo tests for the cli, refer to\n\n src\/github.com\/cloudfoundry\/cli\/cf\/commands\/application\/delete_app_test.go\n src\/github.com\/cloudfoundry\/cli\/cf\/terminal\/ui_test.go\n src\/github.com\/cloudfoundry\/loggregator_consumer\/consumer_test.go\n*\/\n\npackage user_test\n\nimport (\n\t. \"github.com\/cloudfoundry\/cli\/cf\/commands\/user\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/configuration\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/models\"\n\ttestapi \"github.com\/cloudfoundry\/cli\/testhelpers\/api\"\n\ttestcmd \"github.com\/cloudfoundry\/cli\/testhelpers\/commands\"\n\ttestconfig \"github.com\/cloudfoundry\/cli\/testhelpers\/configuration\"\n\ttestreq \"github.com\/cloudfoundry\/cli\/testhelpers\/requirements\"\n\ttestterm \"github.com\/cloudfoundry\/cli\/testhelpers\/terminal\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t. 
\"github.com\/cloudfoundry\/cli\/testhelpers\/matchers\"\n)\n\nfunc getSetSpaceRoleDeps() (requirementsFactory *testreq.FakeReqFactory, spaceRepo *testapi.FakeSpaceRepository, userRepo *testapi.FakeUserRepository) {\n\trequirementsFactory = &testreq.FakeReqFactory{}\n\tspaceRepo = &testapi.FakeSpaceRepository{}\n\tuserRepo = &testapi.FakeUserRepository{}\n\treturn\n}\n\nfunc callSetSpaceRole(args []string, requirementsFactory *testreq.FakeReqFactory, spaceRepo *testapi.FakeSpaceRepository, userRepo *testapi.FakeUserRepository) (ui *testterm.FakeUI) {\n\tui = new(testterm.FakeUI)\n\tconfigRepo := testconfig.NewRepositoryWithDefaults()\n\taccessToken, err := testconfig.EncodeAccessToken(configuration.TokenInfo{\n\t\tUsername: \"current-user\",\n\t})\n\tExpect(err).NotTo(HaveOccurred())\n\tconfigRepo.SetAccessToken(accessToken)\n\n\tcmd := NewSetSpaceRole(ui, configRepo, spaceRepo, userRepo)\n\ttestcmd.RunCommand(cmd, args, requirementsFactory)\n\treturn\n}\n\nvar _ = Describe(\"Testing with ginkgo\", func() {\n\tIt(\"TestSetSpaceRoleFailsWithUsage\", func() {\n\t\trequirementsFactory, spaceRepo, userRepo := getSetSpaceRoleDeps()\n\n\t\tui := callSetSpaceRole([]string{}, requirementsFactory, spaceRepo, userRepo)\n\t\tExpect(ui.FailedWithUsage).To(BeTrue())\n\n\t\tui = callSetSpaceRole([]string{\"my-user\"}, requirementsFactory, spaceRepo, userRepo)\n\t\tExpect(ui.FailedWithUsage).To(BeTrue())\n\n\t\tui = callSetSpaceRole([]string{\"my-user\", \"my-org\"}, requirementsFactory, spaceRepo, userRepo)\n\t\tExpect(ui.FailedWithUsage).To(BeTrue())\n\n\t\tui = callSetSpaceRole([]string{\"my-user\", \"my-org\", \"my-space\"}, requirementsFactory, spaceRepo, userRepo)\n\t\tExpect(ui.FailedWithUsage).To(BeTrue())\n\n\t\tui = callSetSpaceRole([]string{\"my-user\", \"my-org\", \"my-space\", \"my-role\"}, requirementsFactory, spaceRepo, userRepo)\n\n\t\tExpect(ui.FailedWithUsage).To(BeFalse())\n\t})\n\n\tIt(\"TestSetSpaceRoleRequirements\", func() {\n\t\targs := []string{\"username\", \"org\", \"space\", \"role\"}\n\t\trequirementsFactory, spaceRepo, userRepo := getSetSpaceRoleDeps()\n\n\t\trequirementsFactory.LoginSuccess = false\n\t\tcallSetSpaceRole(args, requirementsFactory, spaceRepo, userRepo)\n\t\tExpect(testcmd.CommandDidPassRequirements).To(BeFalse())\n\n\t\trequirementsFactory.LoginSuccess = true\n\t\tcallSetSpaceRole(args, requirementsFactory, spaceRepo, userRepo)\n\t\tExpect(testcmd.CommandDidPassRequirements).To(BeTrue())\n\n\t\tExpect(requirementsFactory.UserUsername).To(Equal(\"username\"))\n\t\tExpect(requirementsFactory.OrganizationName).To(Equal(\"org\"))\n\t})\n\n\tIt(\"TestSetSpaceRole\", func() {\n\t\torg := models.Organization{}\n\t\torg.Guid = \"my-org-guid\"\n\t\torg.Name = \"my-org\"\n\n\t\targs := []string{\"some-user\", \"some-org\", \"some-space\", \"SpaceManager\"}\n\n\t\trequirementsFactory, spaceRepo, userRepo := getSetSpaceRoleDeps()\n\t\trequirementsFactory.LoginSuccess = true\n\t\trequirementsFactory.UserFields = models.UserFields{}\n\t\trequirementsFactory.UserFields.Guid = \"my-user-guid\"\n\t\trequirementsFactory.UserFields.Username = \"my-user\"\n\t\trequirementsFactory.Organization = org\n\n\t\tspaceRepo.FindByNameInOrgSpace = models.Space{}\n\t\tspaceRepo.FindByNameInOrgSpace.Guid = \"my-space-guid\"\n\t\tspaceRepo.FindByNameInOrgSpace.Name = \"my-space\"\n\t\tspaceRepo.FindByNameInOrgSpace.Organization = org.OrganizationFields\n\n\t\tui := callSetSpaceRole(args, requirementsFactory, spaceRepo, 
userRepo)\n\n\t\tExpect(ui.Outputs).To(ContainSubstrings(\n\t\t\t[]string{\"Assigning role \", \"SpaceManager\", \"my-user\", \"my-org\", \"my-space\", \"current-user\"},\n\t\t\t[]string{\"OK\"},\n\t\t))\n\n\t\tExpect(spaceRepo.FindByNameInOrgName).To(Equal(\"some-space\"))\n\t\tExpect(spaceRepo.FindByNameInOrgOrgGuid).To(Equal(\"my-org-guid\"))\n\n\t\tExpect(userRepo.SetSpaceRoleUserGuid).To(Equal(\"my-user-guid\"))\n\t\tExpect(userRepo.SetSpaceRoleSpaceGuid).To(Equal(\"my-space-guid\"))\n\t\tExpect(userRepo.SetSpaceRoleRole).To(Equal(models.SPACE_MANAGER))\n\t})\n})\n<commit_msg>Cleanup set space role test<commit_after>package user_test\n\nimport (\n\t. \"github.com\/cloudfoundry\/cli\/cf\/commands\/user\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/configuration\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/models\"\n\ttestapi \"github.com\/cloudfoundry\/cli\/testhelpers\/api\"\n\ttestcmd \"github.com\/cloudfoundry\/cli\/testhelpers\/commands\"\n\ttestconfig \"github.com\/cloudfoundry\/cli\/testhelpers\/configuration\"\n\ttestreq \"github.com\/cloudfoundry\/cli\/testhelpers\/requirements\"\n\ttestterm \"github.com\/cloudfoundry\/cli\/testhelpers\/terminal\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t. \"github.com\/cloudfoundry\/cli\/testhelpers\/matchers\"\n)\n\nvar _ = Describe(\"set-space-role command\", func() {\n\tvar (\n\t\tui *testterm.FakeUI\n\t\trequirementsFactory *testreq.FakeReqFactory\n\t\tspaceRepo *testapi.FakeSpaceRepository\n\t\tuserRepo *testapi.FakeUserRepository\n\t\tconfigRepo configuration.ReadWriter\n\t)\n\n\tBeforeEach(func() {\n\t\tconfigRepo = testconfig.NewRepositoryWithDefaults()\n\t\taccessToken, err := testconfig.EncodeAccessToken(configuration.TokenInfo{Username: \"current-user\"})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tconfigRepo.SetAccessToken(accessToken)\n\n\t\tui = &testterm.FakeUI{}\n\t\trequirementsFactory = &testreq.FakeReqFactory{}\n\t\tspaceRepo = &testapi.FakeSpaceRepository{}\n\t\tuserRepo = &testapi.FakeUserRepository{}\n\t})\n\n\trunCommand := func(args ...string) {\n\t\ttestcmd.RunCommand(NewSetSpaceRole(ui, configRepo, spaceRepo, userRepo), args, requirementsFactory)\n\t}\n\n\tIt(\"fails with usage when not provided exactly four args\", func() {\n\t\trunCommand(\"foo\", \"bar\", \"baz\")\n\t\tExpect(ui.FailedWithUsage).To(BeTrue())\n\t})\n\n\tIt(\"does not fail with usage when provided four args\", func() {\n\t\trunCommand(\"whatever\", \"these\", \"are\", \"args\")\n\t\tExpect(ui.FailedWithUsage).To(BeFalse())\n\t})\n\n\tDescribe(\"requirements\", func() {\n\t\tIt(\"fails when not logged in\", func() {\n\t\t\trunCommand(\"username\", \"org\", \"space\", \"role\")\n\n\t\t\tExpect(testcmd.CommandDidPassRequirements).To(BeFalse())\n\t\t})\n\n\t\tIt(\"succeeds when logged in\", func() {\n\t\t\trequirementsFactory.LoginSuccess = true\n\t\t\trunCommand(\"username\", \"org\", \"space\", \"role\")\n\n\t\t\tExpect(testcmd.CommandDidPassRequirements).To(BeTrue())\n\t\t\tExpect(requirementsFactory.UserUsername).To(Equal(\"username\"))\n\t\t\tExpect(requirementsFactory.OrganizationName).To(Equal(\"org\"))\n\t\t})\n\t})\n\n\tContext(\"when logged in\", func() {\n\t\tBeforeEach(func() {\n\t\t\trequirementsFactory.LoginSuccess = true\n\n\t\t\torg := models.Organization{}\n\t\t\torg.Guid = \"my-org-guid\"\n\t\t\torg.Name = \"my-org\"\n\n\t\t\trequirementsFactory.UserFields = models.UserFields{Guid: \"my-user-guid\", Username: \"my-user\"}\n\t\t\trequirementsFactory.Organization = org\n\n\t\t\tspaceRepo.FindByNameInOrgSpace = 
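\/* stub the result of the space lookup performed by the command under test *\/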
models.Space{}\n\t\t\tspaceRepo.FindByNameInOrgSpace.Guid = \"my-space-guid\"\n\t\t\tspaceRepo.FindByNameInOrgSpace.Name = \"my-space\"\n\t\t\tspaceRepo.FindByNameInOrgSpace.Organization = org.OrganizationFields\n\t\t})\n\n\t\tIt(\"sets the given space role on the given user\", func() {\n\t\t\trunCommand(\"some-user\", \"some-org\", \"some-space\", \"SpaceManager\")\n\n\t\t\tExpect(ui.Outputs).To(ContainSubstrings(\n\t\t\t\t[]string{\"Assigning role \", \"SpaceManager\", \"my-user\", \"my-org\", \"my-space\", \"current-user\"},\n\t\t\t\t[]string{\"OK\"},\n\t\t\t))\n\n\t\t\tExpect(spaceRepo.FindByNameInOrgName).To(Equal(\"some-space\"))\n\t\t\tExpect(spaceRepo.FindByNameInOrgOrgGuid).To(Equal(\"my-org-guid\"))\n\n\t\t\tExpect(userRepo.SetSpaceRoleUserGuid).To(Equal(\"my-user-guid\"))\n\t\t\tExpect(userRepo.SetSpaceRoleSpaceGuid).To(Equal(\"my-space-guid\"))\n\t\t\tExpect(userRepo.SetSpaceRoleRole).To(Equal(models.SPACE_MANAGER))\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package packet\n\nimport (\n\t\"bytes\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestPacketEncoding(t *testing.T) {\n\tcases := []struct {\n\t\tdecoded []byte \/\/ excluding CRC byte\n\t\tencoded []byte\n\t}{\n\t\t{parseBytes(\"A7 12 89 86 5D 00\"), parseBytes(\"A9 6C 72 69 96 A6 94 D5 55 2C E5\")},\n\t\t{parseBytes(\"A7 12 89 86 06 00\"), parseBytes(\"A9 6C 72 69 96 A6 56 65 55 C6 55\")},\n\t\t{parseBytes(\"A7 12 89 86 15 09\"), parseBytes(\"A9 6C 72 69 96 A6 C6 55 59 96 65\")},\n\t\t{parseBytes(\"A7 12 89 86 8D 00\"), parseBytes(\"A9 6C 72 69 96 A6 68 D5 55 2D 55\")},\n\t\t{parseBytes(\"A7 12 89 86 8D 09 03 37 32 32 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00\"), parseBytes(\"A9 6C 72 69 96 A6 68 D5 59 56 38 D6 8F 28 F2 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 8D 95\")},\n\t\t{parseBytes(\"A8 0F 25 C1 23 0D 19 1C 50 00 8F 00 90 00 34 34 99 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00\"), parseBytes(\"A9 A5 5C CA 5B 31 CA 35 4D C5 9C 6C 95 55 55 69 C5 55 65 55 55 8F 48 F4 65 95 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 34 E3 96\")},\n\t}\n\tfor _, c := range cases {\n\t\tt.Run(\"encode\", func(t *testing.T) {\n\t\t\tresult := Encode(c.decoded)\n\t\t\tif !bytes.Equal(result, c.encoded) {\n\t\t\t\tt.Errorf(\"Encode(% X) == % X, want % X\", c.decoded, result, c.encoded)\n\t\t\t}\n\t\t})\n\t\tt.Run(\"decode\", func(t *testing.T) {\n\t\t\tresult, err := Decode(c.encoded)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Decode(% X) == %v, want % X\", c.encoded, err, c.decoded)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !bytes.Equal(result, c.decoded) {\n\t\t\t\tt.Errorf(\"Decode(% X) == % X, want % X\", c.encoded, result, c.decoded)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc parseBytes(hex string) []byte {\n\tfields := strings.Fields(hex)\n\tdata := make([]byte, len(fields))\n\tfor i, s := range fields {\n\t\tb, err := strconv.ParseUint(string(s), 16, 8)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdata[i] = byte(b)\n\t}\n\treturn data\n}\n<commit_msg>Simplify parseBytes function and make spaces optional<commit_after>package packet\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestPacketEncoding(t 
*testing.T) {\n\tcases := []struct {\n\t\tdecoded []byte \/\/ excluding CRC byte\n\t\tencoded []byte\n\t}{\n\t\t{parseBytes(\"A7 12 89 86 5D 00\"), parseBytes(\"A9 6C 72 69 96 A6 94 D5 55 2C E5\")},\n\t\t{parseBytes(\"A7 12 89 86 06 00\"), parseBytes(\"A9 6C 72 69 96 A6 56 65 55 C6 55\")},\n\t\t{parseBytes(\"A7 12 89 86 15 09\"), parseBytes(\"A9 6C 72 69 96 A6 C6 55 59 96 65\")},\n\t\t{parseBytes(\"A7 12 89 86 8D 00\"), parseBytes(\"A9 6C 72 69 96 A6 68 D5 55 2D 55\")},\n\t\t{parseBytes(\"A7 12 89 86 8D 09 03 37 32 32 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00\"), parseBytes(\"A9 6C 72 69 96 A6 68 D5 59 56 38 D6 8F 28 F2 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 8D 95\")},\n\t\t{parseBytes(\"A8 0F 25 C1 23 0D 19 1C 50 00 8F 00 90 00 34 34 99 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00\"), parseBytes(\"A9 A5 5C CA 5B 31 CA 35 4D C5 9C 6C 95 55 55 69 C5 55 65 55 55 8F 48 F4 65 95 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 34 E3 96\")},\n\t}\n\tfor _, c := range cases {\n\t\tt.Run(\"encode\", func(t *testing.T) {\n\t\t\tresult := Encode(c.decoded)\n\t\t\tif !bytes.Equal(result, c.encoded) {\n\t\t\t\tt.Errorf(\"Encode(% X) == % X, want % X\", c.decoded, result, c.encoded)\n\t\t\t}\n\t\t})\n\t\tt.Run(\"decode\", func(t *testing.T) {\n\t\t\tresult, err := Decode(c.encoded)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Decode(% X) == %v, want % X\", c.encoded, err, c.decoded)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !bytes.Equal(result, c.decoded) {\n\t\t\t\tt.Errorf(\"Decode(% X) == % X, want % X\", c.encoded, result, c.decoded)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc parseBytes(hex string) []byte {\n\tr := strings.NewReader(hex)\n\tvar data []byte\n\tfor {\n\t\tvar b byte\n\t\tn, err := fmt.Fscanf(r, \"%02x\", &b)\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdata = append(data, b)\n\t}\n\treturn data\n}\n<|endoftext|>"} {"text":"<commit_before>package quic\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/rand\"\n\t\"crypto\/sha256\"\n\t\"fmt\"\n\t\"hash\"\n\t\"log\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/lucas-clemente\/quic-go\/internal\/protocol\"\n\t\"github.com\/lucas-clemente\/quic-go\/internal\/utils\"\n\t\"github.com\/lucas-clemente\/quic-go\/internal\/wire\"\n\t\"github.com\/lucas-clemente\/quic-go\/logging\"\n)\n\ntype statelessResetErr struct {\n\ttoken protocol.StatelessResetToken\n}\n\nfunc (e statelessResetErr) Error() string {\n\treturn fmt.Sprintf(\"received a stateless reset with token %x\", e.token)\n}\n\n\/\/ The packetHandlerMap stores packetHandlers, identified by connection ID.\n\/\/ It is used:\n\/\/ * by the server to store sessions\n\/\/ * when multiplexing outgoing connections to store clients\ntype packetHandlerMap struct {\n\tmutex sync.RWMutex\n\n\tconn connection\n\tconnIDLen int\n\n\thandlers map[string] \/* string(ConnectionID)*\/ packetHandler\n\tresetTokens map[protocol.StatelessResetToken] \/* stateless reset token *\/ packetHandler\n\tserver unknownPacketHandler\n\n\tlistening chan struct{} \/\/ is closed when listen returns\n\tclosed bool\n\n\tdeleteRetiredSessionsAfter time.Duration\n\n\tstatelessResetEnabled bool\n\tstatelessResetMutex 
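\/* table-driven cases pair each decoded payload, CRC byte excluded, with its full encoded form *\/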
sync.Mutex\n\tstatelessResetHasher hash.Hash\n\n\ttracer logging.Tracer\n\tlogger utils.Logger\n}\n\nvar _ packetHandlerManager = &packetHandlerMap{}\n\nfunc setReceiveBuffer(c net.PacketConn, logger utils.Logger) {\n\tconn, ok := c.(interface{ SetReadBuffer(int) error })\n\tif !ok {\n\t\tlogger.Debugf(\"Connection doesn't allow setting of receive buffer size\")\n\t\treturn\n\t}\n\tsize, err := inspectReadBuffer(c)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to determine receive buffer size: %s\", err)\n\t\treturn\n\t}\n\tif size >= protocol.DesiredReceiveBufferSize {\n\t\tlogger.Debugf(\"Conn has receive buffer of %d kiB (wanted: at least %d kiB)\", size\/1024, protocol.DesiredReceiveBufferSize\/1024)\n\t}\n\tif err := conn.SetReadBuffer(protocol.DesiredReceiveBufferSize); err != nil {\n\t\tlog.Printf(\"Failed to increase receive buffer size: %s\\n\", err)\n\t\treturn\n\t}\n\tnewSize, err := inspectReadBuffer(c)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to determine receive buffer size: %s\", err)\n\t\treturn\n\t}\n\tif newSize == size {\n\t\tlog.Printf(\"Failed to increase receive buffer size (size remained at %d kiB)\", size\/1024)\n\t\treturn\n\t}\n\tif newSize < protocol.DesiredReceiveBufferSize {\n\t\tlog.Printf(\"Failed to sufficiently increase receive buffer size. Was: %d kiB, wanted: %d kiB, got: %d kiB.\", size\/1024, protocol.DesiredReceiveBufferSize\/1024, newSize\/1024)\n\t\treturn\n\t}\n\tlogger.Debugf(\"Increased receive buffer size to %d kiB\", newSize\/1024)\n}\n\nfunc newPacketHandlerMap(\n\tc net.PacketConn,\n\tconnIDLen int,\n\tstatelessResetKey []byte,\n\ttracer logging.Tracer,\n\tlogger utils.Logger,\n) (packetHandlerManager, error) {\n\tsetReceiveBuffer(c, logger)\n\tconn, err := wrapConn(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm := &packetHandlerMap{\n\t\tconn: conn,\n\t\tconnIDLen: connIDLen,\n\t\tlistening: make(chan struct{}),\n\t\thandlers: make(map[string]packetHandler),\n\t\tresetTokens: make(map[protocol.StatelessResetToken]packetHandler),\n\t\tdeleteRetiredSessionsAfter: protocol.RetiredConnectionIDDeleteTimeout,\n\t\tstatelessResetEnabled: len(statelessResetKey) > 0,\n\t\tstatelessResetHasher: hmac.New(sha256.New, statelessResetKey),\n\t\ttracer: tracer,\n\t\tlogger: logger,\n\t}\n\tgo m.listen()\n\n\tif logger.Debug() {\n\t\tgo m.logUsage()\n\t}\n\treturn m, nil\n}\n\nfunc (h *packetHandlerMap) logUsage() {\n\tticker := time.NewTicker(2 * time.Second)\n\tvar printedZero bool\n\tfor {\n\t\tselect {\n\t\tcase <-h.listening:\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t}\n\n\t\th.mutex.Lock()\n\t\tnumHandlers := len(h.handlers)\n\t\tnumTokens := len(h.resetTokens)\n\t\th.mutex.Unlock()\n\t\t\/\/ If the number of tracked handlers and tokens is zero, only print it a single time.\n\t\thasZero := numHandlers == 0 && numTokens == 0\n\t\tif !hasZero || (hasZero && !printedZero) {\n\t\t\th.logger.Debugf(\"Tracking %d connection IDs and %d reset tokens.\\n\", numHandlers, numTokens)\n\t\t\tprintedZero = false\n\t\t\tif hasZero {\n\t\t\t\tprintedZero = true\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (h *packetHandlerMap) Add(id protocol.ConnectionID, handler packetHandler) bool \/* was added *\/ {\n\tsid := string(id)\n\n\th.mutex.Lock()\n\tdefer h.mutex.Unlock()\n\n\tif _, ok := h.handlers[sid]; ok {\n\t\th.logger.Debugf(\"Not adding connection ID %s, as it already exists.\", id)\n\t\treturn false\n\t}\n\th.handlers[sid] = handler\n\th.logger.Debugf(\"Adding connection ID %s.\", id)\n\treturn true\n}\n\nfunc (h *packetHandlerMap) AddWithConnID(clientDestConnID, newConnID protocol.ConnectionID, fn 
func() packetHandler) bool {\n\tsid := string(clientDestConnID)\n\th.mutex.Lock()\n\tdefer h.mutex.Unlock()\n\n\tif _, ok := h.handlers[sid]; ok {\n\t\th.logger.Debugf(\"Not adding connection ID %s for a new session, as it already exists.\", clientDestConnID)\n\t\treturn false\n\t}\n\n\tsess := fn()\n\th.handlers[sid] = sess\n\th.handlers[string(newConnID)] = sess\n\th.logger.Debugf(\"Adding connection IDs %s and %s for a new session.\", clientDestConnID, newConnID)\n\treturn true\n}\n\nfunc (h *packetHandlerMap) Remove(id protocol.ConnectionID) {\n\th.mutex.Lock()\n\tdelete(h.handlers, string(id))\n\th.mutex.Unlock()\n\th.logger.Debugf(\"Removing connection ID %s.\", id)\n}\n\nfunc (h *packetHandlerMap) Retire(id protocol.ConnectionID) {\n\th.logger.Debugf(\"Retiring connection ID %s in %s.\", id, h.deleteRetiredSessionsAfter)\n\ttime.AfterFunc(h.deleteRetiredSessionsAfter, func() {\n\t\th.mutex.Lock()\n\t\tdelete(h.handlers, string(id))\n\t\th.mutex.Unlock()\n\t\th.logger.Debugf(\"Removing connection ID %s after it has been retired.\", id)\n\t})\n}\n\nfunc (h *packetHandlerMap) ReplaceWithClosed(id protocol.ConnectionID, handler packetHandler) {\n\th.mutex.Lock()\n\th.handlers[string(id)] = handler\n\th.mutex.Unlock()\n\th.logger.Debugf(\"Replacing session for connection ID %s with a closed session.\", id)\n\n\ttime.AfterFunc(h.deleteRetiredSessionsAfter, func() {\n\t\th.mutex.Lock()\n\t\thandler.shutdown()\n\t\tdelete(h.handlers, string(id))\n\t\th.mutex.Unlock()\n\t\th.logger.Debugf(\"Removing connection ID %s for a closed session after it has been retired.\", id)\n\t})\n}\n\nfunc (h *packetHandlerMap) AddResetToken(token protocol.StatelessResetToken, handler packetHandler) {\n\th.mutex.Lock()\n\th.resetTokens[token] = handler\n\th.mutex.Unlock()\n}\n\nfunc (h *packetHandlerMap) RemoveResetToken(token protocol.StatelessResetToken) {\n\th.mutex.Lock()\n\tdelete(h.resetTokens, token)\n\th.mutex.Unlock()\n}\n\nfunc (h *packetHandlerMap) RetireResetToken(token protocol.StatelessResetToken) {\n\ttime.AfterFunc(h.deleteRetiredSessionsAfter, func() {\n\t\th.mutex.Lock()\n\t\tdelete(h.resetTokens, token)\n\t\th.mutex.Unlock()\n\t})\n}\n\nfunc (h *packetHandlerMap) SetServer(s unknownPacketHandler) {\n\th.mutex.Lock()\n\th.server = s\n\th.mutex.Unlock()\n}\n\nfunc (h *packetHandlerMap) CloseServer() {\n\th.mutex.Lock()\n\tif h.server == nil {\n\t\th.mutex.Unlock()\n\t\treturn\n\t}\n\th.server = nil\n\tvar wg sync.WaitGroup\n\tfor _, handler := range h.handlers {\n\t\tif handler.getPerspective() == protocol.PerspectiveServer {\n\t\t\twg.Add(1)\n\t\t\tgo func(handler packetHandler) {\n\t\t\t\t\/\/ blocks until the CONNECTION_CLOSE has been sent and the run-loop has stopped\n\t\t\t\thandler.shutdown()\n\t\t\t\twg.Done()\n\t\t\t}(handler)\n\t\t}\n\t}\n\th.mutex.Unlock()\n\twg.Wait()\n}\n\n\/\/ Destroy the underlying connection and wait until listen() has returned.\n\/\/ It does not close active sessions.\nfunc (h *packetHandlerMap) Destroy() error {\n\tif err := h.conn.Close(); err != nil {\n\t\treturn err\n\t}\n\t<-h.listening \/\/ wait until listening returns\n\treturn nil\n}\n\nfunc (h *packetHandlerMap) close(e error) error {\n\th.mutex.Lock()\n\tif h.closed {\n\t\th.mutex.Unlock()\n\t\treturn nil\n\t}\n\n\tvar wg sync.WaitGroup\n\tfor _, handler := range h.handlers {\n\t\twg.Add(1)\n\t\tgo func(handler packetHandler) {\n\t\t\thandler.destroy(e)\n\t\t\twg.Done()\n\t\t}(handler)\n\t}\n\n\tif h.server != nil {\n\t\th.server.setCloseError(e)\n\t}\n\th.closed = 
true\n\th.mutex.Unlock()\n\twg.Wait()\n\treturn getMultiplexer().RemoveConn(h.conn)\n}\n\nfunc (h *packetHandlerMap) listen() {\n\tdefer close(h.listening)\n\tfor {\n\t\tp, err := h.conn.ReadPacket()\n\t\tif nerr, ok := err.(net.Error); ok && nerr.Temporary() {\n\t\t\th.logger.Debugf(\"Temporary error reading from conn: %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\th.close(err)\n\t\t\treturn\n\t\t}\n\t\th.handlePacket(p)\n\t}\n}\n\nfunc (h *packetHandlerMap) handlePacket(p *receivedPacket) {\n\tconnID, err := wire.ParseConnectionID(p.data, h.connIDLen)\n\tif err != nil {\n\t\th.logger.Debugf(\"error parsing connection ID on packet from %s: %s\", p.remoteAddr, err)\n\t\tif h.tracer != nil {\n\t\t\th.tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropHeaderParseError)\n\t\t}\n\t\tp.buffer.MaybeRelease()\n\t\treturn\n\t}\n\n\th.mutex.RLock()\n\tdefer h.mutex.RUnlock()\n\n\tif isStatelessReset := h.maybeHandleStatelessReset(p.data); isStatelessReset {\n\t\treturn\n\t}\n\n\thandler, handlerFound := h.handlers[string(connID)]\n\n\tif handlerFound { \/\/ existing session\n\t\thandler.handlePacket(p)\n\t\treturn\n\t}\n\tif p.data[0]&0x80 == 0 {\n\t\tgo h.maybeSendStatelessReset(p, connID)\n\t\treturn\n\t}\n\tif h.server == nil { \/\/ no server set\n\t\th.logger.Debugf(\"received a packet with an unexpected connection ID %s\", connID)\n\t\treturn\n\t}\n\th.server.handlePacket(p)\n}\n\nfunc (h *packetHandlerMap) maybeHandleStatelessReset(data []byte) bool {\n\t\/\/ stateless resets are always short header packets\n\tif data[0]&0x80 != 0 {\n\t\treturn false\n\t}\n\tif len(data) < 17 \/* type byte + 16 bytes for the reset token *\/ {\n\t\treturn false\n\t}\n\n\tvar token protocol.StatelessResetToken\n\tcopy(token[:], data[len(data)-16:])\n\tif sess, ok := h.resetTokens[token]; ok {\n\t\th.logger.Debugf(\"Received a stateless reset with token %#x. Closing session.\", token)\n\t\tgo sess.destroy(statelessResetErr{token: token})\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (h *packetHandlerMap) GetStatelessResetToken(connID protocol.ConnectionID) protocol.StatelessResetToken {\n\tvar token protocol.StatelessResetToken\n\tif !h.statelessResetEnabled {\n\t\t\/\/ Return a random stateless reset token.\n\t\t\/\/ This token will be sent in the server's transport parameters.\n\t\t\/\/ By using a random token, an off-path attacker won't be able to disrupt the connection.\n\t\trand.Read(token[:])\n\t\treturn token\n\t}\n\th.statelessResetMutex.Lock()\n\th.statelessResetHasher.Write(connID.Bytes())\n\tcopy(token[:], h.statelessResetHasher.Sum(nil))\n\th.statelessResetHasher.Reset()\n\th.statelessResetMutex.Unlock()\n\treturn token\n}\n\nfunc (h *packetHandlerMap) maybeSendStatelessReset(p *receivedPacket, connID protocol.ConnectionID) {\n\tdefer p.buffer.Release()\n\tif !h.statelessResetEnabled {\n\t\treturn\n\t}\n\t\/\/ Don't send a stateless reset in response to very small packets.\n\t\/\/ This includes packets that could be stateless resets.\n\tif len(p.data) <= protocol.MinStatelessResetSize {\n\t\treturn\n\t}\n\ttoken := h.GetStatelessResetToken(connID)\n\th.logger.Debugf(\"Sending stateless reset to %s (connection ID: %s). 
Token: %#x\", p.remoteAddr, connID, token)\n\tdata := make([]byte, protocol.MinStatelessResetSize-16, protocol.MinStatelessResetSize)\n\trand.Read(data)\n\tdata[0] = (data[0] & 0x7f) | 0x40\n\tdata = append(data, token[:]...)\n\tif _, err := h.conn.WriteTo(data, p.remoteAddr); err != nil {\n\t\th.logger.Debugf(\"Error sending Stateless Reset: %s\", err)\n\t}\n}\n<commit_msg>replace the RWMutex with a Mutex in the packet handler map<commit_after>package quic\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/rand\"\n\t\"crypto\/sha256\"\n\t\"fmt\"\n\t\"hash\"\n\t\"log\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/lucas-clemente\/quic-go\/internal\/protocol\"\n\t\"github.com\/lucas-clemente\/quic-go\/internal\/utils\"\n\t\"github.com\/lucas-clemente\/quic-go\/internal\/wire\"\n\t\"github.com\/lucas-clemente\/quic-go\/logging\"\n)\n\ntype statelessResetErr struct {\n\ttoken protocol.StatelessResetToken\n}\n\nfunc (e statelessResetErr) Error() string {\n\treturn fmt.Sprintf(\"received a stateless reset with token %x\", e.token)\n}\n\n\/\/ The packetHandlerMap stores packetHandlers, identified by connection ID.\n\/\/ It is used:\n\/\/ * by the server to store sessions\n\/\/ * when multiplexing outgoing connections to store clients\ntype packetHandlerMap struct {\n\tmutex sync.Mutex\n\n\tconn connection\n\tconnIDLen int\n\n\thandlers map[string] \/* string(ConnectionID)*\/ packetHandler\n\tresetTokens map[protocol.StatelessResetToken] \/* stateless reset token *\/ packetHandler\n\tserver unknownPacketHandler\n\n\tlistening chan struct{} \/\/ is closed when listen returns\n\tclosed bool\n\n\tdeleteRetiredSessionsAfter time.Duration\n\n\tstatelessResetEnabled bool\n\tstatelessResetMutex sync.Mutex\n\tstatelessResetHasher hash.Hash\n\n\ttracer logging.Tracer\n\tlogger utils.Logger\n}\n\nvar _ packetHandlerManager = &packetHandlerMap{}\n\nfunc setReceiveBuffer(c net.PacketConn, logger utils.Logger) {\n\tconn, ok := c.(interface{ SetReadBuffer(int) error })\n\tif !ok {\n\t\tlogger.Debugf(\"Connection doesn't allow setting of receive buffer size\")\n\t\treturn\n\t}\n\tsize, err := inspectReadBuffer(c)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to determine receive buffer size: %s\", err)\n\t\treturn\n\t}\n\tif size >= protocol.DesiredReceiveBufferSize {\n\t\tlogger.Debugf(\"Conn has receive buffer of %d kiB (wanted: at least %d kiB)\", size\/1024, protocol.DesiredReceiveBufferSize\/1024)\n\t}\n\tif err := conn.SetReadBuffer(protocol.DesiredReceiveBufferSize); err != nil {\n\t\tlog.Printf(\"Failed to increase receive buffer size: %s\\n\", err)\n\t\treturn\n\t}\n\tnewSize, err := inspectReadBuffer(c)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to determine receive buffer size: %s\", err)\n\t\treturn\n\t}\n\tif newSize == size {\n\t\tlog.Printf(\"Failed to increase receive buffer size (size remained at %d kiB)\", size\/1024)\n\t\treturn\n\t}\n\tif newSize < protocol.DesiredReceiveBufferSize {\n\t\tlog.Printf(\"Failed to sufficiently increase receive buffer size. 
Was: %d kiB, wanted: %d kiB, got: %d kiB.\", size\/1024, protocol.DesiredReceiveBufferSize\/1024, newSize\/1024)\n\t\treturn\n\t}\n\tlogger.Debugf(\"Increased receive buffer size to %d kiB\", newSize\/1024)\n}\n\nfunc newPacketHandlerMap(\n\tc net.PacketConn,\n\tconnIDLen int,\n\tstatelessResetKey []byte,\n\ttracer logging.Tracer,\n\tlogger utils.Logger,\n) (packetHandlerManager, error) {\n\tsetReceiveBuffer(c, logger)\n\tconn, err := wrapConn(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm := &packetHandlerMap{\n\t\tconn: conn,\n\t\tconnIDLen: connIDLen,\n\t\tlistening: make(chan struct{}),\n\t\thandlers: make(map[string]packetHandler),\n\t\tresetTokens: make(map[protocol.StatelessResetToken]packetHandler),\n\t\tdeleteRetiredSessionsAfter: protocol.RetiredConnectionIDDeleteTimeout,\n\t\tstatelessResetEnabled: len(statelessResetKey) > 0,\n\t\tstatelessResetHasher: hmac.New(sha256.New, statelessResetKey),\n\t\ttracer: tracer,\n\t\tlogger: logger,\n\t}\n\tgo m.listen()\n\n\tif logger.Debug() {\n\t\tgo m.logUsage()\n\t}\n\treturn m, nil\n}\n\nfunc (h *packetHandlerMap) logUsage() {\n\tticker := time.NewTicker(2 * time.Second)\n\tvar printedZero bool\n\tfor {\n\t\tselect {\n\t\tcase <-h.listening:\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t}\n\n\t\th.mutex.Lock()\n\t\tnumHandlers := len(h.handlers)\n\t\tnumTokens := len(h.resetTokens)\n\t\th.mutex.Unlock()\n\t\t\/\/ If the number of tracked handlers and tokens is zero, only print it a single time.\n\t\thasZero := numHandlers == 0 && numTokens == 0\n\t\tif !hasZero || (hasZero && !printedZero) {\n\t\t\th.logger.Debugf(\"Tracking %d connection IDs and %d reset tokens.\\n\", numHandlers, numTokens)\n\t\t\tprintedZero = false\n\t\t\tif hasZero {\n\t\t\t\tprintedZero = true\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (h *packetHandlerMap) Add(id protocol.ConnectionID, handler packetHandler) bool \/* was added *\/ {\n\tsid := string(id)\n\n\th.mutex.Lock()\n\tdefer h.mutex.Unlock()\n\n\tif _, ok := h.handlers[sid]; ok {\n\t\th.logger.Debugf(\"Not adding connection ID %s, as it already exists.\", id)\n\t\treturn false\n\t}\n\th.handlers[sid] = handler\n\th.logger.Debugf(\"Adding connection ID %s.\", id)\n\treturn true\n}\n\nfunc (h *packetHandlerMap) AddWithConnID(clientDestConnID, newConnID protocol.ConnectionID, fn func() packetHandler) bool {\n\tsid := string(clientDestConnID)\n\th.mutex.Lock()\n\tdefer h.mutex.Unlock()\n\n\tif _, ok := h.handlers[sid]; ok {\n\t\th.logger.Debugf(\"Not adding connection ID %s for a new session, as it already exists.\", clientDestConnID)\n\t\treturn false\n\t}\n\n\tsess := fn()\n\th.handlers[sid] = sess\n\th.handlers[string(newConnID)] = sess\n\th.logger.Debugf(\"Adding connection IDs %s and %s for a new session.\", clientDestConnID, newConnID)\n\treturn true\n}\n\nfunc (h *packetHandlerMap) Remove(id protocol.ConnectionID) {\n\th.mutex.Lock()\n\tdelete(h.handlers, string(id))\n\th.mutex.Unlock()\n\th.logger.Debugf(\"Removing connection ID %s.\", id)\n}\n\nfunc (h *packetHandlerMap) Retire(id protocol.ConnectionID) {\n\th.logger.Debugf(\"Retiring connection ID %s in %s.\", id, h.deleteRetiredSessionsAfter)\n\ttime.AfterFunc(h.deleteRetiredSessionsAfter, func() {\n\t\th.mutex.Lock()\n\t\tdelete(h.handlers, string(id))\n\t\th.mutex.Unlock()\n\t\th.logger.Debugf(\"Removing connection ID %s after it has been retired.\", id)\n\t})\n}\n\nfunc (h *packetHandlerMap) ReplaceWithClosed(id protocol.ConnectionID, handler packetHandler) {\n\th.mutex.Lock()\n\th.handlers[string(id)] = 
handler\n\th.mutex.Unlock()\n\th.logger.Debugf(\"Replacing session for connection ID %s with a closed session.\", id)\n\n\ttime.AfterFunc(h.deleteRetiredSessionsAfter, func() {\n\t\th.mutex.Lock()\n\t\thandler.shutdown()\n\t\tdelete(h.handlers, string(id))\n\t\th.mutex.Unlock()\n\t\th.logger.Debugf(\"Removing connection ID %s for a closed session after it has been retired.\", id)\n\t})\n}\n\nfunc (h *packetHandlerMap) AddResetToken(token protocol.StatelessResetToken, handler packetHandler) {\n\th.mutex.Lock()\n\th.resetTokens[token] = handler\n\th.mutex.Unlock()\n}\n\nfunc (h *packetHandlerMap) RemoveResetToken(token protocol.StatelessResetToken) {\n\th.mutex.Lock()\n\tdelete(h.resetTokens, token)\n\th.mutex.Unlock()\n}\n\nfunc (h *packetHandlerMap) RetireResetToken(token protocol.StatelessResetToken) {\n\ttime.AfterFunc(h.deleteRetiredSessionsAfter, func() {\n\t\th.mutex.Lock()\n\t\tdelete(h.resetTokens, token)\n\t\th.mutex.Unlock()\n\t})\n}\n\nfunc (h *packetHandlerMap) SetServer(s unknownPacketHandler) {\n\th.mutex.Lock()\n\th.server = s\n\th.mutex.Unlock()\n}\n\nfunc (h *packetHandlerMap) CloseServer() {\n\th.mutex.Lock()\n\tif h.server == nil {\n\t\th.mutex.Unlock()\n\t\treturn\n\t}\n\th.server = nil\n\tvar wg sync.WaitGroup\n\tfor _, handler := range h.handlers {\n\t\tif handler.getPerspective() == protocol.PerspectiveServer {\n\t\t\twg.Add(1)\n\t\t\tgo func(handler packetHandler) {\n\t\t\t\t\/\/ blocks until the CONNECTION_CLOSE has been sent and the run-loop has stopped\n\t\t\t\thandler.shutdown()\n\t\t\t\twg.Done()\n\t\t\t}(handler)\n\t\t}\n\t}\n\th.mutex.Unlock()\n\twg.Wait()\n}\n\n\/\/ Destroy the underlying connection and wait until listen() has returned.\n\/\/ It does not close active sessions.\nfunc (h *packetHandlerMap) Destroy() error {\n\tif err := h.conn.Close(); err != nil {\n\t\treturn err\n\t}\n\t<-h.listening \/\/ wait until listening returns\n\treturn nil\n}\n\nfunc (h *packetHandlerMap) close(e error) error {\n\th.mutex.Lock()\n\tif h.closed {\n\t\th.mutex.Unlock()\n\t\treturn nil\n\t}\n\n\tvar wg sync.WaitGroup\n\tfor _, handler := range h.handlers {\n\t\twg.Add(1)\n\t\tgo func(handler packetHandler) {\n\t\t\thandler.destroy(e)\n\t\t\twg.Done()\n\t\t}(handler)\n\t}\n\n\tif h.server != nil {\n\t\th.server.setCloseError(e)\n\t}\n\th.closed = true\n\th.mutex.Unlock()\n\twg.Wait()\n\treturn getMultiplexer().RemoveConn(h.conn)\n}\n\nfunc (h *packetHandlerMap) listen() {\n\tdefer close(h.listening)\n\tfor {\n\t\tp, err := h.conn.ReadPacket()\n\t\tif nerr, ok := err.(net.Error); ok && nerr.Temporary() {\n\t\t\th.logger.Debugf(\"Temporary error reading from conn: %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\th.close(err)\n\t\t\treturn\n\t\t}\n\t\th.handlePacket(p)\n\t}\n}\n\nfunc (h *packetHandlerMap) handlePacket(p *receivedPacket) {\n\tconnID, err := wire.ParseConnectionID(p.data, h.connIDLen)\n\tif err != nil {\n\t\th.logger.Debugf(\"error parsing connection ID on packet from %s: %s\", p.remoteAddr, err)\n\t\tif h.tracer != nil {\n\t\t\th.tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropHeaderParseError)\n\t\t}\n\t\tp.buffer.MaybeRelease()\n\t\treturn\n\t}\n\n\th.mutex.Lock()\n\tdefer h.mutex.Unlock()\n\n\tif isStatelessReset := h.maybeHandleStatelessReset(p.data); isStatelessReset {\n\t\treturn\n\t}\n\n\tif handler, ok := h.handlers[string(connID)]; ok { \/\/ existing session\n\t\thandler.handlePacket(p)\n\t\treturn\n\t}\n\tif p.data[0]&0x80 == 0 {\n\t\tgo h.maybeSendStatelessReset(p, 
connID)\n\t\treturn\n\t}\n\tif h.server == nil { \/\/ no server set\n\t\th.logger.Debugf(\"received a packet with an unexpected connection ID %s\", connID)\n\t\treturn\n\t}\n\th.server.handlePacket(p)\n}\n\nfunc (h *packetHandlerMap) maybeHandleStatelessReset(data []byte) bool {\n\t\/\/ stateless resets are always short header packets\n\tif data[0]&0x80 != 0 {\n\t\treturn false\n\t}\n\tif len(data) < 17 \/* type byte + 16 bytes for the reset token *\/ {\n\t\treturn false\n\t}\n\n\tvar token protocol.StatelessResetToken\n\tcopy(token[:], data[len(data)-16:])\n\tif sess, ok := h.resetTokens[token]; ok {\n\t\th.logger.Debugf(\"Received a stateless reset with token %#x. Closing session.\", token)\n\t\tgo sess.destroy(statelessResetErr{token: token})\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (h *packetHandlerMap) GetStatelessResetToken(connID protocol.ConnectionID) protocol.StatelessResetToken {\n\tvar token protocol.StatelessResetToken\n\tif !h.statelessResetEnabled {\n\t\t\/\/ Return a random stateless reset token.\n\t\t\/\/ This token will be sent in the server's transport parameters.\n\t\t\/\/ By using a random token, an off-path attacker won't be able to disrupt the connection.\n\t\trand.Read(token[:])\n\t\treturn token\n\t}\n\th.statelessResetMutex.Lock()\n\th.statelessResetHasher.Write(connID.Bytes())\n\tcopy(token[:], h.statelessResetHasher.Sum(nil))\n\th.statelessResetHasher.Reset()\n\th.statelessResetMutex.Unlock()\n\treturn token\n}\n\nfunc (h *packetHandlerMap) maybeSendStatelessReset(p *receivedPacket, connID protocol.ConnectionID) {\n\tdefer p.buffer.Release()\n\tif !h.statelessResetEnabled {\n\t\treturn\n\t}\n\t\/\/ Don't send a stateless reset in response to very small packets.\n\t\/\/ This includes packets that could be stateless resets.\n\tif len(p.data) <= protocol.MinStatelessResetSize {\n\t\treturn\n\t}\n\ttoken := h.GetStatelessResetToken(connID)\n\th.logger.Debugf(\"Sending stateless reset to %s (connection ID: %s). Token: %#x\", p.remoteAddr, connID, token)\n\tdata := make([]byte, protocol.MinStatelessResetSize-16, protocol.MinStatelessResetSize)\n\trand.Read(data)\n\tdata[0] = (data[0] & 0x7f) | 0x40\n\tdata = append(data, token[:]...)\n\tif _, err := h.conn.WriteTo(data, p.remoteAddr); err != nil {\n\t\th.logger.Debugf(\"Error sending Stateless Reset: %s\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012-2015 Samuel Stauffer. 
All rights reserved.\n\/\/ Use of this source code is governed by a 3-clause BSD\n\/\/ license that can be found in the LICENSE file.\n\npackage parser\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"testing\"\n)\n\nfunc TestServiceParsing(t *testing.T) {\n\tparser := &Parser{}\n\tthrift, err := parser.Parse(bytes.NewBuffer([]byte(`\n\t\tinclude \"other.thrift\"\n\n\t\tnamespace go somepkg\n\t\tnamespace python some.module123\n\t\tnamespace python.py-twisted another\n\n\t\tconst map<string,string> M1 = {\"hello\": \"world\", \"goodnight\": \"moon\"}\n\t\tconst string S1 = \"foo\\\"\\tbar\"\n\t\tconst string S2 = 'foo\\'\\tbar'\n\t\tconst list<i64> L = [1, 2, 3];\n\n\t\tunion myUnion\n\t\t{\n\t\t\t1: double dbl = 1.1;\n\t\t\t2: string str = \"2\";\n\t\t\t3: i32 int32 = 3;\n\t\t\t4: i64 int64\n\t\t\t\t= 5;\n\t\t}\n\n\t\tenum Operation\n\t\t{\n\t\t\tADD = 1,\n\t\t\tSUBTRACT = 2\n\t\t}\n\n\t\tenum NoNewLineBeforeBrace {\n\t\t\tADD = 1,\n\t\t\tSUBTRACT = 2\n\t\t}\n\n\t\tservice ServiceNAME extends SomeBase\n\t\t{\n\t\t\t# authenticate method\n\t\t\t\/\/ comment2\n\t\t\t\/* some other\n\t\t\t comments *\/\n\t\t\tstring login(1:string password) throws (1:AuthenticationException authex),\n\t\t\toneway void explode();\n\t\t\tblah something()\n\t\t}\n\n\t\tstruct SomeStruct {\n\t\t\t1: double dbl = 1.2,\n\t\t\t2: optional string abc\n\t\t}\n\n\t\tstruct NewLineBeforeBrace\n\t\t{\n\t\t\t1: double dbl = 1.2,\n\t\t\t2: optional string abc\n\t\t}`)))\n\n\tif err != nil {\n\t\tt.Fatalf(\"Service parsing failed with error %s\", err.Error())\n\t}\n\n\tif thrift.Includes[\"other\"] != \"other.thrift\" {\n\t\tt.Errorf(\"Include not parsed: %+v\", thrift.Includes)\n\t}\n\n\tif c := thrift.Constants[\"M1\"]; c == nil {\n\t\tt.Errorf(\"M1 constant missing\")\n\t} else if c.Name != \"M1\" {\n\t\tt.Errorf(\"M1 name not M1, got '%s'\", c.Name)\n\t} else if v, e := c.Type.String(), \"map<string,string>\"; v != e {\n\t\tt.Errorf(\"Expected type '%s' for M1, got '%s'\", e, v)\n\t} else if _, ok := c.Value.([]KeyValue); !ok {\n\t\tt.Errorf(\"Expected []KeyValue value for M1, got %T\", c.Value)\n\t}\n\n\tif c := thrift.Constants[\"S1\"]; c == nil {\n\t\tt.Errorf(\"S1 constant missing\")\n\t} else if v, e := c.Value.(string), \"foo\\\"\\tbar\"; e != v {\n\t\tt.Errorf(\"Excepted %s for constnat S1, got %s\", strconv.Quote(e), strconv.Quote(v))\n\t}\n\tif c := thrift.Constants[\"S2\"]; c == nil {\n\t\tt.Errorf(\"S2 constant missing\")\n\t} else if v, e := c.Value.(string), \"foo'\\tbar\"; e != v {\n\t\tt.Errorf(\"Excepted %s for constnat S2, got %s\", strconv.Quote(e), strconv.Quote(v))\n\t}\n\n\texpConst := &Constant{\n\t\tName: \"L\",\n\t\tType: &Type{\n\t\t\tName: \"list\",\n\t\t\tValueType: &Type{Name: \"i64\"},\n\t\t},\n\t\tValue: []interface{}{int64(1), int64(2), int64(3)},\n\t}\n\tif c := thrift.Constants[\"L\"]; c == nil {\n\t\tt.Errorf(\"L constant missing\")\n\t} else if !reflect.DeepEqual(c, expConst) {\n\t\tt.Errorf(\"Expected for L:\\n%s\\ngot\\n%s\", pprint(expConst), pprint(c))\n\t}\n\n\texpectedStruct := &Struct{\n\t\tName: \"SomeStruct\",\n\t\tFields: []*Field{\n\t\t\t{\n\t\t\t\tID: 1,\n\t\t\t\tName: \"dbl\",\n\t\t\t\tDefault: 1.2,\n\t\t\t\tType: &Type{\n\t\t\t\t\tName: \"double\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tID: 2,\n\t\t\t\tName: \"abc\",\n\t\t\t\tOptional: true,\n\t\t\t\tType: &Type{\n\t\t\t\t\tName: \"string\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tif s := thrift.Structs[\"SomeStruct\"]; s == nil {\n\t\tt.Errorf(\"SomeStruct missing\")\n\t} else if 
!reflect.DeepEqual(s, expectedStruct) {\n\t\tt.Errorf(\"Expected\\n%s\\ngot\\n%s\", pprint(expectedStruct), pprint(s))\n\t}\n\n\texpectedUnion := &Struct{\n\t\tName: \"myUnion\",\n\t\tFields: []*Field{\n\t\t\t{\n\t\t\t\tID: 1,\n\t\t\t\tName: \"dbl\",\n\t\t\t\tDefault: 1.1,\n\t\t\t\tOptional: true,\n\t\t\t\tType: &Type{\n\t\t\t\t\tName: \"double\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tID: 2,\n\t\t\t\tName: \"str\",\n\t\t\t\tDefault: \"2\",\n\t\t\t\tOptional: true,\n\t\t\t\tType: &Type{\n\t\t\t\t\tName: \"string\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tID: 3,\n\t\t\t\tName: \"int32\",\n\t\t\t\tDefault: int64(3),\n\t\t\t\tOptional: true,\n\t\t\t\tType: &Type{\n\t\t\t\t\tName: \"i32\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tID: 4,\n\t\t\t\tName: \"int64\",\n\t\t\t\tDefault: int64(5),\n\t\t\t\tOptional: true,\n\t\t\t\tType: &Type{\n\t\t\t\t\tName: \"i64\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tif u := thrift.Unions[\"myUnion\"]; u == nil {\n\t\tt.Errorf(\"myUnion missing\")\n\t} else if !reflect.DeepEqual(u, expectedUnion) {\n\t\tt.Errorf(\"Expected\\n%s\\ngot\\n%s\", pprint(expectedUnion), pprint(u))\n\t}\n\n\texpectedEnum := &Enum{\n\t\tName: \"Operation\",\n\t\tValues: map[string]*EnumValue{\n\t\t\t\"ADD\": &EnumValue{\n\t\t\t\tName: \"ADD\",\n\t\t\t\tValue: 1,\n\t\t\t},\n\t\t\t\"SUBTRACT\": &EnumValue{\n\t\t\t\tName: \"SUBTRACT\",\n\t\t\t\tValue: 2,\n\t\t\t},\n\t\t},\n\t}\n\tif e := thrift.Enums[\"Operation\"]; e == nil {\n\t\tt.Errorf(\"enum Operation missing\")\n\t} else if !reflect.DeepEqual(e, expectedEnum) {\n\t\tt.Errorf(\"Expected\\n%s\\ngot\\n%s\", pprint(expectedEnum), pprint(e))\n\t}\n\n\tif len(thrift.Services) != 1 {\n\t\tt.Fatalf(\"Parsing service returned %d services rather than 1 as expected\", len(thrift.Services))\n\t}\n\tsvc := thrift.Services[\"ServiceNAME\"]\n\tif svc == nil || svc.Name != \"ServiceNAME\" {\n\t\tt.Fatalf(\"Parsing service expected to find 'ServiceNAME' rather than '%+v'\", thrift.Services)\n\t} else if svc.Extends != \"SomeBase\" {\n\t\tt.Errorf(\"Expected extends 'SomeBase' got '%s'\", svc.Extends)\n\t}\n\n\texpected := map[string]*Service{\n\t\t\"ServiceNAME\": &Service{\n\t\t\tName: \"ServiceNAME\",\n\t\t\tExtends: \"SomeBase\",\n\t\t\tMethods: map[string]*Method{\n\t\t\t\t\"login\": &Method{\n\t\t\t\t\tName: \"login\",\n\t\t\t\t\tReturnType: &Type{\n\t\t\t\t\t\tName: \"string\",\n\t\t\t\t\t},\n\t\t\t\t\tArguments: []*Field{\n\t\t\t\t\t\t&Field{\n\t\t\t\t\t\t\tID: 1,\n\t\t\t\t\t\t\tName: \"password\",\n\t\t\t\t\t\t\tOptional: false,\n\t\t\t\t\t\t\tType: &Type{\n\t\t\t\t\t\t\t\tName: \"string\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tExceptions: []*Field{\n\t\t\t\t\t\t&Field{\n\t\t\t\t\t\t\tID: 1,\n\t\t\t\t\t\t\tName: \"authex\",\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tType: &Type{\n\t\t\t\t\t\t\t\tName: \"AuthenticationException\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"explode\": &Method{\n\t\t\t\t\tName: \"explode\",\n\t\t\t\t\tReturnType: nil,\n\t\t\t\t\tOneway: true,\n\t\t\t\t\tArguments: []*Field{},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor n, m := range expected[\"ServiceNAME\"].Methods {\n\t\tif !reflect.DeepEqual(svc.Methods[n], m) {\n\t\t\tt.Fatalf(\"Parsing service returned method\\n%s\\ninstead of\\n%s\", pprint(svc.Methods[n]), pprint(m))\n\t\t}\n\t}\n}\n\nfunc TestParseConstant(t *testing.T) {\n\tparser := &Parser{}\n\tthrift, err := parser.Parse(bytes.NewBuffer([]byte(`\n\t\tconst string C1 = \"test\"\n\t\tconst string C2 = C1\n\t\t`)))\n\tif err != nil 
{\n\t\tt.Fatalf(\"Service parsing failed with error %s\", err.Error())\n\t}\n\n\texpected := map[string]*Constant{\n\t\t\"C1\": &Constant{\n\t\t\tName: \"C1\",\n\t\t\tType: &Type{Name: \"string\"},\n\t\t\tValue: \"test\",\n\t\t},\n\t\t\"C2\": &Constant{\n\t\t\tName: \"C2\",\n\t\t\tType: &Type{Name: \"string\"},\n\t\t\tValue: Identifier(\"C1\"),\n\t\t},\n\t}\n\tif got := thrift.Constants; !reflect.DeepEqual(expected, got) {\n\t\tt.Errorf(\"Unexpected constant parsing got\\n%s\\ninstead of\\n%s\", pprint(expected), pprint(got))\n\t}\n}\n\n\/\/ func TestParseFile(t *testing.T) {\n\/\/ \tth, err := ParseFile(\"..\/testfiles\/full.thrift\")\n\/\/ \tif err != nil {\n\/\/ \t\tt.Fatal(err)\n\/\/ \t}\n\/\/ \tb, err := json.MarshalIndent(th, \"\", \" \")\n\/\/ \tif err != nil {\n\/\/ \t\tt.Fatal(err)\n\/\/ \t}\n\/\/ \t_ = b\n\/\/ }\n\nfunc pprint(v interface{}) string {\n\tb, err := json.MarshalIndent(v, \"\", \" \")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(b)\n}\n<commit_msg>Enable TestParseFile for cassandra and HBase files<commit_after>\/\/ Copyright 2012-2015 Samuel Stauffer. All rights reserved.\n\/\/ Use of this source code is governed by a 3-clause BSD\n\/\/ license that can be found in the LICENSE file.\n\npackage parser\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"testing\"\n)\n\nfunc TestServiceParsing(t *testing.T) {\n\tparser := &Parser{}\n\tthrift, err := parser.Parse(bytes.NewBuffer([]byte(`\n\t\tinclude \"other.thrift\"\n\n\t\tnamespace go somepkg\n\t\tnamespace python some.module123\n\t\tnamespace python.py-twisted another\n\n\t\tconst map<string,string> M1 = {\"hello\": \"world\", \"goodnight\": \"moon\"}\n\t\tconst string S1 = \"foo\\\"\\tbar\"\n\t\tconst string S2 = 'foo\\'\\tbar'\n\t\tconst list<i64> L = [1, 2, 3];\n\n\t\tunion myUnion\n\t\t{\n\t\t\t1: double dbl = 1.1;\n\t\t\t2: string str = \"2\";\n\t\t\t3: i32 int32 = 3;\n\t\t\t4: i64 int64\n\t\t\t\t= 5;\n\t\t}\n\n\t\tenum Operation\n\t\t{\n\t\t\tADD = 1,\n\t\t\tSUBTRACT = 2\n\t\t}\n\n\t\tenum NoNewLineBeforeBrace {\n\t\t\tADD = 1,\n\t\t\tSUBTRACT = 2\n\t\t}\n\n\t\tservice ServiceNAME extends SomeBase\n\t\t{\n\t\t\t# authenticate method\n\t\t\t\/\/ comment2\n\t\t\t\/* some other\n\t\t\t comments *\/\n\t\t\tstring login(1:string password) throws (1:AuthenticationException authex),\n\t\t\toneway void explode();\n\t\t\tblah something()\n\t\t}\n\n\t\tstruct SomeStruct {\n\t\t\t1: double dbl = 1.2,\n\t\t\t2: optional string abc\n\t\t}\n\n\t\tstruct NewLineBeforeBrace\n\t\t{\n\t\t\t1: double dbl = 1.2,\n\t\t\t2: optional string abc\n\t\t}`)))\n\n\tif err != nil {\n\t\tt.Fatalf(\"Service parsing failed with error %s\", err.Error())\n\t}\n\n\tif thrift.Includes[\"other\"] != \"other.thrift\" {\n\t\tt.Errorf(\"Include not parsed: %+v\", thrift.Includes)\n\t}\n\n\tif c := thrift.Constants[\"M1\"]; c == nil {\n\t\tt.Errorf(\"M1 constant missing\")\n\t} else if c.Name != \"M1\" {\n\t\tt.Errorf(\"M1 name not M1, got '%s'\", c.Name)\n\t} else if v, e := c.Type.String(), \"map<string,string>\"; v != e {\n\t\tt.Errorf(\"Expected type '%s' for M1, got '%s'\", e, v)\n\t} else if _, ok := c.Value.([]KeyValue); !ok {\n\t\tt.Errorf(\"Expected []KeyValue value for M1, got %T\", c.Value)\n\t}\n\n\tif c := thrift.Constants[\"S1\"]; c == nil {\n\t\tt.Errorf(\"S1 constant missing\")\n\t} else if v, e := c.Value.(string), \"foo\\\"\\tbar\"; e != v {\n\t\tt.Errorf(\"Excepted %s for constnat S1, got %s\", strconv.Quote(e), strconv.Quote(v))\n\t}\n\tif c := thrift.Constants[\"S2\"]; 
c == nil {\n\t\tt.Errorf(\"S2 constant missing\")\n\t} else if v, e := c.Value.(string), \"foo'\\tbar\"; e != v {\n\t\tt.Errorf(\"Excepted %s for constnat S2, got %s\", strconv.Quote(e), strconv.Quote(v))\n\t}\n\n\texpConst := &Constant{\n\t\tName: \"L\",\n\t\tType: &Type{\n\t\t\tName: \"list\",\n\t\t\tValueType: &Type{Name: \"i64\"},\n\t\t},\n\t\tValue: []interface{}{int64(1), int64(2), int64(3)},\n\t}\n\tif c := thrift.Constants[\"L\"]; c == nil {\n\t\tt.Errorf(\"L constant missing\")\n\t} else if !reflect.DeepEqual(c, expConst) {\n\t\tt.Errorf(\"Expected for L:\\n%s\\ngot\\n%s\", pprint(expConst), pprint(c))\n\t}\n\n\texpectedStruct := &Struct{\n\t\tName: \"SomeStruct\",\n\t\tFields: []*Field{\n\t\t\t{\n\t\t\t\tID: 1,\n\t\t\t\tName: \"dbl\",\n\t\t\t\tDefault: 1.2,\n\t\t\t\tType: &Type{\n\t\t\t\t\tName: \"double\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tID: 2,\n\t\t\t\tName: \"abc\",\n\t\t\t\tOptional: true,\n\t\t\t\tType: &Type{\n\t\t\t\t\tName: \"string\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tif s := thrift.Structs[\"SomeStruct\"]; s == nil {\n\t\tt.Errorf(\"SomeStruct missing\")\n\t} else if !reflect.DeepEqual(s, expectedStruct) {\n\t\tt.Errorf(\"Expected\\n%s\\ngot\\n%s\", pprint(expectedStruct), pprint(s))\n\t}\n\n\texpectedUnion := &Struct{\n\t\tName: \"myUnion\",\n\t\tFields: []*Field{\n\t\t\t{\n\t\t\t\tID: 1,\n\t\t\t\tName: \"dbl\",\n\t\t\t\tDefault: 1.1,\n\t\t\t\tOptional: true,\n\t\t\t\tType: &Type{\n\t\t\t\t\tName: \"double\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tID: 2,\n\t\t\t\tName: \"str\",\n\t\t\t\tDefault: \"2\",\n\t\t\t\tOptional: true,\n\t\t\t\tType: &Type{\n\t\t\t\t\tName: \"string\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tID: 3,\n\t\t\t\tName: \"int32\",\n\t\t\t\tDefault: int64(3),\n\t\t\t\tOptional: true,\n\t\t\t\tType: &Type{\n\t\t\t\t\tName: \"i32\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tID: 4,\n\t\t\t\tName: \"int64\",\n\t\t\t\tDefault: int64(5),\n\t\t\t\tOptional: true,\n\t\t\t\tType: &Type{\n\t\t\t\t\tName: \"i64\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tif u := thrift.Unions[\"myUnion\"]; u == nil {\n\t\tt.Errorf(\"myUnion missing\")\n\t} else if !reflect.DeepEqual(u, expectedUnion) {\n\t\tt.Errorf(\"Expected\\n%s\\ngot\\n%s\", pprint(expectedUnion), pprint(u))\n\t}\n\n\texpectedEnum := &Enum{\n\t\tName: \"Operation\",\n\t\tValues: map[string]*EnumValue{\n\t\t\t\"ADD\": &EnumValue{\n\t\t\t\tName: \"ADD\",\n\t\t\t\tValue: 1,\n\t\t\t},\n\t\t\t\"SUBTRACT\": &EnumValue{\n\t\t\t\tName: \"SUBTRACT\",\n\t\t\t\tValue: 2,\n\t\t\t},\n\t\t},\n\t}\n\tif e := thrift.Enums[\"Operation\"]; e == nil {\n\t\tt.Errorf(\"enum Operation missing\")\n\t} else if !reflect.DeepEqual(e, expectedEnum) {\n\t\tt.Errorf(\"Expected\\n%s\\ngot\\n%s\", pprint(expectedEnum), pprint(e))\n\t}\n\n\tif len(thrift.Services) != 1 {\n\t\tt.Fatalf(\"Parsing service returned %d services rather than 1 as expected\", len(thrift.Services))\n\t}\n\tsvc := thrift.Services[\"ServiceNAME\"]\n\tif svc == nil || svc.Name != \"ServiceNAME\" {\n\t\tt.Fatalf(\"Parsing service expected to find 'ServiceNAME' rather than '%+v'\", thrift.Services)\n\t} else if svc.Extends != \"SomeBase\" {\n\t\tt.Errorf(\"Expected extends 'SomeBase' got '%s'\", svc.Extends)\n\t}\n\n\texpected := map[string]*Service{\n\t\t\"ServiceNAME\": &Service{\n\t\t\tName: \"ServiceNAME\",\n\t\t\tExtends: \"SomeBase\",\n\t\t\tMethods: map[string]*Method{\n\t\t\t\t\"login\": &Method{\n\t\t\t\t\tName: \"login\",\n\t\t\t\t\tReturnType: &Type{\n\t\t\t\t\t\tName: \"string\",\n\t\t\t\t\t},\n\t\t\t\t\tArguments: 
[]*Field{\n\t\t\t\t\t\t&Field{\n\t\t\t\t\t\t\tID: 1,\n\t\t\t\t\t\t\tName: \"password\",\n\t\t\t\t\t\t\tOptional: false,\n\t\t\t\t\t\t\tType: &Type{\n\t\t\t\t\t\t\t\tName: \"string\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tExceptions: []*Field{\n\t\t\t\t\t\t&Field{\n\t\t\t\t\t\t\tID: 1,\n\t\t\t\t\t\t\tName: \"authex\",\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tType: &Type{\n\t\t\t\t\t\t\t\tName: \"AuthenticationException\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"explode\": &Method{\n\t\t\t\t\tName: \"explode\",\n\t\t\t\t\tReturnType: nil,\n\t\t\t\t\tOneway: true,\n\t\t\t\t\tArguments: []*Field{},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor n, m := range expected[\"ServiceNAME\"].Methods {\n\t\tif !reflect.DeepEqual(svc.Methods[n], m) {\n\t\t\tt.Fatalf(\"Parsing service returned method\\n%s\\ninstead of\\n%s\", pprint(svc.Methods[n]), pprint(m))\n\t\t}\n\t}\n}\n\nfunc TestParseConstant(t *testing.T) {\n\tparser := &Parser{}\n\tthrift, err := parser.Parse(bytes.NewBuffer([]byte(`\n\t\tconst string C1 = \"test\"\n\t\tconst string C2 = C1\n\t\t`)))\n\tif err != nil {\n\t\tt.Fatalf(\"Service parsing failed with error %s\", err.Error())\n\t}\n\n\texpected := map[string]*Constant{\n\t\t\"C1\": &Constant{\n\t\t\tName: \"C1\",\n\t\t\tType: &Type{Name: \"string\"},\n\t\t\tValue: \"test\",\n\t\t},\n\t\t\"C2\": &Constant{\n\t\t\tName: \"C2\",\n\t\t\tType: &Type{Name: \"string\"},\n\t\t\tValue: Identifier(\"C1\"),\n\t\t},\n\t}\n\tif got := thrift.Constants; !reflect.DeepEqual(expected, got) {\n\t\tt.Errorf(\"Unexpected constant parsing got\\n%s\\ninstead of\\n%s\", pprint(expected), pprint(got))\n\t}\n}\n\nfunc TestParseFiles(t *testing.T) {\n\tfiles := []string{\n\t\t\"cassandra.thrift\",\n\t\t\"Hbase.thrift\",\n\t}\n\n\tfor _, f := range files {\n\t\t_, err := ParseFile(filepath.Join(\"..\/testfiles\", f))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Failed to parse file %q: %v\", f, err)\n\t\t}\n\t}\n}\n\nfunc pprint(v interface{}) string {\n\tb, err := json.MarshalIndent(v, \"\", \" \")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(b)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2018 Couchbase, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ \t\thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. 
See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\npackage zap\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com\/couchbase\/vellum\"\n)\n\ntype enumTestEntry struct {\n\tkey string\n\tval uint64\n}\n\ntype enumTestWant struct {\n\tkey string\n\tidx int\n\tval uint64\n}\n\nfunc TestEnumerator(t *testing.T) {\n\ttests := []struct {\n\t\tdesc string\n\t\tin [][]enumTestEntry\n\t\twant []enumTestWant\n\t}{\n\t\t{\n\t\t\tdesc: \"two non-empty enumerators with no duplicate keys\",\n\t\t\tin: [][]enumTestEntry{\n\t\t\t\t[]enumTestEntry{\n\t\t\t\t\t{\"a\", 1},\n\t\t\t\t\t{\"c\", 3},\n\t\t\t\t\t{\"e\", 5},\n\t\t\t\t},\n\t\t\t\t[]enumTestEntry{\n\t\t\t\t\t{\"b\", 2},\n\t\t\t\t\t{\"d\", 4},\n\t\t\t\t\t{\"f\", 6},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []enumTestWant{\n\t\t\t\t{\"a\", 0, 1},\n\t\t\t\t{\"b\", 1, 2},\n\t\t\t\t{\"c\", 0, 3},\n\t\t\t\t{\"d\", 1, 4},\n\t\t\t\t{\"e\", 0, 5},\n\t\t\t\t{\"f\", 1, 6},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"two non-empty enumerators with duplicate keys\",\n\t\t\tin: [][]enumTestEntry{\n\t\t\t\t[]enumTestEntry{\n\t\t\t\t\t{\"a\", 1},\n\t\t\t\t\t{\"c\", 3},\n\t\t\t\t\t{\"e\", 5},\n\t\t\t\t},\n\t\t\t\t[]enumTestEntry{\n\t\t\t\t\t{\"a\", 2},\n\t\t\t\t\t{\"c\", 4},\n\t\t\t\t\t{\"e\", 6},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []enumTestWant{\n\t\t\t\t{\"a\", 0, 1},\n\t\t\t\t{\"a\", 1, 2},\n\t\t\t\t{\"c\", 0, 3},\n\t\t\t\t{\"c\", 1, 4},\n\t\t\t\t{\"e\", 0, 5},\n\t\t\t\t{\"e\", 1, 6},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"first iterator is empty\",\n\t\t\tin: [][]enumTestEntry{\n\t\t\t\t[]enumTestEntry{},\n\t\t\t\t[]enumTestEntry{\n\t\t\t\t\t{\"a\", 2},\n\t\t\t\t\t{\"c\", 4},\n\t\t\t\t\t{\"e\", 6},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []enumTestWant{\n\t\t\t\t{\"a\", 1, 2},\n\t\t\t\t{\"c\", 1, 4},\n\t\t\t\t{\"e\", 1, 6},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"last iterator is empty\",\n\t\t\tin: [][]enumTestEntry{\n\t\t\t\t[]enumTestEntry{\n\t\t\t\t\t{\"a\", 1},\n\t\t\t\t\t{\"c\", 3},\n\t\t\t\t\t{\"e\", 5},\n\t\t\t\t},\n\t\t\t\t[]enumTestEntry{},\n\t\t\t},\n\t\t\twant: []enumTestWant{\n\t\t\t\t{\"a\", 0, 1},\n\t\t\t\t{\"c\", 0, 3},\n\t\t\t\t{\"e\", 0, 5},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"two different length enumerators with duplicate keys\",\n\t\t\tin: [][]enumTestEntry{\n\t\t\t\t[]enumTestEntry{\n\t\t\t\t\t{\"a\", 1},\n\t\t\t\t\t{\"c\", 3},\n\t\t\t\t\t{\"e\", 5},\n\t\t\t\t},\n\t\t\t\t[]enumTestEntry{\n\t\t\t\t\t{\"a\", 2},\n\t\t\t\t\t{\"b\", 4},\n\t\t\t\t\t{\"d\", 1000},\n\t\t\t\t\t{\"e\", 6},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []enumTestWant{\n\t\t\t\t{\"a\", 0, 1},\n\t\t\t\t{\"a\", 1, 2},\n\t\t\t\t{\"b\", 1, 4},\n\t\t\t\t{\"c\", 0, 3},\n\t\t\t\t{\"d\", 1, 1000},\n\t\t\t\t{\"e\", 0, 5},\n\t\t\t\t{\"e\", 1, 6},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tvar itrs []vellum.Iterator\n\t\tfor _, entries := range test.in {\n\t\t\titrs = append(itrs, &testIterator{entries: entries})\n\t\t}\n\n\t\tenumerator, err := newEnumerator(itrs)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s - expected no err on newNumerator, got: %v\", test.desc, err)\n\t\t}\n\n\t\twanti := 0\n\t\tfor wanti < len(test.want) {\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"%s - wanted no err, got: %v\", test.desc, err)\n\t\t\t}\n\n\t\t\tcurrK, currIdx, currV := enumerator.Current()\n\n\t\t\twant := test.want[wanti]\n\t\t\tif want.key != string(currK) {\n\t\t\t\tt.Fatalf(\"%s - wrong key, wanted: %#v, got: %q, %d, %d\", test.desc,\n\t\t\t\t\twant, currK, currIdx, currV)\n\t\t\t}\n\t\t\tif want.idx != currIdx 
{\n\t\t\t\tt.Fatalf(\"%s - wrong idx, wanted: %#v, got: %q, %d, %d\", test.desc,\n\t\t\t\t\twant, currK, currIdx, currV)\n\t\t\t}\n\t\t\tif want.val != currV {\n\t\t\t\tt.Fatalf(\"%s - wrong val, wanted: %#v, got: %q, %d, %d\", test.desc,\n\t\t\t\t\twant, currK, currIdx, currV)\n\t\t\t}\n\n\t\t\twanti += 1\n\n\t\t\terr = enumerator.Next()\n\t\t}\n\n\t\tif err != vellum.ErrIteratorDone {\n\t\t\tt.Fatalf(\"%s - expected ErrIteratorDone, got: %v\", test.desc, err)\n\t\t}\n\n\t\terr = enumerator.Close()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s - expected nil err on close, got: %v\", test.desc, err)\n\t\t}\n\n\t\tfor _, itr := range itrs {\n\t\t\tif itr.(*testIterator).curr != 654321 {\n\t\t\t\tt.Fatalf(\"%s - expected child iter to be closed\", test.desc)\n\t\t\t}\n\t\t}\n\t}\n}\n\ntype testIterator struct {\n\tentries []enumTestEntry\n\tcurr int\n}\n\nfunc (m *testIterator) Current() ([]byte, uint64) {\n\tif m.curr >= len(m.entries) {\n\t\treturn nil, 0\n\t}\n\treturn []byte(m.entries[m.curr].key), m.entries[m.curr].val\n}\n\nfunc (m *testIterator) Next() error {\n\tm.curr++\n\tif m.curr >= len(m.entries) {\n\t\treturn vellum.ErrIteratorDone\n\t}\n\treturn nil\n}\n\nfunc (m *testIterator) Seek(key []byte) error {\n\treturn fmt.Errorf(\"not implemented for enumerator unit tests\")\n}\n\nfunc (m *testIterator) Reset(f *vellum.FST,\n\tstartKeyInclusive, endKeyExclusive []byte, aut vellum.Automaton) error {\n\treturn fmt.Errorf(\"not implemented for enumerator unit tests\")\n}\n\nfunc (m *testIterator) Close() error {\n\tm.curr = 654321\n\treturn nil\n}\n<commit_msg>EnumeratorTests adopting Exists method<commit_after>\/\/ Copyright (c) 2018 Couchbase, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ \t\thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. 
See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\npackage zap\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com\/couchbase\/vellum\"\n)\n\ntype enumTestEntry struct {\n\tkey string\n\tval uint64\n}\n\ntype enumTestWant struct {\n\tkey string\n\tidx int\n\tval uint64\n}\n\nfunc TestEnumerator(t *testing.T) {\n\ttests := []struct {\n\t\tdesc string\n\t\tin [][]enumTestEntry\n\t\twant []enumTestWant\n\t}{\n\t\t{\n\t\t\tdesc: \"two non-empty enumerators with no duplicate keys\",\n\t\t\tin: [][]enumTestEntry{\n\t\t\t\t[]enumTestEntry{\n\t\t\t\t\t{\"a\", 1},\n\t\t\t\t\t{\"c\", 3},\n\t\t\t\t\t{\"e\", 5},\n\t\t\t\t},\n\t\t\t\t[]enumTestEntry{\n\t\t\t\t\t{\"b\", 2},\n\t\t\t\t\t{\"d\", 4},\n\t\t\t\t\t{\"f\", 6},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []enumTestWant{\n\t\t\t\t{\"a\", 0, 1},\n\t\t\t\t{\"b\", 1, 2},\n\t\t\t\t{\"c\", 0, 3},\n\t\t\t\t{\"d\", 1, 4},\n\t\t\t\t{\"e\", 0, 5},\n\t\t\t\t{\"f\", 1, 6},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"two non-empty enumerators with duplicate keys\",\n\t\t\tin: [][]enumTestEntry{\n\t\t\t\t[]enumTestEntry{\n\t\t\t\t\t{\"a\", 1},\n\t\t\t\t\t{\"c\", 3},\n\t\t\t\t\t{\"e\", 5},\n\t\t\t\t},\n\t\t\t\t[]enumTestEntry{\n\t\t\t\t\t{\"a\", 2},\n\t\t\t\t\t{\"c\", 4},\n\t\t\t\t\t{\"e\", 6},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []enumTestWant{\n\t\t\t\t{\"a\", 0, 1},\n\t\t\t\t{\"a\", 1, 2},\n\t\t\t\t{\"c\", 0, 3},\n\t\t\t\t{\"c\", 1, 4},\n\t\t\t\t{\"e\", 0, 5},\n\t\t\t\t{\"e\", 1, 6},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"first iterator is empty\",\n\t\t\tin: [][]enumTestEntry{\n\t\t\t\t[]enumTestEntry{},\n\t\t\t\t[]enumTestEntry{\n\t\t\t\t\t{\"a\", 2},\n\t\t\t\t\t{\"c\", 4},\n\t\t\t\t\t{\"e\", 6},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []enumTestWant{\n\t\t\t\t{\"a\", 1, 2},\n\t\t\t\t{\"c\", 1, 4},\n\t\t\t\t{\"e\", 1, 6},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"last iterator is empty\",\n\t\t\tin: [][]enumTestEntry{\n\t\t\t\t[]enumTestEntry{\n\t\t\t\t\t{\"a\", 1},\n\t\t\t\t\t{\"c\", 3},\n\t\t\t\t\t{\"e\", 5},\n\t\t\t\t},\n\t\t\t\t[]enumTestEntry{},\n\t\t\t},\n\t\t\twant: []enumTestWant{\n\t\t\t\t{\"a\", 0, 1},\n\t\t\t\t{\"c\", 0, 3},\n\t\t\t\t{\"e\", 0, 5},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"two different length enumerators with duplicate keys\",\n\t\t\tin: [][]enumTestEntry{\n\t\t\t\t[]enumTestEntry{\n\t\t\t\t\t{\"a\", 1},\n\t\t\t\t\t{\"c\", 3},\n\t\t\t\t\t{\"e\", 5},\n\t\t\t\t},\n\t\t\t\t[]enumTestEntry{\n\t\t\t\t\t{\"a\", 2},\n\t\t\t\t\t{\"b\", 4},\n\t\t\t\t\t{\"d\", 1000},\n\t\t\t\t\t{\"e\", 6},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []enumTestWant{\n\t\t\t\t{\"a\", 0, 1},\n\t\t\t\t{\"a\", 1, 2},\n\t\t\t\t{\"b\", 1, 4},\n\t\t\t\t{\"c\", 0, 3},\n\t\t\t\t{\"d\", 1, 1000},\n\t\t\t\t{\"e\", 0, 5},\n\t\t\t\t{\"e\", 1, 6},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tvar itrs []vellum.Iterator\n\t\tfor _, entries := range test.in {\n\t\t\titrs = append(itrs, &testIterator{entries: entries})\n\t\t}\n\n\t\tenumerator, err := newEnumerator(itrs)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s - expected no err on newNumerator, got: %v\", test.desc, err)\n\t\t}\n\n\t\twanti := 0\n\t\tfor wanti < len(test.want) {\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"%s - wanted no err, got: %v\", test.desc, err)\n\t\t\t}\n\n\t\t\tcurrK, currIdx, currV := enumerator.Current()\n\n\t\t\twant := test.want[wanti]\n\t\t\tif want.key != string(currK) {\n\t\t\t\tt.Fatalf(\"%s - wrong key, wanted: %#v, got: %q, %d, %d\", test.desc,\n\t\t\t\t\twant, currK, currIdx, currV)\n\t\t\t}\n\t\t\tif want.idx != currIdx 
{\n\t\t\t\tt.Fatalf(\"%s - wrong idx, wanted: %#v, got: %q, %d, %d\", test.desc,\n\t\t\t\t\twant, currK, currIdx, currV)\n\t\t\t}\n\t\t\tif want.val != currV {\n\t\t\t\tt.Fatalf(\"%s - wrong val, wanted: %#v, got: %q, %d, %d\", test.desc,\n\t\t\t\t\twant, currK, currIdx, currV)\n\t\t\t}\n\n\t\t\twanti += 1\n\n\t\t\terr = enumerator.Next()\n\t\t}\n\n\t\tif err != vellum.ErrIteratorDone {\n\t\t\tt.Fatalf(\"%s - expected ErrIteratorDone, got: %v\", test.desc, err)\n\t\t}\n\n\t\terr = enumerator.Close()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s - expected nil err on close, got: %v\", test.desc, err)\n\t\t}\n\n\t\tfor _, itr := range itrs {\n\t\t\tif itr.(*testIterator).curr != 654321 {\n\t\t\t\tt.Fatalf(\"%s - expected child iter to be closed\", test.desc)\n\t\t\t}\n\t\t}\n\t}\n}\n\ntype testIterator struct {\n\tentries []enumTestEntry\n\tcurr int\n}\n\nfunc (m *testIterator) Current() ([]byte, uint64) {\n\tif m.curr >= len(m.entries) {\n\t\treturn nil, 0\n\t}\n\treturn []byte(m.entries[m.curr].key), m.entries[m.curr].val\n}\n\nfunc (m *testIterator) Next() error {\n\tm.curr++\n\tif m.curr >= len(m.entries) {\n\t\treturn vellum.ErrIteratorDone\n\t}\n\treturn nil\n}\n\nfunc (m *testIterator) Seek(key []byte) error {\n\treturn fmt.Errorf(\"not implemented for enumerator unit tests\")\n}\n\nfunc (m *testIterator) Reset(f *vellum.FST,\n\tstartKeyInclusive, endKeyExclusive []byte, aut vellum.Automaton) error {\n\treturn fmt.Errorf(\"not implemented for enumerator unit tests\")\n}\n\nfunc (m *testIterator) Close() error {\n\tm.curr = 654321\n\treturn nil\n}\n\nfunc (m *testIterator) Exists(key []byte) (bool, error) {\n\treturn false, fmt.Errorf(\"not implemented for enumerator unit tests\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package parser parses Reddit's JSON responses into protobuffers.\npackage parser\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/turnage\/redditproto\"\n)\n\n\/\/ ParseLinkListing returns a slice of Links which hold the same data the JSON\n\/\/ link listing provided contains.\nfunc ParseLinkListing(content io.ReadCloser) ([]*redditproto.Link, error) {\n\tif content == nil {\n\t\treturn nil, fmt.Errorf(\"no content provided\")\n\t}\n\n\tlisting := &redditproto.LinkListing{}\n\tdecoder := json.NewDecoder(content)\n\tif err := decoder.Decode(listing); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn unpackLinkListing(listing)\n}\n\n\/\/ ParseThread parses a combination link listing and comment listing, which\n\/\/ Reddit returns when asked for the JSON digest of a thread. This contains the\n\/\/ submission's information, and all of its comments. 
The returned link will\n\/\/ have the Comments field filled, and the comments will have their ReplyTree\n\/\/ field filled.\nfunc ParseThread(content io.ReadCloser) (*redditproto.Link, error) {\n\tif content == nil {\n\t\treturn nil, fmt.Errorf(\"no content provided\")\n\t}\n\n\tlistings := []interface{}{\n\t\t&redditproto.LinkListing{},\n\t\t&redditproto.CommentListing{},\n\t}\n\tdecoder := json.NewDecoder(content)\n\tif err := decoder.Decode(&listings); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(listings) != 2 {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"json decoding malformed the listings: %v\",\n\t\t\tlistings)\n\t}\n\n\tlinkListing := listings[0].(*redditproto.LinkListing)\n\tcommentListing := listings[1].(*redditproto.CommentListing)\n\n\tunpackedLinks, err := unpackLinkListing(linkListing)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(unpackedLinks) != 1 {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"unexpected amount of links (%d)\",\n\t\t\tlen(unpackedLinks))\n\t}\n\n\tlink := unpackedLinks[0]\n\tlink.Comments = unpackCommentListing(commentListing)\n\n\treturn link, nil\n}\n\n\/\/ ParseInbox returns a slice of messages in the inbox JSON digest.\nfunc ParseInbox(content io.ReadCloser) ([]*redditproto.Message, error) {\n\tif content == nil {\n\t\treturn nil, fmt.Errorf(\"no content provided\")\n\t}\n\n\tmessageListing := &redditproto.MessageListing{}\n\tdecoder := json.NewDecoder(content)\n\tif err := decoder.Decode(messageListing); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif messageListing.GetData() == nil {\n\t\treturn nil, fmt.Errorf(\"no data in listing\")\n\t}\n\n\tif messageListing.GetData().GetChildren() == nil {\n\t\treturn nil, fmt.Errorf(\"the listing was infertile\")\n\t}\n\n\tmessages := make(\n\t\t[]*redditproto.Message,\n\t\tlen(messageListing.GetData().GetChildren()))\n\tfor i, child := range messageListing.GetData().GetChildren() {\n\t\tmessages[i] = child.GetData()\n\t}\n\n\treturn messages, nil\n}\n\n\/\/ unpackLinkListing returns a slice of the links contained in a link listing.\nfunc unpackLinkListing(\n\tlisting *redditproto.LinkListing,\n) ([]*redditproto.Link, error) {\n\tif listing.GetData() == nil {\n\t\treturn nil, fmt.Errorf(\"no data field; got %v\", listing)\n\t}\n\n\tif listing.GetData().GetChildren() == nil {\n\t\treturn nil, fmt.Errorf(\"data has no children; got %v\", listing)\n\t}\n\n\tlinks := make([]*redditproto.Link, len(listing.GetData().GetChildren()))\n\tfor i, child := range listing.GetData().GetChildren() {\n\t\tlinks[i] = child.GetData()\n\t}\n\treturn links, nil\n}\n\n\/\/ unpackCommentListing returns a slice of the comments contained in a comment\n\/\/ listing.\nfunc unpackCommentListing(\n\tlisting *redditproto.CommentListing,\n) []*redditproto.Comment {\n\tif listing.GetData() == nil {\n\t\treturn nil\n\t}\n\n\tif listing.GetData().GetChildren() == nil {\n\t\treturn nil\n\t}\n\n\tcomments := make(\n\t\t[]*redditproto.Comment,\n\t\tlen(listing.GetData().GetChildren()))\n\tfor i, child := range listing.GetData().GetChildren() {\n\t\tcomments[i] = child.GetData()\n\t\tif comments[i].Replies != nil {\n\t\t\tcomments[i].ReplyTree = unpackCommentListing(comments[i].Replies)\n\t\t\tcomments[i].Replies = nil\n\t\t}\n\t}\n\treturn comments\n}\n<commit_msg>Close all connections.<commit_after>\/\/ Package parser parses Reddit's JSON responses into protobuffers.\npackage parser\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/turnage\/redditproto\"\n)\n\n\/\/ ParseLinkListing returns a slice of Links which hold 
the same data the JSON\n\/\/ link listing provided contains.\nfunc ParseLinkListing(content io.ReadCloser) ([]*redditproto.Link, error) {\n\tif content == nil {\n\t\treturn nil, fmt.Errorf(\"no content provided\")\n\t}\n\tdefer content.Close()\n\n\tlisting := &redditproto.LinkListing{}\n\tdecoder := json.NewDecoder(content)\n\tif err := decoder.Decode(listing); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn unpackLinkListing(listing)\n}\n\n\/\/ ParseThread parses a combination link listing and comment listing, which\n\/\/ Reddit returns when asked for the JSON digest of a thread. This contains the\n\/\/ submission's information, and all of its comments. The returned link will\n\/\/ have the Comments field filled, and the comments will have their ReplyTree\n\/\/ field filled.\nfunc ParseThread(content io.ReadCloser) (*redditproto.Link, error) {\n\tif content == nil {\n\t\treturn nil, fmt.Errorf(\"no content provided\")\n\t}\n\tdefer content.Close()\n\n\tlistings := []interface{}{\n\t\t&redditproto.LinkListing{},\n\t\t&redditproto.CommentListing{},\n\t}\n\tdecoder := json.NewDecoder(content)\n\tif err := decoder.Decode(&listings); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(listings) != 2 {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"json decoding malformed the listings: %v\",\n\t\t\tlistings)\n\t}\n\n\tlinkListing := listings[0].(*redditproto.LinkListing)\n\tcommentListing := listings[1].(*redditproto.CommentListing)\n\n\tunpackedLinks, err := unpackLinkListing(linkListing)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(unpackedLinks) != 1 {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"unexpected amount of links (%d)\",\n\t\t\tlen(unpackedLinks))\n\t}\n\n\tlink := unpackedLinks[0]\n\tlink.Comments = unpackCommentListing(commentListing)\n\n\treturn link, nil\n}\n\n\/\/ ParseInbox returns a slice of messages in the inbox JSON digest.\nfunc ParseInbox(content io.ReadCloser) ([]*redditproto.Message, error) {\n\tif content == nil {\n\t\treturn nil, fmt.Errorf(\"no content provided\")\n\t}\n\tdefer content.Close()\n\n\tmessageListing := &redditproto.MessageListing{}\n\tdecoder := json.NewDecoder(content)\n\tif err := decoder.Decode(messageListing); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif messageListing.GetData() == nil {\n\t\treturn nil, fmt.Errorf(\"no data in listing\")\n\t}\n\n\tif messageListing.GetData().GetChildren() == nil {\n\t\treturn nil, fmt.Errorf(\"the listing was infertile\")\n\t}\n\n\tmessages := make(\n\t\t[]*redditproto.Message,\n\t\tlen(messageListing.GetData().GetChildren()))\n\tfor i, child := range messageListing.GetData().GetChildren() {\n\t\tmessages[i] = child.GetData()\n\t}\n\n\treturn messages, nil\n}\n\n\/\/ unpackLinkListing returns a slice of the links contained in a link listing.\nfunc unpackLinkListing(\n\tlisting *redditproto.LinkListing,\n) ([]*redditproto.Link, error) {\n\tif listing.GetData() == nil {\n\t\treturn nil, fmt.Errorf(\"no data field; got %v\", listing)\n\t}\n\n\tif listing.GetData().GetChildren() == nil {\n\t\treturn nil, fmt.Errorf(\"data has no children; got %v\", listing)\n\t}\n\n\tlinks := make([]*redditproto.Link, len(listing.GetData().GetChildren()))\n\tfor i, child := range listing.GetData().GetChildren() {\n\t\tlinks[i] = child.GetData()\n\t}\n\treturn links, nil\n}\n\n\/\/ unpackCommentListing returns a slice of the comments contained in a comment\n\/\/ listing.\nfunc unpackCommentListing(\n\tlisting *redditproto.CommentListing,\n) []*redditproto.Comment {\n\tif listing.GetData() == nil {\n\t\treturn nil\n\t}\n\n\tif 
listing.GetData().GetChildren() == nil {\n\t\treturn nil\n\t}\n\n\tcomments := make(\n\t\t[]*redditproto.Comment,\n\t\tlen(listing.GetData().GetChildren()))\n\tfor i, child := range listing.GetData().GetChildren() {\n\t\tcomments[i] = child.GetData()\n\t\tif comments[i].Replies != nil {\n\t\t\tcomments[i].ReplyTree = unpackCommentListing(comments[i].Replies)\n\t\t\tcomments[i].Replies = nil\n\t\t}\n\t}\n\treturn comments\n}\n<|endoftext|>"} {"text":"<commit_before>package ec2\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\t\"github.com\/pchico83\/i2kit\/cli\/schemas\/environment\"\n)\n\n\/\/CreateSG creates the project security group\nfunc CreateSG(e *environment.Environment, config *aws.Config) error {\n\tvpc, err := GetVPC(e, config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsvc := ec2.New(session.New(), config)\n\tdescription := fmt.Sprintf(\"Security Group for environment %s\", e.Name)\n\tname := fmt.Sprintf(\"%s-i2kit\", e.Name)\n\tcsgi := &ec2.CreateSecurityGroupInput{\n\t\tDescription: &description,\n\t\tGroupName: &name,\n\t\tVpcId: &vpc,\n\t}\n\tsg, err := svc.CreateSecurityGroup(csgi)\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), \"InvalidGroup.Duplicate\") {\n\t\t\treturn err\n\t\t}\n\t\tdsgi := &ec2.DescribeSecurityGroupsInput{\n\t\t\tGroupNames: []*string{&name},\n\t\t}\n\t\tsgs, err2 := svc.DescribeSecurityGroups(dsgi)\n\t\tif err2 != nil {\n\t\t\treturn err2\n\t\t}\n\t\tfor _, i := range sgs.SecurityGroups {\n\t\t\tif *i.VpcId == vpc {\n\t\t\t\te.Provider.SecurityGroup = *i.GroupId\n\t\t\t}\n\t\t}\n\t} else {\n\t\te.Provider.SecurityGroup = *sg.GroupId\n\t}\n\n\tif e.Provider.SecurityGroup == \"\" {\n\t\treturn fmt.Errorf(\"Error retrieving SG '%s'\", name)\n\t}\n\n\tasgii := &ec2.AuthorizeSecurityGroupIngressInput{\n\t\tGroupName: &name,\n\t\tSourceSecurityGroupName: &name,\n\t}\n\tprotocol := \"-1\"\n\tif _, err = svc.AuthorizeSecurityGroupIngress(asgii); err != nil {\n\t\tif strings.Contains(err.Error(), \"InvalidParameterValue\") {\n\t\t\tasgii = &ec2.AuthorizeSecurityGroupIngressInput{\n\t\t\t\tGroupId: &e.Provider.SecurityGroup,\n\t\t\t\tIpPermissions: []*ec2.IpPermission{\n\t\t\t\t\t&ec2.IpPermission{\n\t\t\t\t\t\tIpProtocol: &protocol,\n\t\t\t\t\t\tUserIdGroupPairs: []*ec2.UserIdGroupPair{\n\t\t\t\t\t\t\t&ec2.UserIdGroupPair{\n\t\t\t\t\t\t\t\tGroupId: &e.Provider.SecurityGroup,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\t_, err = svc.AuthorizeSecurityGroupIngress(asgii)\n\t\t}\n\t}\n\tif err != nil && !strings.Contains(err.Error(), \"InvalidPermission.Duplicate\") {\n\t\treturn err\n\t}\n\tvar ports int64\n\tports = 22\n\tcidrIP := \"0.0.0.0\/0\"\n\tasgii = &ec2.AuthorizeSecurityGroupIngressInput{\n\t\tFromPort: &ports,\n\t\tToPort: &ports,\n\t\tGroupName: &name,\n\t\tIpProtocol: &protocol,\n\t\tCidrIp: &cidrIP,\n\t}\n\tif _, err = svc.AuthorizeSecurityGroupIngress(asgii); err != nil {\n\t\tif strings.Contains(err.Error(), \"InvalidParameterValue\") {\n\t\t\tasgii.SetGroupName(\"\")\n\t\t\tasgii.SetGroupId(e.Provider.SecurityGroup)\n\t\t\t_, err = svc.AuthorizeSecurityGroupIngress(asgii)\n\t\t}\n\t}\n\tif err != nil && !strings.Contains(err.Error(), \"InvalidPermission.Duplicate\") {\n\t\treturn err\n\t}\n\treturn nil\n}\n<commit_msg>use a filter to find security groups by name (#72)<commit_after>package ec2\n\nimport 
(\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\t\"github.com\/pchico83\/i2kit\/cli\/schemas\/environment\"\n)\n\nvar groupNameFilter = \"group-name\"\n\n\/\/CreateSG creates the project security group\nfunc CreateSG(e *environment.Environment, config *aws.Config) error {\n\tvpc, err := GetVPC(e, config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsvc := ec2.New(session.New(), config)\n\tdescription := fmt.Sprintf(\"Security Group for environment %s\", e.Name)\n\tname := fmt.Sprintf(\"%s-i2kit\", e.Name)\n\tcsgi := &ec2.CreateSecurityGroupInput{\n\t\tDescription: &description,\n\t\tGroupName: &name,\n\t\tVpcId: &vpc,\n\t}\n\tsg, err := svc.CreateSecurityGroup(csgi)\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), \"InvalidGroup.Duplicate\") {\n\t\t\treturn err\n\t\t}\n\n\t\tdsgi := &ec2.DescribeSecurityGroupsInput{\n\t\t\tFilters: []*ec2.Filter{&ec2.Filter{Name: &groupNameFilter, Values: []*string{&name}}},\n\t\t}\n\t\tsgs, err2 := svc.DescribeSecurityGroups(dsgi)\n\n\t\tif err2 != nil {\n\t\t\treturn err2\n\t\t}\n\n\t\tfor _, i := range sgs.SecurityGroups {\n\t\t\tif *i.VpcId == vpc {\n\t\t\t\te.Provider.SecurityGroup = *i.GroupId\n\t\t\t}\n\t\t}\n\t} else {\n\t\te.Provider.SecurityGroup = *sg.GroupId\n\t}\n\n\tif e.Provider.SecurityGroup == \"\" {\n\t\treturn fmt.Errorf(\"Error retrieving SG '%s'\", name)\n\t}\n\n\tasgii := &ec2.AuthorizeSecurityGroupIngressInput{\n\t\tGroupName: &name,\n\t\tSourceSecurityGroupName: &name,\n\t}\n\tprotocol := \"-1\"\n\t_, err = svc.AuthorizeSecurityGroupIngress(asgii)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"InvalidParameterValue\") {\n\t\t\tasgii = &ec2.AuthorizeSecurityGroupIngressInput{\n\t\t\t\tGroupId: &e.Provider.SecurityGroup,\n\t\t\t\tIpPermissions: []*ec2.IpPermission{\n\t\t\t\t\t&ec2.IpPermission{\n\t\t\t\t\t\tIpProtocol: &protocol,\n\t\t\t\t\t\tUserIdGroupPairs: []*ec2.UserIdGroupPair{\n\t\t\t\t\t\t\t&ec2.UserIdGroupPair{\n\t\t\t\t\t\t\t\tGroupId: &e.Provider.SecurityGroup,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\t_, err = svc.AuthorizeSecurityGroupIngress(asgii)\n\t\t}\n\t}\n\n\tif err != nil && !strings.Contains(err.Error(), \"InvalidPermission.Duplicate\") {\n\t\treturn err\n\t}\n\tvar ports int64\n\tports = 22\n\tcidrIP := \"0.0.0.0\/0\"\n\tasgii = &ec2.AuthorizeSecurityGroupIngressInput{\n\t\tFromPort: &ports,\n\t\tToPort: &ports,\n\t\tGroupName: &name,\n\t\tIpProtocol: &protocol,\n\t\tCidrIp: &cidrIP,\n\t}\n\tif _, err = svc.AuthorizeSecurityGroupIngress(asgii); err != nil {\n\t\tif strings.Contains(err.Error(), \"InvalidParameterValue\") {\n\t\t\tasgii.SetGroupName(\"\")\n\t\t\tasgii.SetGroupId(e.Provider.SecurityGroup)\n\t\t\t_, err = svc.AuthorizeSecurityGroupIngress(asgii)\n\t\t}\n\t}\n\tif err != nil && !strings.Contains(err.Error(), \"InvalidPermission.Duplicate\") {\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\tgcontext \"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/Microsoft\/hcsshim\/internal\/hcs\"\n\t\"github.com\/Microsoft\/hcsshim\/internal\/oc\"\n\t\"github.com\/containerd\/containerd\/runtime\/v2\/task\"\n\t\"github.com\/gogo\/protobuf\/proto\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/urfave\/cli\"\n\t\"go.opencensus.io\/trace\"\n)\n\nvar deleteCommand = cli.Command{\n\tName: \"delete\",\n\tUsage: 
`\nThis command allows containerd to delete any container resources created, mounted, and\/or run by a shim when containerd can no longer communicate over rpc. This happens if a shim is SIGKILL'd with a running container. These resources will need to be cleaned up when containerd loses the connection to a shim. This is also used when containerd boots and reconnects to shims. If a bundle is still on disk but containerd cannot connect to a shim, the delete command is invoked.\n\nThe delete command will be executed in the container's bundle as its cwd.\n`,\n\tSkipArgReorder: true,\n\tAction: func(context *cli.Context) (err error) {\n\t\t\/\/ We can't write anything to stdout for this cmd other than the\n\t\t\/\/ task.DeleteResponse by protocol. We can write to stderr which will be\n\t\t\/\/ warning logged in containerd.\n\n\t\tctx, span := trace.StartSpan(gcontext.Background(), \"delete\")\n\t\tdefer span.End()\n\t\tdefer func() { oc.SetSpanStatus(span, err) }()\n\n\t\tbundleFlag := context.GlobalString(\"bundle\")\n\t\tif bundleFlag == \"\" {\n\t\t\treturn errors.New(\"bundle is required\")\n\t\t}\n\n\t\t\/\/ hcsshim shim writes panic logs in the bundle directory in a file named \"panic.log\"\n\t\t\/\/ log those messages (if any) on stderr so that it shows up in containerd's log.\n\t\t\/\/ This should be done as the first thing so that we don't miss any panic logs even if\n\t\t\/\/ something goes wrong during delete op.\n\t\tlogs, err := ioutil.ReadFile(filepath.Join(bundleFlag, \"panic.log\"))\n\t\tif err == nil && len(logs) > 0 {\n\t\t\tlogrus.WithField(\"log\", string(logs)).Error(\"found shim panic logs during delete\")\n\t\t}\n\n\t\t\/\/ Attempt to find the hcssystem for this bundle and terminate it.\n\t\tif sys, _ := hcs.OpenComputeSystem(ctx, idFlag); sys != nil {\n\t\t\tdefer sys.Close()\n\t\t\tif err := sys.Terminate(ctx); err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"failed to terminate '%s': %v\", idFlag, err)\n\t\t\t} else {\n\t\t\t\tch := make(chan error, 1)\n\t\t\t\tgo func() { ch <- sys.Wait() }()\n\t\t\t\tt := time.NewTimer(time.Second * 30)\n\t\t\t\tselect {\n\t\t\t\tcase <-t.C:\n\t\t\t\t\tsys.Close()\n\t\t\t\t\treturn fmt.Errorf(\"timed out waiting for '%s' to terminate\", idFlag)\n\t\t\t\tcase err := <-ch:\n\t\t\t\t\tt.Stop()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Fprintf(os.Stderr, \"failed to wait for '%s' to terminate: %v\", idFlag, err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Determine if the config file was a POD and if so kill the whole POD.\n\t\tif s, err := getSpecAnnotations(bundleFlag); err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tif containerType := s[\"io.kubernetes.cri.container-type\"]; containerType == \"container\" {\n\t\t\t\tif sandboxID := s[\"io.kubernetes.cri.sandbox-id\"]; sandboxID != \"\" {\n\t\t\t\t\tif sys, _ := hcs.OpenComputeSystem(ctx, sandboxID); sys != nil {\n\t\t\t\t\t\tif err := sys.Terminate(ctx); err != nil {\n\t\t\t\t\t\t\tfmt.Fprintf(os.Stderr, \"failed to terminate '%s': %v\", idFlag, err)\n\t\t\t\t\t\t} else if err := sys.Wait(); err != nil {\n\t\t\t\t\t\t\tfmt.Fprintf(os.Stderr, \"failed to wait for '%s' to terminate: %v\", idFlag, err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tsys.Close()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Remove the bundle on disk\n\t\tif err := os.RemoveAll(bundleFlag); err != nil && !os.IsNotExist(err) {\n\t\t\treturn err\n\t\t}\n\n\t\tif data, err := proto.Marshal(&task.DeleteResponse{\n\t\t\tExitedAt: time.Now(),\n\t\t\tExitStatus: 
255,\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\tif _, err := os.Stdout.Write(data); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t},\n}\n<commit_msg>Read max 1MB data from panic.log<commit_after>package main\n\nimport (\n\tgcontext \"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/Microsoft\/hcsshim\/internal\/hcs\"\n\t\"github.com\/Microsoft\/hcsshim\/internal\/oc\"\n\t\"github.com\/containerd\/containerd\/runtime\/v2\/task\"\n\t\"github.com\/gogo\/protobuf\/proto\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/urfave\/cli\"\n\t\"go.opencensus.io\/trace\"\n)\n\n\/\/ limitedRead reads at max `readLimitBytes` bytes from the file at path `filePath`. If the file has\n\/\/ more than `readLimitBytes` bytes of data then only the first `readLimitBytes` will be returned.\nfunc limitedRead(filePath string, readLimitBytes int64) ([]byte, error) {\n\tf, err := os.Open(filePath)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"limited read failed to open file: %s\", filePath)\n\t}\n\tdefer f.Close()\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"limited read failed during file stat: %s\", filePath)\n\t}\n\tif fi.Size() < readLimitBytes {\n\t\treadLimitBytes = fi.Size()\n\t}\n\tbuf := make([]byte, readLimitBytes)\n\tif _, err := f.Read(buf); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"limited read failed during file read: %s\", filePath)\n\t}\n\treturn buf, nil\n}\n\nvar deleteCommand = cli.Command{\n\tName: \"delete\",\n\tUsage: `\nThis command allows containerd to delete any container resources created, mounted, and\/or run by a shim when containerd can no longer communicate over rpc. This happens if a shim is SIGKILL'd with a running container. These resources will need to be cleaned up when containerd loses the connection to a shim. This is also used when containerd boots and reconnects to shims. If a bundle is still on disk but containerd cannot connect to a shim, the delete command is invoked.\n\nThe delete command will be executed in the container's bundle as its cwd.\n`,\n\tSkipArgReorder: true,\n\tAction: func(context *cli.Context) (err error) {\n\t\t\/\/ We can't write anything to stdout for this cmd other than the\n\t\t\/\/ task.DeleteResponse by protocol. 
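The limitedRead helper in this record sizes its buffer from Stat before reading. The same bound can be had from the standard library alone; a minimal sketch, assuming an illustrative file name and a 1 MB cap (neither is part of the code above):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
)

// readAtMost returns at most limit bytes from the file at path, so an
// arbitrarily large file cannot exhaust memory.
func readAtMost(path string, limit int64) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	// LimitReader yields io.EOF once limit bytes have been read, which
	// ReadAll treats as a normal end of input.
	return ioutil.ReadAll(io.LimitReader(f, limit))
}

func main() {
	data, err := readAtMost("panic.log", 1024*1024) // assumed 1 MB cap
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("read %d bytes\n", len(data))
}

One property of this variant: because the cap is enforced at read time rather than from a prior Stat, it also stays bounded if the file grows between open and read.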
We can write to stderr which will be\n\t\t\/\/ warning logged in containerd.\n\n\t\tctx, span := trace.StartSpan(gcontext.Background(), \"delete\")\n\t\tdefer span.End()\n\t\tdefer func() { oc.SetSpanStatus(span, err) }()\n\n\t\tbundleFlag := context.GlobalString(\"bundle\")\n\t\tif bundleFlag == \"\" {\n\t\t\treturn errors.New(\"bundle is required\")\n\t\t}\n\n\t\t\/\/ hcsshim shim writes panic logs in the bundle directory in a file named \"panic.log\"\n\t\t\/\/ log those messages (if any) on stderr so that it shows up in containerd's log.\n\t\t\/\/ This should be done as the first thing so that we don't miss any panic logs even if\n\t\t\/\/ something goes wrong during delete op.\n\t\t\/\/ The file can be very large so read only first 1MB of data.\n\t\treadLimit := int64(1024 * 1024) \/\/ 1MB\n\t\tlogBytes, err := limitedRead(filepath.Join(bundleFlag, \"panic.log\"), readLimit)\n\t\tif err == nil && len(logBytes) > 0 {\n\t\t\tif int64(len(logBytes)) == readLimit {\n\t\t\t\tlogrus.Warnf(\"shim panic log file %s is larger than 1MB, logging only first 1MB\", filepath.Join(bundleFlag, \"panic.log\"))\n\t\t\t}\n\t\t\tlogrus.WithField(\"log\", string(logBytes)).Warn(\"found shim panic logs during delete\")\n\t\t} else if err != nil && !errors.Is(err, os.ErrNotExist) {\n\t\t\tlogrus.WithError(err).Warn(\"failed to open shim panic log\")\n\t\t}\n\n\t\t\/\/ Attempt to find the hcssystem for this bundle and terminate it.\n\t\tif sys, _ := hcs.OpenComputeSystem(ctx, idFlag); sys != nil {\n\t\t\tdefer sys.Close()\n\t\t\tif err := sys.Terminate(ctx); err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"failed to terminate '%s': %v\", idFlag, err)\n\t\t\t} else {\n\t\t\t\tch := make(chan error, 1)\n\t\t\t\tgo func() { ch <- sys.Wait() }()\n\t\t\t\tt := time.NewTimer(time.Second * 30)\n\t\t\t\tselect {\n\t\t\t\tcase <-t.C:\n\t\t\t\t\tsys.Close()\n\t\t\t\t\treturn fmt.Errorf(\"timed out waiting for '%s' to terminate\", idFlag)\n\t\t\t\tcase err := <-ch:\n\t\t\t\t\tt.Stop()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Fprintf(os.Stderr, \"failed to wait for '%s' to terminate: %v\", idFlag, err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Determine if the config file was a POD and if so kill the whole POD.\n\t\tif s, err := getSpecAnnotations(bundleFlag); err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tif containerType := s[\"io.kubernetes.cri.container-type\"]; containerType == \"container\" {\n\t\t\t\tif sandboxID := s[\"io.kubernetes.cri.sandbox-id\"]; sandboxID != \"\" {\n\t\t\t\t\tif sys, _ := hcs.OpenComputeSystem(ctx, sandboxID); sys != nil {\n\t\t\t\t\t\tif err := sys.Terminate(ctx); err != nil {\n\t\t\t\t\t\t\tfmt.Fprintf(os.Stderr, \"failed to terminate '%s': %v\", idFlag, err)\n\t\t\t\t\t\t} else if err := sys.Wait(); err != nil {\n\t\t\t\t\t\t\tfmt.Fprintf(os.Stderr, \"failed to wait for '%s' to terminate: %v\", idFlag, err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tsys.Close()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Remove the bundle on disk\n\t\tif err := os.RemoveAll(bundleFlag); err != nil && !os.IsNotExist(err) {\n\t\t\treturn err\n\t\t}\n\n\t\tif data, err := proto.Marshal(&task.DeleteResponse{\n\t\t\tExitedAt: time.Now(),\n\t\t\tExitStatus: 255,\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\tif _, err := os.Stdout.Write(data); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t},\n}\n<|endoftext|>"} {"text":"<commit_before>package v2\n\nimport 
(\n\t\"os\"\n\n\t\"code.cloudfoundry.org\/cli\/cf\/cmd\"\n\t\"code.cloudfoundry.org\/cli\/commands\"\n\t\"code.cloudfoundry.org\/cli\/commands\/flags\"\n)\n\ntype UpdateBuildpackCommand struct {\n\tRequiredArgs flags.SetSpaceQuotaArgs `positional-args:\"yes\"`\n\tDisable bool `long:\"disable\" description:\"Disable the buildpack from being used for staging\"`\n\tEnable bool `long:\"enable\" description:\"Enable the buildpack to be used for staging\"`\n\tOrder int `short:\"i\" description:\"The order in which the buildpacks are checked during buildpack auto-detection\"`\n\tLock bool `long:\"lock\" description:\"Lock the buildpack to prevent updates\"`\n\tPath string `short:\"p\" description:\"Path to directory or zip file\"`\n\tUnlock bool `long:\"unlock\" description:\"Unlock the buildpack to enable updates\"`\n\tusage interface{} `usage:\"CF_NAME update-buildpack BUILDPACK [-p PATH] [-i POSITION] [--enable|--disable] [--lock|--unlock]\\n\\nTIP:\\n Path should be a zip file, a URL to a zip file, or a local directory. Position is a positive integer, sets priority, and is sorted from lowest to highest.\"`\n\trelatedCommands interface{} `related_commands:\"buildpacks, rename-buildpack\"`\n}\n\nfunc (_ UpdateBuildpackCommand) Setup(config commands.Config, ui commands.UI) error {\n\treturn nil\n}\n\nfunc (_ UpdateBuildpackCommand) Execute(args []string) error {\n\tcmd.Main(os.Getenv(\"CF_TRACE\"), os.Args)\n\treturn nil\n}\n<commit_msg>use the correct arguments for UpdateBuildpackCommand<commit_after>package v2\n\nimport (\n\t\"os\"\n\n\t\"code.cloudfoundry.org\/cli\/cf\/cmd\"\n\t\"code.cloudfoundry.org\/cli\/commands\"\n\t\"code.cloudfoundry.org\/cli\/commands\/flags\"\n)\n\ntype UpdateBuildpackCommand struct {\n\tRequiredArgs flags.Buildpack `positional-args:\"yes\"`\n\tDisable bool `long:\"disable\" description:\"Disable the buildpack from being used for staging\"`\n\tEnable bool `long:\"enable\" description:\"Enable the buildpack to be used for staging\"`\n\tOrder int `short:\"i\" description:\"The order in which the buildpacks are checked during buildpack auto-detection\"`\n\tLock bool `long:\"lock\" description:\"Lock the buildpack to prevent updates\"`\n\tPath string `short:\"p\" description:\"Path to directory or zip file\"`\n\tUnlock bool `long:\"unlock\" description:\"Unlock the buildpack to enable updates\"`\n\tusage interface{} `usage:\"CF_NAME update-buildpack BUILDPACK [-p PATH] [-i POSITION] [--enable|--disable] [--lock|--unlock]\\n\\nTIP:\\n Path should be a zip file, a URL to a zip file, or a local directory. Position is a positive integer, sets priority, and is sorted from lowest to highest.\"`\n\trelatedCommands interface{} `related_commands:\"buildpacks, rename-buildpack\"`\n}\n\nfunc (_ UpdateBuildpackCommand) Setup(config commands.Config, ui commands.UI) error {\n\treturn nil\n}\n\nfunc (_ UpdateBuildpackCommand) Execute(args []string) error {\n\tcmd.Main(os.Getenv(\"CF_TRACE\"), os.Args)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/kisielk\/whisper-go\/whisper\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n)\n\nvar from, until uint\n\nfunc usage() {\n\tlog.Fatal(\"Wrong number of arguments\")\n}\n\nfunc main() {\n\tnow := uint(time.Now().Unix())\n\tyesterday := uint(time.Now().Add(-24 * time.Hour).Unix())\n\tflag.UintVar(&from, \"from\", yesterday, \"Unix epoch time of the beginning of the requested interval. 
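The fix in the record above only swaps the positional-argument type from flags.SetSpaceQuotaArgs to flags.Buildpack. A rough sketch of how such positional-args structs parse, assuming jessevdk/go-flags semantics for the struct tags; BuildpackArgs and updateBuildpackOpts are hypothetical stand-ins, not the cli's own types:

package main

import (
	"fmt"
	"os"

	flags "github.com/jessevdk/go-flags"
)

// BuildpackArgs stands in for a positional-args type: one required
// positional argument that receives the buildpack name.
type BuildpackArgs struct {
	Buildpack string `positional-arg-name:"BUILDPACK" required:"yes"`
}

type updateBuildpackOpts struct {
	RequiredArgs BuildpackArgs `positional-args:"yes"`
	Enable       bool          `long:"enable"`
	Path         string        `short:"p"`
}

func main() {
	var opts updateBuildpackOpts
	// e.g. go run . my-buildpack --enable -p ./bp.zip
	if _, err := flags.ParseArgs(&opts, os.Args[1:]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("buildpack=%q enable=%v path=%q\n",
		opts.RequiredArgs.Buildpack, opts.Enable, opts.Path)
}

The type on RequiredArgs decides which positional fields exist and how they are named in usage output, which is why picking the wrong struct (SetSpaceQuotaArgs) produced misleading argument handling.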
(default: 24 hours ago)\")\n\tflag.UintVar(&until, \"until\", now, \"Unix epoch time of the end of the requested interval. (default: now)\")\n\tflag.Parse()\n\n\tif flag.NArg() != 1 {\n\t\tusage()\n\t}\n\n\tpath := flag.Args()[0]\n\tfromTime := uint32(from)\n\tuntilTime := uint32(until)\n\n\tw, err := whisper.Open(path)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tinterval, points, err := w.FetchUntil(fromTime, untilTime)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Printf(\"Values in interval %q\", interval)\n\tfor i, p := range points {\n\t\tfmt.Printf(\"%d %q\\n\", i, p)\n\t}\n\treturn\n}\n<commit_msg>Better printing for whisper-fetch<commit_after>package main\n\nimport (\n\t\"github.com\/kisielk\/whisper-go\/whisper\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n)\n\nvar from, until uint\n\nfunc usage() {\n\tlog.Fatal(\"Wrong number of arguments\")\n}\n\nfunc main() {\n\tnow := uint(time.Now().Unix())\n\tyesterday := uint(time.Now().Add(-24 * time.Hour).Unix())\n\tflag.UintVar(&from, \"from\", yesterday, \"Unix epoch time of the beginning of the requested interval. (default: 24 hours ago)\")\n\tflag.UintVar(&until, \"until\", now, \"Unix epoch time of the end of the requested interval. (default: now)\")\n\tflag.Parse()\n\n\tif flag.NArg() != 1 {\n\t\tusage()\n\t}\n\n\tpath := flag.Args()[0]\n\tfromTime := uint32(from)\n\tuntilTime := uint32(until)\n\n\tw, err := whisper.Open(path)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tinterval, points, err := w.FetchUntil(fromTime, untilTime)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Printf(\"Values in interval %+v\\n\", interval)\n\tfor i, p := range points {\n\t\tfmt.Printf(\"%d %v\\n\", i, p)\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package swift\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/gophercloud\/gophercloud\"\n\t\"github.com\/gophercloud\/gophercloud\/openstack\"\n\n\t\"github.com\/hashicorp\/terraform\/backend\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\ttf_openstack \"github.com\/terraform-providers\/terraform-provider-openstack\/openstack\"\n)\n\n\/\/ New creates a new backend for Swift remote state.\nfunc New() backend.Backend {\n\ts := &schema.Backend{\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"auth_url\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_AUTH_URL\", nil),\n\t\t\t\tDescription: descriptions[\"auth_url\"],\n\t\t\t},\n\n\t\t\t\"user_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_USER_ID\", \"\"),\n\t\t\t\tDescription: descriptions[\"user_name\"],\n\t\t\t},\n\n\t\t\t\"user_name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_USERNAME\", \"\"),\n\t\t\t\tDescription: descriptions[\"user_name\"],\n\t\t\t},\n\n\t\t\t\"tenant_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.MultiEnvDefaultFunc([]string{\n\t\t\t\t\t\"OS_TENANT_ID\",\n\t\t\t\t\t\"OS_PROJECT_ID\",\n\t\t\t\t}, \"\"),\n\t\t\t\tDescription: descriptions[\"tenant_id\"],\n\t\t\t},\n\n\t\t\t\"tenant_name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.MultiEnvDefaultFunc([]string{\n\t\t\t\t\t\"OS_TENANT_NAME\",\n\t\t\t\t\t\"OS_PROJECT_NAME\",\n\t\t\t\t}, \"\"),\n\t\t\t\tDescription: descriptions[\"tenant_name\"],\n\t\t\t},\n\n\t\t\t\"password\": 
&schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tSensitive: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_PASSWORD\", \"\"),\n\t\t\t\tDescription: descriptions[\"password\"],\n\t\t\t},\n\n\t\t\t\"token\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_AUTH_TOKEN\", \"\"),\n\t\t\t\tDescription: descriptions[\"token\"],\n\t\t\t},\n\n\t\t\t\"domain_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.MultiEnvDefaultFunc([]string{\n\t\t\t\t\t\"OS_USER_DOMAIN_ID\",\n\t\t\t\t\t\"OS_PROJECT_DOMAIN_ID\",\n\t\t\t\t\t\"OS_DOMAIN_ID\",\n\t\t\t\t}, \"\"),\n\t\t\t\tDescription: descriptions[\"domain_id\"],\n\t\t\t},\n\n\t\t\t\"domain_name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.MultiEnvDefaultFunc([]string{\n\t\t\t\t\t\"OS_USER_DOMAIN_NAME\",\n\t\t\t\t\t\"OS_PROJECT_DOMAIN_NAME\",\n\t\t\t\t\t\"OS_DOMAIN_NAME\",\n\t\t\t\t\t\"OS_DEFAULT_DOMAIN\",\n\t\t\t\t}, \"\"),\n\t\t\t\tDescription: descriptions[\"domain_name\"],\n\t\t\t},\n\n\t\t\t\"region_name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_REGION_NAME\", \"\"),\n\t\t\t\tDescription: descriptions[\"region_name\"],\n\t\t\t},\n\n\t\t\t\"insecure\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_INSECURE\", nil),\n\t\t\t\tDescription: descriptions[\"insecure\"],\n\t\t\t},\n\n\t\t\t\"endpoint_type\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_ENDPOINT_TYPE\", \"\"),\n\t\t\t},\n\n\t\t\t\"cacert_file\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_CACERT\", \"\"),\n\t\t\t\tDescription: descriptions[\"cacert_file\"],\n\t\t\t},\n\n\t\t\t\"cert\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_CERT\", \"\"),\n\t\t\t\tDescription: descriptions[\"cert\"],\n\t\t\t},\n\n\t\t\t\"key\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_KEY\", \"\"),\n\t\t\t\tDescription: descriptions[\"key\"],\n\t\t\t},\n\n\t\t\t\"path\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: descriptions[\"path\"],\n\t\t\t\tDeprecated: \"Use container instead\",\n\t\t\t\tConflictsWith: []string{\"container\"},\n\t\t\t},\n\n\t\t\t\"container\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: descriptions[\"container\"],\n\t\t\t},\n\n\t\t\t\"archive_path\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: descriptions[\"archive_path\"],\n\t\t\t\tDeprecated: \"Use archive_container instead\",\n\t\t\t\tConflictsWith: []string{\"archive_container\"},\n\t\t\t},\n\n\t\t\t\"archive_container\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: descriptions[\"archive_container\"],\n\t\t\t},\n\n\t\t\t\"expire_after\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: descriptions[\"expire_after\"],\n\t\t\t},\n\n\t\t\t\"lock\": &schema.Schema{\n\t\t\t\tType: 
schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"Lock state access\",\n\t\t\t\tDefault: true,\n\t\t\t},\n\t\t},\n\t}\n\n\tresult := &Backend{Backend: s}\n\tresult.Backend.ConfigureFunc = result.configure\n\treturn result\n}\n\nvar descriptions map[string]string\n\nfunc init() {\n\tdescriptions = map[string]string{\n\t\t\"auth_url\": \"The Identity authentication URL.\",\n\n\t\t\"user_name\": \"Username to login with.\",\n\n\t\t\"user_id\": \"User ID to login with.\",\n\n\t\t\"tenant_id\": \"The ID of the Tenant (Identity v2) or Project (Identity v3)\\n\" +\n\t\t\t\"to login with.\",\n\n\t\t\"tenant_name\": \"The name of the Tenant (Identity v2) or Project (Identity v3)\\n\" +\n\t\t\t\"to login with.\",\n\n\t\t\"password\": \"Password to login with.\",\n\n\t\t\"token\": \"Authentication token to use as an alternative to username\/password.\",\n\n\t\t\"domain_id\": \"The ID of the Domain to scope to (Identity v3).\",\n\n\t\t\"domain_name\": \"The name of the Domain to scope to (Identity v3).\",\n\n\t\t\"region_name\": \"The name of the Region to use.\",\n\n\t\t\"insecure\": \"Trust self-signed certificates.\",\n\n\t\t\"cacert_file\": \"A Custom CA certificate.\",\n\n\t\t\"endpoint_type\": \"The catalog endpoint type to use.\",\n\n\t\t\"cert\": \"A client certificate to authenticate with.\",\n\n\t\t\"key\": \"A client private key to authenticate with.\",\n\n\t\t\"path\": \"Swift container path to use.\",\n\n\t\t\"container\": \"Swift container to create\",\n\n\t\t\"archive_path\": \"Swift container path to archive state to.\",\n\n\t\t\"archive_container\": \"Swift container to archive state to.\",\n\n\t\t\"expire_after\": \"Archive object expiry duration.\",\n\t}\n}\n\ntype Backend struct {\n\t*schema.Backend\n\n\t\/\/ Fields below are set from configure\n\tclient *gophercloud.ServiceClient\n\tarchive bool\n\tarchiveContainer string\n\texpireSecs int\n\tcontainer string\n\tlock bool\n}\n\nfunc (b *Backend) configure(ctx context.Context) error {\n\tif b.client != nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Grab the resource data\n\tdata := schema.FromContextBackendConfig(ctx)\n\tconfig := &tf_openstack.Config{\n\t\tCACertFile: data.Get(\"cacert_file\").(string),\n\t\tClientCertFile: data.Get(\"cert\").(string),\n\t\tClientKeyFile: data.Get(\"key\").(string),\n\t\tDomainID: data.Get(\"domain_id\").(string),\n\t\tDomainName: data.Get(\"domain_name\").(string),\n\t\tEndpointType: data.Get(\"endpoint_type\").(string),\n\t\tIdentityEndpoint: data.Get(\"auth_url\").(string),\n\t\tPassword: data.Get(\"password\").(string),\n\t\tToken: data.Get(\"token\").(string),\n\t\tTenantID: data.Get(\"tenant_id\").(string),\n\t\tTenantName: data.Get(\"tenant_name\").(string),\n\t\tUsername: data.Get(\"user_name\").(string),\n\t\tUserID: data.Get(\"user_id\").(string),\n\t}\n\n\tif v, ok := data.GetOkExists(\"insecure\"); ok {\n\t\tinsecure := v.(bool)\n\t\tconfig.Insecure = &insecure\n\t}\n\n\tif err := config.LoadAndValidate(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Assign Container\n\tb.container = data.Get(\"container\").(string)\n\tif b.container == \"\" {\n\t\t\/\/ Check deprecated field\n\t\tb.container = data.Get(\"path\").(string)\n\t}\n\n\t\/\/ Store the lock information\n\tb.lock = data.Get(\"lock\").(bool)\n\n\t\/\/ Enable object archiving?\n\tif archiveContainer, ok := data.GetOk(\"archive_container\"); ok {\n\t\tlog.Printf(\"[DEBUG] Archive_container set, enabling object versioning\")\n\t\tb.archive = true\n\t\tb.archiveContainer = archiveContainer.(string)\n\t} else if archivePath, 
ok := data.GetOk(\"archive_path\"); ok {\n\t\tlog.Printf(\"[DEBUG] Archive_path set, enabling object versioning\")\n\t\tb.archive = true\n\t\tb.archiveContainer = archivePath.(string)\n\t}\n\n\t\/\/ Enable object expiry?\n\tif expireRaw, ok := data.GetOk(\"expire_after\"); ok {\n\t\texpire := expireRaw.(string)\n\t\tlog.Printf(\"[DEBUG] Requested that remote state expires after %s\", expire)\n\n\t\tif strings.HasSuffix(expire, \"d\") {\n\t\t\tlog.Printf(\"[DEBUG] Got a days expire after duration. Converting to hours\")\n\t\t\tdays, err := strconv.Atoi(expire[:len(expire)-1])\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error converting expire_after value %s to int: %s\", expire, err)\n\t\t\t}\n\n\t\t\texpire = fmt.Sprintf(\"%dh\", days*24)\n\t\t\tlog.Printf(\"[DEBUG] Expire after %s hours\", expire)\n\t\t}\n\n\t\texpireDur, err := time.ParseDuration(expire)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[DEBUG] Error parsing duration %s: %s\", expire, err)\n\t\t\treturn fmt.Errorf(\"Error parsing expire_after duration '%s': %s\", expire, err)\n\t\t}\n\t\tlog.Printf(\"[DEBUG] Seconds duration = %d\", int(expireDur.Seconds()))\n\t\tb.expireSecs = int(expireDur.Seconds())\n\t}\n\n\tobjClient, err := openstack.NewObjectStorageV1(config.OsClient, gophercloud.EndpointOpts{\n\t\tRegion: data.Get(\"region_name\").(string),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb.client = objClient\n\n\treturn nil\n}\n<commit_msg>backend\/swift: Authentication updates<commit_after>package swift\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/gophercloud\/gophercloud\"\n\t\"github.com\/gophercloud\/gophercloud\/openstack\"\n\n\t\"github.com\/hashicorp\/terraform\/backend\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\ttf_openstack \"github.com\/terraform-providers\/terraform-provider-openstack\/openstack\"\n)\n\n\/\/ New creates a new backend for Swift remote state.\nfunc New() backend.Backend {\n\ts := &schema.Backend{\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"auth_url\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_AUTH_URL\", \"\"),\n\t\t\t\tDescription: descriptions[\"auth_url\"],\n\t\t\t},\n\n\t\t\t\"user_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_USER_ID\", \"\"),\n\t\t\t\tDescription: descriptions[\"user_name\"],\n\t\t\t},\n\n\t\t\t\"user_name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_USERNAME\", \"\"),\n\t\t\t\tDescription: descriptions[\"user_name\"],\n\t\t\t},\n\n\t\t\t\"tenant_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.MultiEnvDefaultFunc([]string{\n\t\t\t\t\t\"OS_TENANT_ID\",\n\t\t\t\t\t\"OS_PROJECT_ID\",\n\t\t\t\t}, \"\"),\n\t\t\t\tDescription: descriptions[\"tenant_id\"],\n\t\t\t},\n\n\t\t\t\"tenant_name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.MultiEnvDefaultFunc([]string{\n\t\t\t\t\t\"OS_TENANT_NAME\",\n\t\t\t\t\t\"OS_PROJECT_NAME\",\n\t\t\t\t}, \"\"),\n\t\t\t\tDescription: descriptions[\"tenant_name\"],\n\t\t\t},\n\n\t\t\t\"password\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tSensitive: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_PASSWORD\", \"\"),\n\t\t\t\tDescription: 
descriptions[\"password\"],\n\t\t\t},\n\n\t\t\t\"token\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.MultiEnvDefaultFunc([]string{\n\t\t\t\t\t\"OS_TOKEN\",\n\t\t\t\t\t\"OS_AUTH_TOKEN\",\n\t\t\t\t}, \"\"),\n\t\t\t\tDescription: descriptions[\"token\"],\n\t\t\t},\n\n\t\t\t\"user_domain_name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_USER_DOMAIN_NAME\", \"\"),\n\t\t\t\tDescription: descriptions[\"user_domain_name\"],\n\t\t\t},\n\n\t\t\t\"user_domain_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_USER_DOMAIN_ID\", \"\"),\n\t\t\t\tDescription: descriptions[\"user_domain_id\"],\n\t\t\t},\n\n\t\t\t\"project_domain_name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_PROJECT_DOMAIN_NAME\", \"\"),\n\t\t\t\tDescription: descriptions[\"project_domain_name\"],\n\t\t\t},\n\n\t\t\t\"project_domain_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_PROJECT_DOMAIN_ID\", \"\"),\n\t\t\t\tDescription: descriptions[\"project_domain_id\"],\n\t\t\t},\n\n\t\t\t\"domain_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_DOMAIN_ID\", \"\"),\n\t\t\t\tDescription: descriptions[\"domain_id\"],\n\t\t\t},\n\n\t\t\t\"domain_name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_DOMAIN_NAME\", \"\"),\n\t\t\t\tDescription: descriptions[\"domain_name\"],\n\t\t\t},\n\n\t\t\t\"default_domain\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_DEFAULT_DOMAIN\", \"default\"),\n\t\t\t\tDescription: descriptions[\"default_domain\"],\n\t\t\t},\n\n\t\t\t\"cloud\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_CLOUD\", \"\"),\n\t\t\t\tDescription: descriptions[\"cloud\"],\n\t\t\t},\n\n\t\t\t\"region_name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_REGION_NAME\", \"\"),\n\t\t\t\tDescription: descriptions[\"region_name\"],\n\t\t\t},\n\n\t\t\t\"insecure\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_INSECURE\", nil),\n\t\t\t\tDescription: descriptions[\"insecure\"],\n\t\t\t},\n\n\t\t\t\"endpoint_type\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_ENDPOINT_TYPE\", \"\"),\n\t\t\t},\n\n\t\t\t\"cacert_file\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_CACERT\", \"\"),\n\t\t\t\tDescription: descriptions[\"cacert_file\"],\n\t\t\t},\n\n\t\t\t\"cert\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_CERT\", \"\"),\n\t\t\t\tDescription: descriptions[\"cert\"],\n\t\t\t},\n\n\t\t\t\"key\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: schema.EnvDefaultFunc(\"OS_KEY\", \"\"),\n\t\t\t\tDescription: 
descriptions[\"key\"],\n\t\t\t},\n\n\t\t\t\"path\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: descriptions[\"path\"],\n\t\t\t\tDeprecated: \"Use container instead\",\n\t\t\t\tConflictsWith: []string{\"container\"},\n\t\t\t},\n\n\t\t\t\"container\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: descriptions[\"container\"],\n\t\t\t},\n\n\t\t\t\"archive_path\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: descriptions[\"archive_path\"],\n\t\t\t\tDeprecated: \"Use archive_container instead\",\n\t\t\t\tConflictsWith: []string{\"archive_container\"},\n\t\t\t},\n\n\t\t\t\"archive_container\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: descriptions[\"archive_container\"],\n\t\t\t},\n\n\t\t\t\"expire_after\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: descriptions[\"expire_after\"],\n\t\t\t},\n\n\t\t\t\"lock\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"Lock state access\",\n\t\t\t\tDefault: true,\n\t\t\t},\n\t\t},\n\t}\n\n\tresult := &Backend{Backend: s}\n\tresult.Backend.ConfigureFunc = result.configure\n\treturn result\n}\n\nvar descriptions map[string]string\n\nfunc init() {\n\tdescriptions = map[string]string{\n\t\t\"auth_url\": \"The Identity authentication URL.\",\n\n\t\t\"user_name\": \"Username to login with.\",\n\n\t\t\"user_id\": \"User ID to login with.\",\n\n\t\t\"tenant_id\": \"The ID of the Tenant (Identity v2) or Project (Identity v3)\\n\" +\n\t\t\t\"to login with.\",\n\n\t\t\"tenant_name\": \"The name of the Tenant (Identity v2) or Project (Identity v3)\\n\" +\n\t\t\t\"to login with.\",\n\n\t\t\"password\": \"Password to login with.\",\n\n\t\t\"token\": \"Authentication token to use as an alternative to username\/password.\",\n\n\t\t\"user_domain_name\": \"The name of the domain where the user resides (Identity v3).\",\n\n\t\t\"user_domain_id\": \"The ID of the domain where the user resides (Identity v3).\",\n\n\t\t\"project_domain_name\": \"The name of the domain where the project resides (Identity v3).\",\n\n\t\t\"project_domain_id\": \"The ID of the domain where the project resides (Identity v3).\",\n\n\t\t\"domain_id\": \"The ID of the Domain to scope to (Identity v3).\",\n\n\t\t\"domain_name\": \"The name of the Domain to scope to (Identity v3).\",\n\n\t\t\"default_domain\": \"The name of the Domain ID to scope to if no other domain is specified. 
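Each schema.MultiEnvDefaultFunc call in this backend falls back through a list of environment variables in order. A self-contained sketch of that lookup, where firstEnv is an illustrative helper rather than the helper/schema implementation:

package main

import (
	"fmt"
	"os"
)

// firstEnv returns the value of the first variable in names that is set to a
// non-empty value, falling back to def when none match.
func firstEnv(def string, names ...string) string {
	for _, n := range names {
		if v := os.Getenv(n); v != "" {
			return v
		}
	}
	return def
}

func main() {
	// Mirrors the tenant_name and domain lookup orders used in the schema.
	tenant := firstEnv("", "OS_TENANT_NAME", "OS_PROJECT_NAME")
	domain := firstEnv("default", "OS_USER_DOMAIN_NAME", "OS_DOMAIN_NAME")
	fmt.Printf("tenant=%q domain=%q\n", tenant, domain)
}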
Defaults to `default` (Identity v3).\",\n\n\t\t\"cloud\": \"An entry in a `clouds.yaml` file to use.\",\n\n\t\t\"region_name\": \"The name of the Region to use.\",\n\n\t\t\"insecure\": \"Trust self-signed certificates.\",\n\n\t\t\"cacert_file\": \"A Custom CA certificate.\",\n\n\t\t\"endpoint_type\": \"The catalog endpoint type to use.\",\n\n\t\t\"cert\": \"A client certificate to authenticate with.\",\n\n\t\t\"key\": \"A client private key to authenticate with.\",\n\n\t\t\"path\": \"Swift container path to use.\",\n\n\t\t\"container\": \"Swift container to create\",\n\n\t\t\"archive_path\": \"Swift container path to archive state to.\",\n\n\t\t\"archive_container\": \"Swift container to archive state to.\",\n\n\t\t\"expire_after\": \"Archive object expiry duration.\",\n\t}\n}\n\ntype Backend struct {\n\t*schema.Backend\n\n\t\/\/ Fields below are set from configure\n\tclient *gophercloud.ServiceClient\n\tarchive bool\n\tarchiveContainer string\n\texpireSecs int\n\tcontainer string\n\tlock bool\n}\n\nfunc (b *Backend) configure(ctx context.Context) error {\n\tif b.client != nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Grab the resource data\n\tdata := schema.FromContextBackendConfig(ctx)\n\tconfig := &tf_openstack.Config{\n\t\tCACertFile: data.Get(\"cacert_file\").(string),\n\t\tClientCertFile: data.Get(\"cert\").(string),\n\t\tClientKeyFile: data.Get(\"key\").(string),\n\t\tCloud: data.Get(\"cloud\").(string),\n\t\tDefaultDomain: data.Get(\"default_domain\").(string),\n\t\tDomainID: data.Get(\"domain_id\").(string),\n\t\tDomainName: data.Get(\"domain_name\").(string),\n\t\tEndpointType: data.Get(\"endpoint_type\").(string),\n\t\tIdentityEndpoint: data.Get(\"auth_url\").(string),\n\t\tPassword: data.Get(\"password\").(string),\n\t\tProjectDomainID: data.Get(\"project_domain_id\").(string),\n\t\tProjectDomainName: data.Get(\"project_domain_name\").(string),\n\t\tToken: data.Get(\"token\").(string),\n\t\tTenantID: data.Get(\"tenant_id\").(string),\n\t\tTenantName: data.Get(\"tenant_name\").(string),\n\t\tUserDomainID: data.Get(\"user_domain_id\").(string),\n\t\tUserDomainName: data.Get(\"user_domain_name\").(string),\n\t\tUsername: data.Get(\"user_name\").(string),\n\t\tUserID: data.Get(\"user_id\").(string),\n\t}\n\n\tif v, ok := data.GetOkExists(\"insecure\"); ok {\n\t\tinsecure := v.(bool)\n\t\tconfig.Insecure = &insecure\n\t}\n\n\tif err := config.LoadAndValidate(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Assign Container\n\tb.container = data.Get(\"container\").(string)\n\tif b.container == \"\" {\n\t\t\/\/ Check deprecated field\n\t\tb.container = data.Get(\"path\").(string)\n\t}\n\n\t\/\/ Store the lock information\n\tb.lock = data.Get(\"lock\").(bool)\n\n\t\/\/ Enable object archiving?\n\tif archiveContainer, ok := data.GetOk(\"archive_container\"); ok {\n\t\tlog.Printf(\"[DEBUG] Archive_container set, enabling object versioning\")\n\t\tb.archive = true\n\t\tb.archiveContainer = archiveContainer.(string)\n\t} else if archivePath, ok := data.GetOk(\"archive_path\"); ok {\n\t\tlog.Printf(\"[DEBUG] Archive_path set, enabling object versioning\")\n\t\tb.archive = true\n\t\tb.archiveContainer = archivePath.(string)\n\t}\n\n\t\/\/ Enable object expiry?\n\tif expireRaw, ok := data.GetOk(\"expire_after\"); ok {\n\t\texpire := expireRaw.(string)\n\t\tlog.Printf(\"[DEBUG] Requested that remote state expires after %s\", expire)\n\n\t\tif strings.HasSuffix(expire, \"d\") {\n\t\t\tlog.Printf(\"[DEBUG] Got a days expire after duration. 
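The day-suffix conversion being logged here can be read as one small function. A standalone sketch (a restatement of the configure logic above, not the backend's code verbatim) that turns the custom "d" suffix into hours before handing off to time.ParseDuration:

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// parseExpire accepts everything time.ParseDuration does, plus a trailing
// "d" meaning days, which the standard library does not understand.
func parseExpire(s string) (time.Duration, error) {
	if strings.HasSuffix(s, "d") {
		days, err := strconv.Atoi(strings.TrimSuffix(s, "d"))
		if err != nil {
			return 0, fmt.Errorf("error converting expire_after value %s to int: %s", s, err)
		}
		s = fmt.Sprintf("%dh", days*24) // e.g. "3d" becomes "72h"
	}
	return time.ParseDuration(s)
}

func main() {
	for _, in := range []string{"3d", "36h", "90m"} {
		d, err := parseExpire(in)
		fmt.Println(in, "->", d, err)
	}
}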
Converting to hours\")\n\t\t\tdays, err := strconv.Atoi(expire[:len(expire)-1])\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error converting expire_after value %s to int: %s\", expire, err)\n\t\t\t}\n\n\t\t\texpire = fmt.Sprintf(\"%dh\", days*24)\n\t\t\tlog.Printf(\"[DEBUG] Expire after %s hours\", expire)\n\t\t}\n\n\t\texpireDur, err := time.ParseDuration(expire)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[DEBUG] Error parsing duration %s: %s\", expire, err)\n\t\t\treturn fmt.Errorf(\"Error parsing expire_after duration '%s': %s\", expire, err)\n\t\t}\n\t\tlog.Printf(\"[DEBUG] Seconds duration = %d\", int(expireDur.Seconds()))\n\t\tb.expireSecs = int(expireDur.Seconds())\n\t}\n\n\tobjClient, err := openstack.NewObjectStorageV1(config.OsClient, gophercloud.EndpointOpts{\n\t\tRegion: data.Get(\"region_name\").(string),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb.client = objClient\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package telegrambot\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/go-telegram-bot-api\/telegram-bot-api\"\n\t\"github.com\/yamnikov-oleg\/avamon-bot\/monitor\"\n\n\t_ \"github.com\/jinzhu\/gorm\/dialects\/sqlite\"\n)\n\nvar (\n\t\/\/ Green clover\n\tokStatusEmoji = string([]rune{0x2618, 0xfe0f})\n\t\/\/ Red alarm light\n\terrorStatusEmoji = string([]rune{0x1f6a8})\n)\n\nfunc replaceHTML(input string) string {\n\tinput = strings.Replace(input, \"<\", \"<\", -1)\n\tinput = strings.Replace(input, \">\", \">\", -1)\n\treturn input\n}\n\ntype Bot struct {\n\tAdminNickname string\n\tDB *TargetsDB\n\tTgBot *tgbotapi.BotAPI\n\tMonitor *monitor.Monitor\n\tsessionMap map[int64]*session\n}\n\nfunc (b *Bot) formatStatusUpdate(target monitor.Target, status monitor.Status) string {\n\tvar output string\n\tvar sign string\n\n\tif status.Type == monitor.StatusOK {\n\t\tsign = strings.Repeat(okStatusEmoji, 10) + \"\\n\"\n\t} else {\n\t\tsign = strings.Repeat(errorStatusEmoji, 10) + \"\\n\"\n\t}\n\n\toutput += sign\n\toutput += fmt.Sprintf(\"<b>%v:<\/b> <b>%v<\/b>\\n\\n\", replaceHTML(target.Title), status.Type)\n\toutput += fmt.Sprintf(\"<b>URL:<\/b> %v\\n\", replaceHTML(target.URL))\n\toutput += fmt.Sprintf(\"<b>Время ответа:<\/b> %v\\n\", status.ResponseTime)\n\n\tif status.Type != monitor.StatusOK {\n\t\toutput += fmt.Sprintf(\"<b>Сообщение:<\/b> %v\\n\", replaceHTML(status.Err.Error()))\n\t}\n\tif status.Type == monitor.StatusHTTPError {\n\t\toutput += fmt.Sprintf(\"<b>Статус HTTP:<\/b> %v %v\\n\", status.HTTPStatusCode, http.StatusText(status.HTTPStatusCode))\n\t}\n\toutput += sign\n\n\treturn output\n}\n\nfunc (b *Bot) SendMessage(chatID int64, message string) {\n\tmsg := tgbotapi.NewMessage(chatID, message)\n\tmsg.ParseMode = tgbotapi.ModeHTML\n\tmsg.DisableWebPagePreview = true\n\tb.TgBot.Send(msg)\n}\n\nfunc (b *Bot) SendDialogMessage(replyTo *tgbotapi.Message, message string) {\n\tmsg := tgbotapi.NewMessage(replyTo.Chat.ID, message)\n\tmsg.ReplyToMessageID = replyTo.MessageID\n\tmsg.ReplyMarkup = tgbotapi.ForceReply{\n\t\tForceReply: true,\n\t\tSelective: true,\n\t}\n\tb.TgBot.Send(msg)\n}\n\nfunc (b *Bot) MonitorStart() {\n\tgo func() {\n\t\tfor upd := range b.Monitor.Updates {\n\t\t\trec, err := b.DB.GetTarget(int(upd.Target.ID))\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tb.SendMessage(\n\t\t\t\trec.ChatID,\n\t\t\t\tb.formatStatusUpdate(upd.Target, upd.Status))\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor err := range b.Monitor.Errors() 
{\n\t\t\tfmt.Println(err)\n\t\t}\n\t}()\n\n\tgo b.Monitor.Run(nil)\n}\n\ntype session struct {\n\tStage int\n\tDialog dialog\n}\n\ntype dialog interface {\n\tContinueDialog(stepNumber int, update tgbotapi.Update, bot *tgbotapi.BotAPI) (int, bool)\n}\n\ntype addNewTarget struct {\n\tTitle string\n\tURL string\n\tbot *Bot\n}\n\nfunc (t *addNewTarget) ContinueDialog(stepNumber int, update tgbotapi.Update, bot *tgbotapi.BotAPI) (int, bool) {\n\tif stepNumber == 1 {\n\t\tt.bot.SendDialogMessage(update.Message, \"Введите заголовок цели\")\n\t\treturn 2, true\n\t}\n\tif stepNumber == 2 {\n\t\tt.Title = update.Message.Text\n\t\tt.bot.SendDialogMessage(update.Message, \"Введите URL адрес цели\")\n\t\treturn 3, true\n\t}\n\tif stepNumber == 3 {\n\t\tif _, err := url.Parse(update.Message.Text); err != nil {\n\t\t\tt.bot.SendDialogMessage(update.Message, \"Ошибка ввода URL адреса, попробуйте еще раз\")\n\t\t\treturn 3, true\n\t\t}\n\t\tt.URL = update.Message.Text\n\t\terr := t.bot.DB.CreateTarget(Record{\n\t\t\tChatID: update.Message.Chat.ID,\n\t\t\tTitle: t.Title,\n\t\t\tURL: t.URL,\n\t\t})\n\t\tif err != nil {\n\t\t\tt.bot.SendMessage(\n\t\t\t\tupdate.Message.Chat.ID,\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"Ошибка добавления цели, свяжитесь с администратором: %v\",\n\t\t\t\t\tt.bot.AdminNickname))\n\t\t\treturn 0, false\n\t\t}\n\t\tt.bot.SendMessage(update.Message.Chat.ID, \"Цель успешно добавлена\")\n\t\treturn 0, false\n\t}\n\treturn 0, false\n}\n\ntype deleteTarget struct {\n\tbot *Bot\n}\n\nfunc (t *deleteTarget) ContinueDialog(stepNumber int, update tgbotapi.Update, bot *tgbotapi.BotAPI) (int, bool) {\n\tif stepNumber == 1 {\n\t\ttargs, err := t.bot.DB.GetCurrentTargets(update.Message.Chat.ID)\n\t\tif err != nil {\n\t\t\tt.bot.SendMessage(\n\t\t\t\tupdate.Message.Chat.ID,\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"Ошибка получения целей, свяжитесь с администратором: %v\",\n\t\t\t\t\tt.bot.AdminNickname))\n\t\t\treturn 0, false\n\t\t}\n\t\tif len(targs) == 0 {\n\t\t\tt.bot.SendMessage(update.Message.Chat.ID, \"Целей не обнаружено!\")\n\t\t\treturn 0, false\n\t\t}\n\t\tvar targetStrings []string\n\t\ttargetStrings = append(targetStrings, \"Введите <b>идентификатор<\/b> цели для удаления\\n\")\n\t\tfor _, target := range targs {\n\t\t\ttargetStrings = append(\n\t\t\t\ttargetStrings,\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"<b>Идентификатор:<\/b> %v\\n<b>Заголовок:<\/b> %v\\n<b>URL:<\/b> %v\\n\",\n\t\t\t\t\ttarget.ID,\n\t\t\t\t\treplaceHTML(target.Title),\n\t\t\t\t\treplaceHTML(target.URL)))\n\t\t}\n\t\tmessage := strings.Join(targetStrings, \"\\n\")\n\t\tt.bot.SendDialogMessage(update.Message, message)\n\t\treturn 2, true\n\t}\n\tif stepNumber == 2 {\n\t\ttarget, err := strconv.Atoi(update.Message.Text)\n\t\tif err != nil {\n\t\t\tt.bot.SendDialogMessage(update.Message, \"Ошибка ввода идентификатора\")\n\t\t\treturn 2, true\n\t\t}\n\t\ttargetFromDB, err := t.bot.DB.GetTarget(target)\n\t\tif err != nil || targetFromDB.ChatID != update.Message.Chat.ID {\n\t\t\tt.bot.SendMessage(update.Message.Chat.ID, \"Цель не найдена\")\n\t\t\treturn 0, false\n\t\t}\n\t\terr = t.bot.DB.DeleteTarget(target)\n\t\tif err != nil {\n\t\t\tt.bot.SendMessage(\n\t\t\t\tupdate.Message.Chat.ID,\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"Ошибка удаления цели, свяжитесь с администратором: %v\",\n\t\t\t\t\tt.bot.AdminNickname))\n\t\t\treturn 0, false\n\t\t}\n\t\tt.bot.SendMessage(update.Message.Chat.ID, \"Цель успешно удалена!\")\n\t\treturn 0, false\n\t}\n\treturn 0, false\n}\n\nfunc (b *Bot) Dispatch(update *tgbotapi.Update) {\n\tif update.Message == 
nil {\n\t\treturn\n\t}\n\tif _, ok := b.sessionMap[update.Message.Chat.ID]; !ok {\n\t\tb.sessionMap[update.Message.Chat.ID] = &session{}\n\t\tb.sessionMap[update.Message.Chat.ID].Stage = 1\n\t\tb.sessionMap[update.Message.Chat.ID].Dialog = nil\n\t}\n\tsess := b.sessionMap[update.Message.Chat.ID]\n\tif sess.Dialog != nil {\n\t\tvar ok bool\n\t\tsess.Stage, ok = sess.Dialog.ContinueDialog(sess.Stage, *update, b.TgBot)\n\t\tif !ok {\n\t\t\tsess.Dialog = nil\n\t\t}\n\t\treturn\n\t}\n\tif update.Message.Command() == \"start\" {\n\t\tb.SendMessage(\n\t\t\tupdate.Message.Chat.ID,\n\t\t\t\"Привет!\\nЯ бот который умеет следить за доступностью сайтов.\\n\")\n\t\treturn\n\t}\n\tif update.Message.Command() == \"add\" {\n\t\tb.StartDialog(update, &addNewTarget{\n\t\t\tbot: b,\n\t\t})\n\t}\n\tif update.Message.Command() == \"targets\" {\n\t\ttargs, err := b.DB.GetCurrentTargets(update.Message.Chat.ID)\n\t\tif err != nil {\n\t\t\tb.SendMessage(\n\t\t\t\tupdate.Message.Chat.ID,\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"Ошибка получения целей, свяжитесь с администратором: %v\",\n\t\t\t\t\tb.AdminNickname))\n\t\t\treturn\n\t\t}\n\t\tif len(targs) == 0 {\n\t\t\tb.SendMessage(update.Message.Chat.ID, \"Целей не обнаружено!\")\n\t\t\treturn\n\t\t}\n\t\tvar targetStrings []string\n\t\tfor _, target := range targs {\n\t\t\tstatus, ok, err := b.Monitor.StatusStore.GetStatus(target.ToTarget())\n\t\t\tif err != nil {\n\t\t\t\tb.SendMessage(\n\t\t\t\t\tupdate.Message.Chat.ID,\n\t\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\t\"Ошибка статуса целей, свяжитесь с администратором: %v\",\n\t\t\t\t\t\tb.AdminNickname))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar header string\n\t\t\theader = fmt.Sprintf(\n\t\t\t\t\"<a href=\\\"%v\\\">%v<\/a>\",\n\t\t\t\treplaceHTML(target.URL), replaceHTML(target.Title))\n\n\t\t\tvar statusText string\n\t\t\tif ok {\n\t\t\t\tvar emoji string\n\t\t\t\tif status.Type == monitor.StatusOK {\n\t\t\t\t\temoji = okStatusEmoji\n\t\t\t\t} else {\n\t\t\t\t\temoji = errorStatusEmoji\n\t\t\t\t}\n\n\t\t\t\tstatusText = fmt.Sprintf(\n\t\t\t\t\t\"%v %v (%v ms)\",\n\t\t\t\t\temoji, status.Type, int64(status.ResponseTime\/time.Millisecond))\n\t\t\t} else {\n\t\t\t\tstatusText = \"N\/A\"\n\t\t\t}\n\n\t\t\ttargetStrings = append(\n\t\t\t\ttargetStrings, fmt.Sprintf(\"%v: %v\", header, statusText))\n\t\t}\n\t\tmessage := strings.Join(targetStrings, \"\\n\")\n\t\tb.SendMessage(update.Message.Chat.ID, message)\n\t\treturn\n\t}\n\tif update.Message.Command() == \"delete\" {\n\t\tb.StartDialog(update, &deleteTarget{\n\t\t\tbot: b,\n\t\t})\n\t}\n}\n\nfunc (b *Bot) StartDialog(update *tgbotapi.Update, dialog dialog) {\n\tvar ok bool\n\tb.sessionMap[update.Message.Chat.ID].Dialog = dialog\n\tb.sessionMap[update.Message.Chat.ID].Stage, ok = dialog.ContinueDialog(1, *update, b.TgBot)\n\tif !ok {\n\t\tb.sessionMap[update.Message.Chat.ID].Dialog = nil\n\t}\n\treturn\n}\n\nfunc (b *Bot) Run() error {\n\tu := tgbotapi.NewUpdate(0)\n\tu.Timeout = 0\n\n\tupdates, err := b.TgBot.GetUpdatesChan(u)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor update := range updates {\n\t\tb.Dispatch(&update)\n\t}\n\n\treturn nil\n}\n<commit_msg>:bug: Fix nil map assignment<commit_after>package telegrambot\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/go-telegram-bot-api\/telegram-bot-api\"\n\t\"github.com\/yamnikov-oleg\/avamon-bot\/monitor\"\n\n\t_ \"github.com\/jinzhu\/gorm\/dialects\/sqlite\"\n)\n\nvar (\n\t\/\/ Green clover\n\tokStatusEmoji = string([]rune{0x2618, 0xfe0f})\n\t\/\/ Red 
alarm light\n\terrorStatusEmoji = string([]rune{0x1f6a8})\n)\n\nfunc replaceHTML(input string) string {\n\tinput = strings.Replace(input, \"<\", \"<\", -1)\n\tinput = strings.Replace(input, \">\", \">\", -1)\n\treturn input\n}\n\ntype Bot struct {\n\tAdminNickname string\n\tDB *TargetsDB\n\tTgBot *tgbotapi.BotAPI\n\tMonitor *monitor.Monitor\n\tsessionMap map[int64]*session\n}\n\nfunc (b *Bot) formatStatusUpdate(target monitor.Target, status monitor.Status) string {\n\tvar output string\n\tvar sign string\n\n\tif status.Type == monitor.StatusOK {\n\t\tsign = strings.Repeat(okStatusEmoji, 10) + \"\\n\"\n\t} else {\n\t\tsign = strings.Repeat(errorStatusEmoji, 10) + \"\\n\"\n\t}\n\n\toutput += sign\n\toutput += fmt.Sprintf(\"<b>%v:<\/b> <b>%v<\/b>\\n\\n\", replaceHTML(target.Title), status.Type)\n\toutput += fmt.Sprintf(\"<b>URL:<\/b> %v\\n\", replaceHTML(target.URL))\n\toutput += fmt.Sprintf(\"<b>Время ответа:<\/b> %v\\n\", status.ResponseTime)\n\n\tif status.Type != monitor.StatusOK {\n\t\toutput += fmt.Sprintf(\"<b>Сообщение:<\/b> %v\\n\", replaceHTML(status.Err.Error()))\n\t}\n\tif status.Type == monitor.StatusHTTPError {\n\t\toutput += fmt.Sprintf(\"<b>Статус HTTP:<\/b> %v %v\\n\", status.HTTPStatusCode, http.StatusText(status.HTTPStatusCode))\n\t}\n\toutput += sign\n\n\treturn output\n}\n\nfunc (b *Bot) SendMessage(chatID int64, message string) {\n\tmsg := tgbotapi.NewMessage(chatID, message)\n\tmsg.ParseMode = tgbotapi.ModeHTML\n\tmsg.DisableWebPagePreview = true\n\tb.TgBot.Send(msg)\n}\n\nfunc (b *Bot) SendDialogMessage(replyTo *tgbotapi.Message, message string) {\n\tmsg := tgbotapi.NewMessage(replyTo.Chat.ID, message)\n\tmsg.ReplyToMessageID = replyTo.MessageID\n\tmsg.ReplyMarkup = tgbotapi.ForceReply{\n\t\tForceReply: true,\n\t\tSelective: true,\n\t}\n\tb.TgBot.Send(msg)\n}\n\nfunc (b *Bot) MonitorStart() {\n\tgo func() {\n\t\tfor upd := range b.Monitor.Updates {\n\t\t\trec, err := b.DB.GetTarget(int(upd.Target.ID))\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tb.SendMessage(\n\t\t\t\trec.ChatID,\n\t\t\t\tb.formatStatusUpdate(upd.Target, upd.Status))\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor err := range b.Monitor.Errors() {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}()\n\n\tgo b.Monitor.Run(nil)\n}\n\ntype session struct {\n\tStage int\n\tDialog dialog\n}\n\ntype dialog interface {\n\tContinueDialog(stepNumber int, update tgbotapi.Update, bot *tgbotapi.BotAPI) (int, bool)\n}\n\ntype addNewTarget struct {\n\tTitle string\n\tURL string\n\tbot *Bot\n}\n\nfunc (t *addNewTarget) ContinueDialog(stepNumber int, update tgbotapi.Update, bot *tgbotapi.BotAPI) (int, bool) {\n\tif stepNumber == 1 {\n\t\tt.bot.SendDialogMessage(update.Message, \"Введите заголовок цели\")\n\t\treturn 2, true\n\t}\n\tif stepNumber == 2 {\n\t\tt.Title = update.Message.Text\n\t\tt.bot.SendDialogMessage(update.Message, \"Введите URL адрес цели\")\n\t\treturn 3, true\n\t}\n\tif stepNumber == 3 {\n\t\tif _, err := url.Parse(update.Message.Text); err != nil {\n\t\t\tt.bot.SendDialogMessage(update.Message, \"Ошибка ввода URL адреса, попробуйте еще раз\")\n\t\t\treturn 3, true\n\t\t}\n\t\tt.URL = update.Message.Text\n\t\terr := t.bot.DB.CreateTarget(Record{\n\t\t\tChatID: update.Message.Chat.ID,\n\t\t\tTitle: t.Title,\n\t\t\tURL: t.URL,\n\t\t})\n\t\tif err != nil {\n\t\t\tt.bot.SendMessage(\n\t\t\t\tupdate.Message.Chat.ID,\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"Ошибка добавления цели, свяжитесь с администратором: %v\",\n\t\t\t\t\tt.bot.AdminNickname))\n\t\t\treturn 0, 
false\n\t\t}\n\t\tt.bot.SendMessage(update.Message.Chat.ID, \"Цель успешно добавлена\")\n\t\treturn 0, false\n\t}\n\treturn 0, false\n}\n\ntype deleteTarget struct {\n\tbot *Bot\n}\n\nfunc (t *deleteTarget) ContinueDialog(stepNumber int, update tgbotapi.Update, bot *tgbotapi.BotAPI) (int, bool) {\n\tif stepNumber == 1 {\n\t\ttargs, err := t.bot.DB.GetCurrentTargets(update.Message.Chat.ID)\n\t\tif err != nil {\n\t\t\tt.bot.SendMessage(\n\t\t\t\tupdate.Message.Chat.ID,\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"Ошибка получения целей, свяжитесь с администратором: %v\",\n\t\t\t\t\tt.bot.AdminNickname))\n\t\t\treturn 0, false\n\t\t}\n\t\tif len(targs) == 0 {\n\t\t\tt.bot.SendMessage(update.Message.Chat.ID, \"Целей не обнаружено!\")\n\t\t\treturn 0, false\n\t\t}\n\t\tvar targetStrings []string\n\t\ttargetStrings = append(targetStrings, \"Введите <b>идентификатор<\/b> цели для удаления\\n\")\n\t\tfor _, target := range targs {\n\t\t\ttargetStrings = append(\n\t\t\t\ttargetStrings,\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"<b>Идентификатор:<\/b> %v\\n<b>Заголовок:<\/b> %v\\n<b>URL:<\/b> %v\\n\",\n\t\t\t\t\ttarget.ID,\n\t\t\t\t\treplaceHTML(target.Title),\n\t\t\t\t\treplaceHTML(target.URL)))\n\t\t}\n\t\tmessage := strings.Join(targetStrings, \"\\n\")\n\t\tt.bot.SendDialogMessage(update.Message, message)\n\t\treturn 2, true\n\t}\n\tif stepNumber == 2 {\n\t\ttarget, err := strconv.Atoi(update.Message.Text)\n\t\tif err != nil {\n\t\t\tt.bot.SendDialogMessage(update.Message, \"Ошибка ввода идентификатора\")\n\t\t\treturn 2, true\n\t\t}\n\t\ttargetFromDB, err := t.bot.DB.GetTarget(target)\n\t\tif err != nil || targetFromDB.ChatID != update.Message.Chat.ID {\n\t\t\tt.bot.SendMessage(update.Message.Chat.ID, \"Цель не найдена\")\n\t\t\treturn 0, false\n\t\t}\n\t\terr = t.bot.DB.DeleteTarget(target)\n\t\tif err != nil {\n\t\t\tt.bot.SendMessage(\n\t\t\t\tupdate.Message.Chat.ID,\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"Ошибка удаления цели, свяжитесь с администратором: %v\",\n\t\t\t\t\tt.bot.AdminNickname))\n\t\t\treturn 0, false\n\t\t}\n\t\tt.bot.SendMessage(update.Message.Chat.ID, \"Цель успешно удалена!\")\n\t\treturn 0, false\n\t}\n\treturn 0, false\n}\n\nfunc (b *Bot) Dispatch(update *tgbotapi.Update) {\n\tif update.Message == nil {\n\t\treturn\n\t}\n\tif _, ok := b.sessionMap[update.Message.Chat.ID]; !ok {\n\t\tb.sessionMap[update.Message.Chat.ID] = &session{}\n\t\tb.sessionMap[update.Message.Chat.ID].Stage = 1\n\t\tb.sessionMap[update.Message.Chat.ID].Dialog = nil\n\t}\n\tsess := b.sessionMap[update.Message.Chat.ID]\n\tif sess.Dialog != nil {\n\t\tvar ok bool\n\t\tsess.Stage, ok = sess.Dialog.ContinueDialog(sess.Stage, *update, b.TgBot)\n\t\tif !ok {\n\t\t\tsess.Dialog = nil\n\t\t}\n\t\treturn\n\t}\n\tif update.Message.Command() == \"start\" {\n\t\tb.SendMessage(\n\t\t\tupdate.Message.Chat.ID,\n\t\t\t\"Привет!\\nЯ бот который умеет следить за доступностью сайтов.\\n\")\n\t\treturn\n\t}\n\tif update.Message.Command() == \"add\" {\n\t\tb.StartDialog(update, &addNewTarget{\n\t\t\tbot: b,\n\t\t})\n\t}\n\tif update.Message.Command() == \"targets\" {\n\t\ttargs, err := b.DB.GetCurrentTargets(update.Message.Chat.ID)\n\t\tif err != nil {\n\t\t\tb.SendMessage(\n\t\t\t\tupdate.Message.Chat.ID,\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"Ошибка получения целей, свяжитесь с администратором: %v\",\n\t\t\t\t\tb.AdminNickname))\n\t\t\treturn\n\t\t}\n\t\tif len(targs) == 0 {\n\t\t\tb.SendMessage(update.Message.Chat.ID, \"Целей не обнаружено!\")\n\t\t\treturn\n\t\t}\n\t\tvar targetStrings []string\n\t\tfor _, target := range targs 
{\n\t\t\tstatus, ok, err := b.Monitor.StatusStore.GetStatus(target.ToTarget())\n\t\t\tif err != nil {\n\t\t\t\tb.SendMessage(\n\t\t\t\t\tupdate.Message.Chat.ID,\n\t\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\t\"Ошибка статуса целей, свяжитесь с администратором: %v\",\n\t\t\t\t\t\tb.AdminNickname))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar header string\n\t\t\theader = fmt.Sprintf(\n\t\t\t\t\"<a href=\\\"%v\\\">%v<\/a>\",\n\t\t\t\treplaceHTML(target.URL), replaceHTML(target.Title))\n\n\t\t\tvar statusText string\n\t\t\tif ok {\n\t\t\t\tvar emoji string\n\t\t\t\tif status.Type == monitor.StatusOK {\n\t\t\t\t\temoji = okStatusEmoji\n\t\t\t\t} else {\n\t\t\t\t\temoji = errorStatusEmoji\n\t\t\t\t}\n\n\t\t\t\tstatusText = fmt.Sprintf(\n\t\t\t\t\t\"%v %v (%v ms)\",\n\t\t\t\t\temoji, status.Type, int64(status.ResponseTime\/time.Millisecond))\n\t\t\t} else {\n\t\t\t\tstatusText = \"N\/A\"\n\t\t\t}\n\n\t\t\ttargetStrings = append(\n\t\t\t\ttargetStrings, fmt.Sprintf(\"%v: %v\", header, statusText))\n\t\t}\n\t\tmessage := strings.Join(targetStrings, \"\\n\")\n\t\tb.SendMessage(update.Message.Chat.ID, message)\n\t\treturn\n\t}\n\tif update.Message.Command() == \"delete\" {\n\t\tb.StartDialog(update, &deleteTarget{\n\t\t\tbot: b,\n\t\t})\n\t}\n}\n\nfunc (b *Bot) StartDialog(update *tgbotapi.Update, dialog dialog) {\n\tvar ok bool\n\tb.sessionMap[update.Message.Chat.ID].Dialog = dialog\n\tb.sessionMap[update.Message.Chat.ID].Stage, ok = dialog.ContinueDialog(1, *update, b.TgBot)\n\tif !ok {\n\t\tb.sessionMap[update.Message.Chat.ID].Dialog = nil\n\t}\n\treturn\n}\n\nfunc (b *Bot) Run() error {\n\tb.sessionMap = map[int64]*session{}\n\n\tu := tgbotapi.NewUpdate(0)\n\tu.Timeout = 0\n\n\tupdates, err := b.TgBot.GetUpdatesChan(u)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor update := range updates {\n\t\tb.Dispatch(&update)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Spreed WebRTC.\n * Copyright (C) 2013-2015 struktur AG\n *\n * This file is part of Spreed WebRTC.\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with this program. 
If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\n *\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/nats-io\/nats\"\n)\n\nconst (\n\tBusManagerStartup = \"startup\"\n\tBusManagerOffer = \"offer\"\n\tBusManagerAnswer = \"answer\"\n\tBusManagerBye = \"bye\"\n\tBusManagerConnect = \"connect\"\n\tBusManagerDisconnect = \"disconnect\"\n\tBusManagerSession = \"session\"\n)\n\n\/\/ A BusManager provides the API to interact with a bus.\ntype BusManager interface {\n\tTrigger(name, from, payload string, data interface{}) error\n}\n\n\/\/ A BusTrigger is a container to serialize trigger events\n\/\/ for the bus backend.\ntype BusTrigger struct {\n\tId string\n\tName string\n\tFrom string\n\tPayload string `json:\",omitempty\"`\n\tData interface{} `json:\",omitempty\"`\n}\n\n\/\/ BusSubjectTrigger returns the bus subject for trigger payloads.\nfunc BusSubjectTrigger(prefix, suffix string) string {\n\treturn fmt.Sprintf(\"%s.%s\", prefix, suffix)\n}\n\ntype busManager struct {\n\tBusManager\n}\n\n\/\/ NewBusManager creates and initializes a new BusManager with the\n\/\/ provided flags for NATS support. It is intended to connect the\n\/\/ backend bus with an easy-to-use API to send and receive bus data.\nfunc NewBusManager(id string, useNats bool, subjectPrefix string) BusManager {\n\tvar b BusManager\n\tvar err error\n\tif useNats {\n\t\tb, err = newNatsBus(id, subjectPrefix)\n\t\tif err == nil {\n\t\t\tlog.Println(\"Nats bus connected\")\n\t\t} else {\n\t\t\tlog.Println(\"Error connecting nats bus\", err)\n\t\t\tb = &noopBus{id}\n\t\t}\n\t} else {\n\t\tb = &noopBus{id}\n\t}\n\tif err == nil {\n\t\tb.Trigger(BusManagerStartup, id, \"\", nil)\n\t}\n\n\treturn &busManager{b}\n}\n\ntype noopBus struct {\n\tid string\n}\n\nfunc (bus *noopBus) Trigger(name, from, payload string, data interface{}) error {\n\treturn nil\n}\n\ntype natsBus struct {\n\tid string\n\tprefix string\n\tec *nats.EncodedConn\n}\n\nfunc newNatsBus(id, prefix string) (*natsBus, error) {\n\tec, err := EstablishNatsConnection(nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif prefix == \"\" {\n\t\tprefix = \"channelling.trigger\"\n\t}\n\treturn &natsBus{id, prefix, ec}, nil\n}\n\nfunc (bus *natsBus) Trigger(name, from, payload string, data interface{}) (err error) {\n\tif bus.ec != nil {\n\t\ttrigger := &BusTrigger{\n\t\t\tId: bus.id,\n\t\t\tName: name,\n\t\t\tFrom: from,\n\t\t\tPayload: payload,\n\t\t\tData: data,\n\t\t}\n\t\terr = bus.ec.Publish(BusSubjectTrigger(bus.prefix, name), trigger)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to trigger NATS event\", err)\n\t\t}\n\t}\n\treturn err\n}\n<commit_msg>Trigger NATS events non-blocking through buffered channel.<commit_after>\/*\n * Spreed WebRTC.\n * Copyright (C) 2013-2015 struktur AG\n *\n * This file is part of Spreed WebRTC.\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with this program. 
If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\n *\/\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/nats-io\/nats\"\n)\n\nconst (\n\tBusManagerStartup = \"startup\"\n\tBusManagerOffer = \"offer\"\n\tBusManagerAnswer = \"answer\"\n\tBusManagerBye = \"bye\"\n\tBusManagerConnect = \"connect\"\n\tBusManagerDisconnect = \"disconnect\"\n\tBusManagerSession = \"session\"\n)\n\n\/\/ A BusManager provides the API to interact with a bus.\ntype BusManager interface {\n\tTrigger(name, from, payload string, data interface{}) error\n}\n\n\/\/ A BusTrigger is a container to serialize trigger events\n\/\/ for the bus backend.\ntype BusTrigger struct {\n\tId string\n\tName string\n\tFrom string\n\tPayload string `json:\",omitempty\"`\n\tData interface{} `json:\",omitempty\"`\n}\n\n\/\/ BusSubjectTrigger returns the bus subject for trigger payloads.\nfunc BusSubjectTrigger(prefix, suffix string) string {\n\treturn fmt.Sprintf(\"%s.%s\", prefix, suffix)\n}\n\ntype busManager struct {\n\tBusManager\n}\n\n\/\/ NewBusManager creates and initializes a new BusManager with the\n\/\/ provided flags for NATS support. It is intended to connect the\n\/\/ backend bus with an easy-to-use API to send and receive bus data.\nfunc NewBusManager(id string, useNats bool, subjectPrefix string) BusManager {\n\tvar b BusManager\n\tvar err error\n\tif useNats {\n\t\tb, err = newNatsBus(id, subjectPrefix)\n\t\tif err == nil {\n\t\t\tlog.Println(\"Nats bus connected\")\n\t\t} else {\n\t\t\tlog.Println(\"Error connecting nats bus\", err)\n\t\t\tb = &noopBus{id}\n\t\t}\n\t} else {\n\t\tb = &noopBus{id}\n\t}\n\tif err == nil {\n\t\tb.Trigger(BusManagerStartup, id, \"\", nil)\n\t}\n\n\treturn &busManager{b}\n}\n\ntype noopBus struct {\n\tid string\n}\n\nfunc (bus *noopBus) Trigger(name, from, payload string, data interface{}) error {\n\treturn nil\n}\n\ntype natsBus struct {\n\tid string\n\tprefix string\n\tec *nats.EncodedConn\n\ttriggerQueue chan *busQueueEntry\n}\n\nfunc newNatsBus(id, prefix string) (*natsBus, error) {\n\tec, err := EstablishNatsConnection(nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif prefix == \"\" {\n\t\tprefix = \"channelling.trigger\"\n\t}\n\t\/\/ Create buffered channel for outbound NATS data.\n\ttriggerQueue := make(chan *busQueueEntry, 50)\n\t\/\/ Start goroutine to process outbound NATS publishing.\n\tgo chPublish(ec, triggerQueue)\n\treturn &natsBus{id, prefix, ec, triggerQueue}, nil\n}\n\nfunc (bus *natsBus) Trigger(name, from, payload string, data interface{}) (err error) {\n\tif bus.ec != nil {\n\t\ttrigger := &BusTrigger{\n\t\t\tId: bus.id,\n\t\t\tName: name,\n\t\t\tFrom: from,\n\t\t\tPayload: payload,\n\t\t\tData: data,\n\t\t}\n\t\tentry := &busQueueEntry{BusSubjectTrigger(bus.prefix, name), trigger}\n\t\tselect {\n\t\tcase bus.triggerQueue <- entry:\n\t\t\t\/\/ sent ok\n\t\tdefault:\n\t\t\tlog.Println(\"Failed to queue NATS event - queue full?\")\n\t\t\terr = errors.New(\"NATS trigger queue full\")\n\t\t}\n\t}\n\treturn err\n}\n\ntype busQueueEntry struct {\n\tsubject string\n\tdata interface{}\n}\n\nfunc chPublish(ec *nats.EncodedConn, channel chan (*busQueueEntry)) {\n\tfor {\n\t\tentry := <-channel\n\t\terr := ec.Publish(entry.subject, entry.data)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to publish to NATS\", entry.subject, err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package controller_test\n\nimport 
(\n\t\"os\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com\/almighty\/almighty-core\/account\"\n\t\"github.com\/almighty\/almighty-core\/app\"\n\t\"github.com\/almighty\/almighty-core\/app\/test\"\n\t\"github.com\/almighty\/almighty-core\/application\"\n\t. \"github.com\/almighty\/almighty-core\/controller\"\n\t\"github.com\/almighty\/almighty-core\/gormapplication\"\n\t\"github.com\/almighty\/almighty-core\/gormsupport\/cleaner\"\n\t\"github.com\/almighty\/almighty-core\/gormtestsupport\"\n\t\"github.com\/almighty\/almighty-core\/resource\"\n\t\"github.com\/goadesign\/goa\"\n\tuuid \"github.com\/satori\/go.uuid\"\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"github.com\/stretchr\/testify\/suite\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc TestRunSearchUser(t *testing.T) {\n\tresource.Require(t, resource.Database)\n\tpwd, err := os.Getwd()\n\trequire.Nil(t, err)\n\tsuite.Run(t, &TestSearchUserSearch{DBTestSuite: gormtestsupport.NewDBTestSuite(pwd + \"\/..\/config.yaml\")})\n}\n\ntype TestSearchUserSearch struct {\n\tgormtestsupport.DBTestSuite\n\tdb *gormapplication.GormDB\n\tsvc *goa.Service\n\tcontroller *SearchController\n\tclean func()\n}\n\nfunc (s *TestSearchUserSearch) SetupSuite() {\n\ts.DBTestSuite.SetupSuite()\n\ts.svc = goa.New(\"test\")\n\ts.db = gormapplication.NewGormDB(s.DB)\n\ts.controller = NewSearchController(s.svc, s.db, s.Configuration)\n}\n\nfunc (s *TestSearchUserSearch) SetupTest() {\n\ts.clean = cleaner.DeleteCreatedEntities(s.DB)\n}\n\nfunc (s *TestSearchUserSearch) TearDownTest() {\n\ts.clean = cleaner.DeleteCreatedEntities(s.DB)\n}\n\ntype userSearchTestArgs struct {\n\tpageOffset *string\n\tpageLimit *int\n\tq string\n}\n\ntype userSearchTestExpect func(*testing.T, okScenarioUserSearchTest, *app.UserList)\ntype userSearchTestExpects []userSearchTestExpect\n\ntype okScenarioUserSearchTest struct {\n\tname string\n\tuserSearchTestArgs userSearchTestArgs\n\tuserSearchTestExpects userSearchTestExpects\n}\n\nfunc (s *TestSearchUserSearch) TestUsersSearchOK() {\n\n\tidents := s.createTestData()\n\tdefer s.cleanTestData(idents)\n\n\ttests := []okScenarioUserSearchTest{\n\t\t{\"With lowercase fullname query\", userSearchTestArgs{s.offset(\"0\"), limit(10), \"x_test_ab\"}, userSearchTestExpects{s.totalCount(3)}},\n\t\t{\"With uppercase fullname query\", userSearchTestArgs{s.offset(\"0\"), limit(10), \"X_TEST_AB\"}, userSearchTestExpects{s.totalCount(3)}},\n\t\t{\"With uppercase email query\", userSearchTestArgs{s.offset(\"0\"), limit(10), \"EMAIL_X_TEST_AB\"}, userSearchTestExpects{s.totalCount(1)}},\n\t\t{\"With lowercase email query\", userSearchTestArgs{s.offset(\"0\"), limit(10), \"email_x_test_ab\"}, userSearchTestExpects{s.totalCount(1)}},\n\t\t{\"With username query\", userSearchTestArgs{s.offset(\"0\"), limit(10), \"x_test_c\"}, userSearchTestExpects{s.totalCount(3)}},\n\t\t{\"with special chars\", userSearchTestArgs{s.offset(\"0\"), limit(10), \"&:\\n!#%?*\"}, userSearchTestExpects{s.totalCount(0)}},\n\t\t{\"with multi page\", userSearchTestArgs{s.offset(\"0\"), limit(10), \"TEST\"}, userSearchTestExpects{s.hasLinks(\"Next\")}},\n\t\t{\"with last page\", userSearchTestArgs{s.offset(strconv.Itoa(len(idents) - 1)), limit(10), \"TEST\"}, userSearchTestExpects{s.hasNoLinks(\"Next\"), s.hasLinks(\"Prev\")}},\n\t\t{\"with different values\", userSearchTestArgs{s.offset(\"0\"), s.limit(10), \"TEST\"}, userSearchTestExpects{s.differentValues()}},\n\t}\n\n\tfor _, tt := range tests {\n\t\t_, result := test.UsersSearchOK(s.T(), 
context.Background(), s.svc, s.controller, tt.userSearchTestArgs.pageLimit, tt.userSearchTestArgs.pageOffset, tt.userSearchTestArgs.q)\n\t\tfor _, userSearchTestExpect := range tt.userSearchTestExpects {\n\t\t\tuserSearchTestExpect(s.T(), tt, result)\n\t\t}\n\t}\n}\n\nfunc (s *TestSearchUserSearch) TestUsersSearchBadRequest() {\n\tt := s.T()\n\ttests := []struct {\n\t\tname string\n\t\tuserSearchTestArgs userSearchTestArgs\n\t}{\n\t\t{\"with empty query\", userSearchTestArgs{s.offset(\"0\"), limit(10), \"\"}},\n\t}\n\n\tfor _, tt := range tests {\n\t\ttest.UsersSearchBadRequest(t, context.Background(), s.svc, s.controller, tt.userSearchTestArgs.pageLimit, tt.userSearchTestArgs.pageOffset, tt.userSearchTestArgs.q)\n\t}\n}\n\nfunc (s *TestSearchUserSearch) createTestData() []account.Identity {\n\tnames := []string{\"X_TEST_A\", \"X_TEST_AB\", \"X_TEST_B\", \"X_TEST_C\"}\n\temails := []string{\"email_x_test_ab@redhat.org\", \"email_x_test_a@redhat.org\", \"email_x_test_c@redhat.org\", \"email_x_test_b@redhat.org\"}\n\tusernames := []string{\"x_test_b\", \"x_test_c\", \"x_test_a\", \"x_test_ab\"}\n\tfor i := 0; i < 20; i++ {\n\t\tnames = append(names, \"TEST_\"+strconv.Itoa(i))\n\t\temails = append(emails, \"myemail\"+strconv.Itoa(i))\n\t\tusernames = append(usernames, \"myusernames\"+strconv.Itoa(i))\n\t}\n\n\tidents := []account.Identity{}\n\n\terr := application.Transactional(s.db, func(app application.Application) error {\n\t\tfor i, name := range names {\n\n\t\t\tuser := account.User{\n\t\t\t\tFullName: name,\n\t\t\t\tImageURL: \"http:\/\/example.org\/\" + name + \".png\",\n\t\t\t\tEmail: emails[i],\n\t\t\t}\n\t\t\terr := app.Users().Create(context.Background(), &user)\n\t\t\trequire.Nil(s.T(), err)\n\n\t\t\tident := account.Identity{\n\t\t\t\tUser: user,\n\t\t\t\tUsername: usernames[i] + uuid.NewV4().String(),\n\t\t\t\tProviderType: \"kc\",\n\t\t\t}\n\t\t\terr = app.Identities().Create(context.Background(), &ident)\n\t\t\trequire.Nil(s.T(), err)\n\n\t\t\tidents = append(idents, ident)\n\t\t}\n\t\treturn nil\n\t})\n\trequire.Nil(s.T(), err)\n\treturn idents\n}\n\nfunc (s *TestSearchUserSearch) cleanTestData(idents []account.Identity) {\n\terr := application.Transactional(s.db, func(app application.Application) error {\n\t\tdb := app.(*gormapplication.GormTransaction).DB()\n\t\tdb = db.Unscoped()\n\t\tfor _, ident := range idents {\n\t\t\tdb.Delete(ident)\n\t\t\tdb.Delete(&account.User{}, \"id = ?\", ident.User.ID)\n\t\t}\n\t\treturn nil\n\t})\n\trequire.Nil(s.T(), err)\n}\n\nfunc (s *TestSearchUserSearch) totalCount(count int) userSearchTestExpect {\n\treturn func(t *testing.T, scenario okScenarioUserSearchTest, result *app.UserList) {\n\t\tif got := result.Meta.TotalCount; got != count {\n\t\t\tt.Errorf(\"%s got = %v, want %v\", scenario.name, got, count)\n\t\t}\n\t}\n}\n\nfunc (s *TestSearchUserSearch) totalCountAtLeast(count int) userSearchTestExpect {\n\treturn func(t *testing.T, scenario okScenarioUserSearchTest, result *app.UserList) {\n\t\tgot := result.Meta.TotalCount\n\t\tif !(got >= count) {\n\t\t\tt.Errorf(\"%s got %v, wanted at least %v\", scenario.name, got, count)\n\t\t}\n\t}\n}\n\nfunc (s *TestSearchUserSearch) hasLinks(linkNames ...string) userSearchTestExpect {\n\treturn func(t *testing.T, scenario okScenarioUserSearchTest, result *app.UserList) {\n\t\tfor _, linkName := range linkNames {\n\t\t\tlink := linkName\n\t\t\tif reflect.Indirect(reflect.ValueOf(result.Links)).FieldByName(link).IsNil() {\n\t\t\t\tt.Errorf(\"%s got empty link, wanted %s\", scenario.name, 
link)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *TestSearchUserSearch) hasNoLinks(linkNames ...string) userSearchTestExpect {\n\treturn func(t *testing.T, scenario okScenarioUserSearchTest, result *app.UserList) {\n\t\tfor _, linkName := range linkNames {\n\t\t\tif !reflect.Indirect(reflect.ValueOf(result.Links)).FieldByName(linkName).IsNil() {\n\t\t\t\tt.Errorf(\"%s got link, wanted empty %s\", scenario.name, linkName)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *TestSearchUserSearch) differentValues() userSearchTestExpect {\n\treturn func(t *testing.T, scenario okScenarioUserSearchTest, result *app.UserList) {\n\t\tvar prev *app.UserData\n\n\t\tfor i := range result.Data {\n\t\t\tu := result.Data[i]\n\t\t\tif prev == nil {\n\t\t\t\tprev = u\n\t\t\t} else {\n\t\t\t\tif *prev.Attributes.FullName == *u.Attributes.FullName {\n\t\t\t\t\tt.Errorf(\"%s got equal Fullname, wanted different %s\", scenario.name, *u.Attributes.FullName)\n\t\t\t\t}\n\t\t\t\tif *prev.Attributes.ImageURL == *u.Attributes.ImageURL {\n\t\t\t\t\tt.Errorf(\"%s got equal ImageURL, wanted different %s\", scenario.name, *u.Attributes.ImageURL)\n\t\t\t\t}\n\t\t\t\tif *prev.ID == *u.ID {\n\t\t\t\t\tt.Errorf(\"%s got equal ID, wanted different %s\", scenario.name, *u.ID)\n\t\t\t\t}\n\t\t\t\tif prev.Type != u.Type {\n\t\t\t\t\tt.Errorf(\"%s got non equal Type, wanted same %s\", scenario.name, u.Type)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *TestSearchUserSearch) limit(n int) *int {\n\treturn &n\n}\nfunc (s *TestSearchUserSearch) offset(n string) *string {\n\treturn &n\n}\n<commit_msg>use greater-than-equal clause for search results count (#1261)<commit_after>package controller_test\n\nimport (\n\t\"os\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com\/almighty\/almighty-core\/account\"\n\t\"github.com\/almighty\/almighty-core\/app\"\n\t\"github.com\/almighty\/almighty-core\/app\/test\"\n\t\"github.com\/almighty\/almighty-core\/application\"\n\t. 
\"github.com\/almighty\/almighty-core\/controller\"\n\t\"github.com\/almighty\/almighty-core\/gormapplication\"\n\t\"github.com\/almighty\/almighty-core\/gormsupport\/cleaner\"\n\t\"github.com\/almighty\/almighty-core\/gormtestsupport\"\n\t\"github.com\/almighty\/almighty-core\/resource\"\n\t\"github.com\/goadesign\/goa\"\n\tuuid \"github.com\/satori\/go.uuid\"\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"github.com\/stretchr\/testify\/suite\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc TestRunSearchUser(t *testing.T) {\n\tresource.Require(t, resource.Database)\n\tpwd, err := os.Getwd()\n\trequire.Nil(t, err)\n\tsuite.Run(t, &TestSearchUserSearch{DBTestSuite: gormtestsupport.NewDBTestSuite(pwd + \"\/..\/config.yaml\")})\n}\n\ntype TestSearchUserSearch struct {\n\tgormtestsupport.DBTestSuite\n\tdb *gormapplication.GormDB\n\tsvc *goa.Service\n\tcontroller *SearchController\n\tclean func()\n}\n\nfunc (s *TestSearchUserSearch) SetupSuite() {\n\ts.DBTestSuite.SetupSuite()\n\ts.svc = goa.New(\"test\")\n\ts.db = gormapplication.NewGormDB(s.DB)\n\ts.controller = NewSearchController(s.svc, s.db, s.Configuration)\n}\n\nfunc (s *TestSearchUserSearch) SetupTest() {\n\ts.clean = cleaner.DeleteCreatedEntities(s.DB)\n}\n\nfunc (s *TestSearchUserSearch) TearDownTest() {\n\ts.clean = cleaner.DeleteCreatedEntities(s.DB)\n}\n\ntype userSearchTestArgs struct {\n\tpageOffset *string\n\tpageLimit *int\n\tq string\n}\n\ntype userSearchTestExpect func(*testing.T, okScenarioUserSearchTest, *app.UserList)\ntype userSearchTestExpects []userSearchTestExpect\n\ntype okScenarioUserSearchTest struct {\n\tname string\n\tuserSearchTestArgs userSearchTestArgs\n\tuserSearchTestExpects userSearchTestExpects\n}\n\nfunc (s *TestSearchUserSearch) TestUsersSearchOK() {\n\n\tidents := s.createTestData()\n\tdefer s.cleanTestData(idents)\n\n\ttests := []okScenarioUserSearchTest{\n\t\t{\"With lowercase fullname query\", userSearchTestArgs{s.offset(\"0\"), limit(10), \"x_test_ab\"}, userSearchTestExpects{s.totalCountAtLeast(3)}},\n\t\t{\"With uppercase fullname query\", userSearchTestArgs{s.offset(\"0\"), limit(10), \"X_TEST_AB\"}, userSearchTestExpects{s.totalCountAtLeast(3)}},\n\t\t{\"With uppercase email query\", userSearchTestArgs{s.offset(\"0\"), limit(10), \"EMAIL_X_TEST_AB\"}, userSearchTestExpects{s.totalCountAtLeast(1)}},\n\t\t{\"With lowercase email query\", userSearchTestArgs{s.offset(\"0\"), limit(10), \"email_x_test_ab\"}, userSearchTestExpects{s.totalCountAtLeast(1)}},\n\t\t{\"With username query\", userSearchTestArgs{s.offset(\"0\"), limit(10), \"x_test_c\"}, userSearchTestExpects{s.totalCountAtLeast(3)}},\n\t\t{\"with special chars\", userSearchTestArgs{s.offset(\"0\"), limit(10), \"&:\\n!#%?*\"}, userSearchTestExpects{s.totalCount(0)}},\n\t\t{\"with multi page\", userSearchTestArgs{s.offset(\"0\"), limit(10), \"TEST\"}, userSearchTestExpects{s.hasLinks(\"Next\")}},\n\t\t{\"with last page\", userSearchTestArgs{s.offset(strconv.Itoa(len(idents) - 1)), limit(10), \"TEST\"}, userSearchTestExpects{s.hasNoLinks(\"Next\"), s.hasLinks(\"Prev\")}},\n\t\t{\"with different values\", userSearchTestArgs{s.offset(\"0\"), s.limit(10), \"TEST\"}, userSearchTestExpects{s.differentValues()}},\n\t}\n\n\tfor _, tt := range tests {\n\t\t_, result := test.UsersSearchOK(s.T(), context.Background(), s.svc, s.controller, tt.userSearchTestArgs.pageLimit, tt.userSearchTestArgs.pageOffset, tt.userSearchTestArgs.q)\n\t\tfor _, userSearchTestExpect := range tt.userSearchTestExpects {\n\t\t\tuserSearchTestExpect(s.T(), tt, 
result)\n\t\t}\n\t}\n}\n\nfunc (s *TestSearchUserSearch) TestUsersSearchBadRequest() {\n\tt := s.T()\n\ttests := []struct {\n\t\tname string\n\t\tuserSearchTestArgs userSearchTestArgs\n\t}{\n\t\t{\"with empty query\", userSearchTestArgs{s.offset(\"0\"), limit(10), \"\"}},\n\t}\n\n\tfor _, tt := range tests {\n\t\ttest.UsersSearchBadRequest(t, context.Background(), s.svc, s.controller, tt.userSearchTestArgs.pageLimit, tt.userSearchTestArgs.pageOffset, tt.userSearchTestArgs.q)\n\t}\n}\n\nfunc (s *TestSearchUserSearch) createTestData() []account.Identity {\n\tnames := []string{\"X_TEST_A\", \"X_TEST_AB\", \"X_TEST_B\", \"X_TEST_C\"}\n\temails := []string{\"email_x_test_ab@redhat.org\", \"email_x_test_a@redhat.org\", \"email_x_test_c@redhat.org\", \"email_x_test_b@redhat.org\"}\n\tusernames := []string{\"x_test_b\", \"x_test_c\", \"x_test_a\", \"x_test_ab\"}\n\tfor i := 0; i < 20; i++ {\n\t\tnames = append(names, \"TEST_\"+strconv.Itoa(i))\n\t\temails = append(emails, \"myemail\"+strconv.Itoa(i))\n\t\tusernames = append(usernames, \"myusernames\"+strconv.Itoa(i))\n\t}\n\n\tidents := []account.Identity{}\n\n\terr := application.Transactional(s.db, func(app application.Application) error {\n\t\tfor i, name := range names {\n\n\t\t\tuser := account.User{\n\t\t\t\tFullName: name,\n\t\t\t\tImageURL: \"http:\/\/example.org\/\" + name + \".png\",\n\t\t\t\tEmail: emails[i],\n\t\t\t}\n\t\t\terr := app.Users().Create(context.Background(), &user)\n\t\t\trequire.Nil(s.T(), err)\n\n\t\t\tident := account.Identity{\n\t\t\t\tUser: user,\n\t\t\t\tUsername: usernames[i] + uuid.NewV4().String(),\n\t\t\t\tProviderType: \"kc\",\n\t\t\t}\n\t\t\terr = app.Identities().Create(context.Background(), &ident)\n\t\t\trequire.Nil(s.T(), err)\n\n\t\t\tidents = append(idents, ident)\n\t\t}\n\t\treturn nil\n\t})\n\trequire.Nil(s.T(), err)\n\treturn idents\n}\n\nfunc (s *TestSearchUserSearch) cleanTestData(idents []account.Identity) {\n\terr := application.Transactional(s.db, func(app application.Application) error {\n\t\tdb := app.(*gormapplication.GormTransaction).DB()\n\t\tdb = db.Unscoped()\n\t\tfor _, ident := range idents {\n\t\t\tdb.Delete(ident)\n\t\t\tdb.Delete(&account.User{}, \"id = ?\", ident.User.ID)\n\t\t}\n\t\treturn nil\n\t})\n\trequire.Nil(s.T(), err)\n}\n\nfunc (s *TestSearchUserSearch) totalCount(count int) userSearchTestExpect {\n\treturn func(t *testing.T, scenario okScenarioUserSearchTest, result *app.UserList) {\n\t\tif got := result.Meta.TotalCount; got != count {\n\t\t\tt.Errorf(\"%s got = %v, want %v\", scenario.name, got, count)\n\t\t}\n\t}\n}\n\nfunc (s *TestSearchUserSearch) totalCountAtLeast(count int) userSearchTestExpect {\n\treturn func(t *testing.T, scenario okScenarioUserSearchTest, result *app.UserList) {\n\t\tgot := result.Meta.TotalCount\n\t\tif !(got >= count) {\n\t\t\tt.Errorf(\"%s got %v, wanted at least %v\", scenario.name, got, count)\n\t\t}\n\t}\n}\n\nfunc (s *TestSearchUserSearch) hasLinks(linkNames ...string) userSearchTestExpect {\n\treturn func(t *testing.T, scenario okScenarioUserSearchTest, result *app.UserList) {\n\t\tfor _, linkName := range linkNames {\n\t\t\tlink := linkName\n\t\t\tif reflect.Indirect(reflect.ValueOf(result.Links)).FieldByName(link).IsNil() {\n\t\t\t\tt.Errorf(\"%s got empty link, wanted %s\", scenario.name, link)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *TestSearchUserSearch) hasNoLinks(linkNames ...string) userSearchTestExpect {\n\treturn func(t *testing.T, scenario okScenarioUserSearchTest, result *app.UserList) {\n\t\tfor _, linkName := range linkNames 
{\n\t\t\tif !reflect.Indirect(reflect.ValueOf(result.Links)).FieldByName(linkName).IsNil() {\n\t\t\t\tt.Errorf(\"%s got link, wanted empty %s\", scenario.name, linkName)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *TestSearchUserSearch) differentValues() userSearchTestExpect {\n\treturn func(t *testing.T, scenario okScenarioUserSearchTest, result *app.UserList) {\n\t\tvar prev *app.UserData\n\n\t\tfor i := range result.Data {\n\t\t\tu := result.Data[i]\n\t\t\tif prev == nil {\n\t\t\t\tprev = u\n\t\t\t} else {\n\t\t\t\tif *prev.Attributes.FullName == *u.Attributes.FullName {\n\t\t\t\t\tt.Errorf(\"%s got equal Fullname, wanted different %s\", scenario.name, *u.Attributes.FullName)\n\t\t\t\t}\n\t\t\t\tif *prev.Attributes.ImageURL == *u.Attributes.ImageURL {\n\t\t\t\t\tt.Errorf(\"%s got equal ImageURL, wanted different %s\", scenario.name, *u.Attributes.ImageURL)\n\t\t\t\t}\n\t\t\t\tif *prev.ID == *u.ID {\n\t\t\t\t\tt.Errorf(\"%s got equal ID, wanted different %s\", scenario.name, *u.ID)\n\t\t\t\t}\n\t\t\t\tif prev.Type != u.Type {\n\t\t\t\t\tt.Errorf(\"%s got non equal Type, wanted same %s\", scenario.name, u.Type)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *TestSearchUserSearch) limit(n int) *int {\n\treturn &n\n}\nfunc (s *TestSearchUserSearch) offset(n string) *string {\n\treturn &n\n}\n<|endoftext|>"} {"text":"<commit_before>package uploader\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/dustin\/go-humanize\"\n\t\"github.com\/go-errors\/errors\"\n\t\"github.com\/itchio\/go-itchio\"\n\t\"github.com\/itchio\/wharf\/counter\"\n\t\"github.com\/itchio\/wharf\/pwr\"\n\t\"github.com\/itchio\/wharf\/splitfunc\"\n\t\"github.com\/itchio\/wharf\/timeout\"\n)\n\nvar seed = 0\n\nfunc fromEnv(envName string, defaultValue int) int {\n\tv := os.Getenv(envName)\n\tif v != \"\" {\n\t\tiv, err := strconv.Atoi(v)\n\t\tif err == nil {\n\t\t\tlog.Printf(\"Override set: %s = %d\", envName, iv)\n\t\t\treturn iv\n\t\t}\n\t}\n\treturn defaultValue\n}\n\nvar resumableMaxRetries = fromEnv(\"WHARF_MAX_RETRIES\", 15)\nvar resumableConnectTimeout = time.Duration(fromEnv(\"WHARF_CONNECT_TIMEOUT\", 30)) * time.Second\nvar resumableIdleTimeout = time.Duration(fromEnv(\"WHARF_IDLE_TIMEOUT\", 60)) * time.Second\n\n\/\/ ResumableUpload keeps track of an upload and reports back on its progress\ntype ResumableUpload struct {\n\tc *itchio.Client\n\n\tTotalBytes int64\n\tUploadedBytes int64\n\tOnProgress func()\n\n\t\/\/ resumable URL as per GCS\n\tuploadURL string\n\n\t\/\/ where data is written so we can update counts\n\twriteCounter io.Writer\n\n\t\/\/ need to flush to squeeze all the data out\n\tbufferedWriter *bufio.Writer\n\n\t\/\/ need to close so reader end of pipe gets EOF\n\tpipeWriter io.Closer\n\n\tid int\n\tconsumer *pwr.StateConsumer\n}\n\n\/\/ Close flushes all intermediary buffers and closes the connection\nfunc (ru *ResumableUpload) Close() error {\n\tvar err error\n\n\tru.Debugf(\"flushing buffered writer, %d written\", ru.TotalBytes)\n\n\terr = ru.bufferedWriter.Flush()\n\tif err != nil {\n\t\treturn errors.Wrap(err, 1)\n\t}\n\n\tru.Debugf(\"closing pipe writer\")\n\n\terr = ru.pipeWriter.Close()\n\tif err != nil {\n\t\treturn errors.Wrap(err, 1)\n\t}\n\n\tru.Debugf(\"closed pipe writer\")\n\tru.Debugf(\"everything closed! 
uploadedbytes = %d, totalbytes = %d\", ru.UploadedBytes, ru.TotalBytes)\n\n\treturn nil\n}\n\n\/\/ Write is our implementation of io.Writer\nfunc (ru *ResumableUpload) Write(p []byte) (int, error) {\n\treturn ru.writeCounter.Write(p)\n}\n\nfunc NewResumableUpload(uploadURL string, done chan bool, errs chan error, consumer *pwr.StateConsumer) (*ResumableUpload, error) {\n\tru := &ResumableUpload{}\n\tru.uploadURL = uploadURL\n\tru.id = seed\n\tseed++\n\tru.consumer = consumer\n\tru.c = itchio.ClientWithKey(\"x\")\n\tru.c.HTTPClient = timeout.NewClient(resumableConnectTimeout, resumableIdleTimeout)\n\n\tpipeR, pipeW := io.Pipe()\n\n\tru.pipeWriter = pipeW\n\n\t\/\/ TODO: make configurable?\n\tconst bufferSize = 32 * 1024 * 1024\n\n\tbufferedWriter := bufio.NewWriterSize(pipeW, bufferSize)\n\tru.bufferedWriter = bufferedWriter\n\n\tonWrite := func(count int64) {\n\t\t\/\/ ru.Debugf(\"onwrite %d\", count)\n\t\tru.TotalBytes = count\n\t\tif ru.OnProgress != nil {\n\t\t\tru.OnProgress()\n\t\t}\n\t}\n\tru.writeCounter = counter.NewWriterCallback(onWrite, bufferedWriter)\n\n\tgo ru.uploadChunks(pipeR, done, errs)\n\n\treturn ru, nil\n}\n\nfunc (ru *ResumableUpload) Debugf(f string, args ...interface{}) {\n\tru.consumer.Debugf(\"[upload %d] %s\", ru.id, fmt.Sprintf(f, args...))\n}\n\nconst minChunkSize = 256 * 1024 \/\/ 256KB\nconst maxChunkGroup = 64\nconst maxSendBuf = maxChunkGroup * minChunkSize \/\/ 16MB\n\ntype blockItem struct {\n\tbuf []byte\n\tisLast bool\n}\n\ntype netError struct {\n\terr error\n}\n\nfunc (ne *netError) Error() string {\n\treturn fmt.Sprintf(\"network error: %s\", ne.err.Error())\n}\n\nfunc (ru *ResumableUpload) uploadChunks(reader io.Reader, done chan bool, errs chan error) {\n\tvar offset int64 = 0\n\n\tsendBuf := make([]byte, 0, maxSendBuf)\n\treqBlocks := make(chan blockItem, maxChunkGroup)\n\n\tcanceller := make(chan bool)\n\n\tdoSendBytesOnce := func(buf []byte, isLast bool) error {\n\t\t\/\/ measure the chunk actually passed in, not the outer sendBuf\n\t\tbuflen := int64(len(buf))\n\t\tru.Debugf(\"uploading chunk of %d bytes\", buflen)\n\n\t\tbody := bytes.NewReader(buf)\n\t\tcountingReader := counter.NewReaderCallback(func(count int64) {\n\t\t\tru.UploadedBytes = offset + count\n\t\t\tif ru.OnProgress != nil {\n\t\t\t\tru.OnProgress()\n\t\t\t}\n\t\t}, body)\n\n\t\treq, err := http.NewRequest(\"PUT\", ru.uploadURL, countingReader)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, 1)\n\t\t}\n\n\t\tstart := offset\n\t\tend := start + buflen - 1\n\t\tcontentRange := fmt.Sprintf(\"bytes %d-%d\/*\", offset, end)\n\t\tru.Debugf(\"uploading %d-%d, last? 
%v\", start, end, isLast)\n\n\t\tif isLast {\n\t\t\tcontentRange = fmt.Sprintf(\"bytes %d-%d\/%d\", offset, end, offset+buflen)\n\t\t}\n\n\t\treq.Header.Set(\"content-range\", contentRange)\n\n\t\tres, err := ru.c.Do(req)\n\t\tif err != nil {\n\t\t\tru.Debugf(\"while uploading %d-%d: \\n%s\", start, end, err.Error())\n\t\t\treturn &netError{err}\n\t\t}\n\n\t\tif res.StatusCode != 200 && res.StatusCode != 308 {\n\t\t\tru.Debugf(\"uh oh, got HTTP %s\", res.Status)\n\t\t\tresb, _ := ioutil.ReadAll(res.Body)\n\t\t\tru.Debugf(\"server said %s\", string(resb))\n\t\t\terr = fmt.Errorf(\"HTTP %d while uploading\", res.StatusCode)\n\n\t\t\t\/\/ retry requests that return these, see full list\n\t\t\t\/\/ at https:\/\/cloud.google.com\/storage\/docs\/xml-api\/resumable-upload\n\t\t\t\/\/ see also https:\/\/github.com\/itchio\/butler\/issues\/71\n\t\t\tif res.StatusCode == 408 \/* Request Timeout *\/ ||\n\t\t\t\tres.StatusCode == 500 \/* Internal Server Error *\/ ||\n\t\t\t\tres.StatusCode == 502 \/* Bad Gateway *\/ ||\n\t\t\t\tres.StatusCode == 503 \/* Service Unavailable *\/ ||\n\t\t\t\tres.StatusCode == 504 \/* Gateway Timeout *\/ {\n\t\t\t\treturn &netError{err}\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\toffset += buflen\n\t\tru.Debugf(\"%s uploaded, at %s\", humanize.Bytes(uint64(offset)), res.Status)\n\t\treturn nil\n\t}\n\n\tdoSendBytes := func(buf []byte, isLast bool) error {\n\t\ttries := 1\n\n\t\tfor tries < resumableMaxRetries {\n\t\t\terr := doSendBytesOnce(buf, isLast)\n\t\t\tif err != nil {\n\t\t\t\tif ne, ok := err.(*netError); ok {\n\t\t\t\t\tdelay := tries * tries\n\t\t\t\t\tru.consumer.PauseProgress()\n\t\t\t\t\tru.consumer.Infof(\"\")\n\t\t\t\t\tru.consumer.Infof(\"%s\", ne.Error())\n\t\t\t\t\tru.consumer.Infof(\"Sleeping %d seconds then retrying\", delay)\n\t\t\t\t\ttime.Sleep(time.Second * time.Duration(delay))\n\t\t\t\t\tru.consumer.ResumeProgress()\n\t\t\t\t\ttries++\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\treturn errors.Wrap(err, 1)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\treturn fmt.Errorf(\"Too many network errors, giving up.\")\n\t}\n\n\ts := bufio.NewScanner(reader)\n\ts.Buffer(make([]byte, minChunkSize), 0)\n\ts.Split(splitfunc.New(minChunkSize))\n\n\t\/\/ we need two buffers to know when we're at EOF,\n\t\/\/ for sizes that are an exact multiple of minChunkSize\n\tbuf1 := make([]byte, 0, minChunkSize)\n\tbuf2 := make([]byte, 0, minChunkSize)\n\n\tsubDone := make(chan bool)\n\tsubErrs := make(chan error)\n\n\tru.Debugf(\"kicking off sender\")\n\n\tgo func() {\n\t\tisLast := false\n\n\t\tfor !isLast {\n\t\t\tsendBuf = sendBuf[:0]\n\n\t\t\tfor len(sendBuf) < maxSendBuf && !isLast {\n\t\t\t\tvar item blockItem\n\t\t\t\tif len(sendBuf) == 0 {\n\t\t\t\t\tru.Debugf(\"sender blocking receive\")\n\t\t\t\t\tselect {\n\t\t\t\t\tcase item = <-reqBlocks:\n\t\t\t\t\t\t\/\/ cool\n\t\t\t\t\tcase <-canceller:\n\t\t\t\t\t\tru.Debugf(\"send cancelled\")\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tru.Debugf(\"sender non-blocking receive\")\n\t\t\t\t\tselect {\n\t\t\t\t\tcase item = <-reqBlocks:\n\t\t\t\t\t\t\/\/ cool\n\t\t\t\t\tcase <-canceller:\n\t\t\t\t\t\tru.Debugf(\"send cancelled\")\n\t\t\t\t\t\treturn\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tru.Debugf(\"sent faster than scanned, uploading smaller chunk\")\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif item.isLast {\n\t\t\t\t\tisLast = true\n\t\t\t\t}\n\n\t\t\t\tsendBuf = append(sendBuf, item.buf...)\n\t\t\t}\n\n\t\t\tif len(sendBuf) > 0 {\n\t\t\t\terr := doSendBytes(sendBuf, 
isLast)\n\t\t\t\tif err != nil {\n\t\t\t\t\tru.Debugf(\"send error, bailing out\")\n\t\t\t\t\tsubErrs <- errors.Wrap(err, 1)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tsubDone <- true\n\t\tru.Debugf(\"sender done\")\n\t}()\n\n\tscannedBufs := make(chan []byte)\n\tusedBufs := make(chan bool)\n\n\tgo func() {\n\t\tfor s.Scan() {\n\t\t\tselect {\n\t\t\tcase scannedBufs <- s.Bytes():\n\t\t\t\t\/\/ woo\n\t\t\tcase <-canceller:\n\t\t\t\tru.Debugf(\"scan cancelled (1)\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase <-usedBufs:\n\t\t\t\t\/\/ woo\n\t\t\tcase <-canceller:\n\t\t\t\tru.Debugf(\"scan cancelled (2)\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tclose(scannedBufs)\n\t}()\n\n\t\/\/ break patch into chunks of minChunkSize, signal last block\n\tgo func() {\n\t\tfor scannedBuf := range scannedBufs {\n\t\t\tbuf2 = append(buf2[:0], buf1...)\n\t\t\tbuf1 = append(buf1[:0], scannedBuf...)\n\t\t\tusedBufs <- true\n\n\t\t\t\/\/ all but first iteration\n\t\t\tif len(buf2) > 0 {\n\t\t\t\tselect {\n\t\t\t\tcase reqBlocks <- blockItem{buf: append([]byte{}, buf2...), isLast: false}:\n\t\t\t\t\t\/\/ okay cool let's go c'mon\n\t\t\t\tcase <-canceller:\n\t\t\t\t\tru.Debugf(\"scan cancelled (3)\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\terr := s.Err()\n\t\tif err != nil {\n\t\t\tru.Debugf(\"scanner error :(\")\n\t\t\tsubErrs <- errors.Wrap(err, 1)\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase reqBlocks <- blockItem{buf: append([]byte{}, buf1...), isLast: true}:\n\t\tcase <-canceller:\n\t\t\tru.Debugf(\"scan cancelled (right near the finish line)\")\n\t\t\treturn\n\t\t}\n\n\t\tsubDone <- true\n\t\tru.Debugf(\"scanner done\")\n\t}()\n\n\tfor i := 0; i < 2; i++ {\n\t\tselect {\n\t\tcase <-subDone:\n\t\t\t\/\/ woo!\n\t\tcase err := <-subErrs:\n\t\t\tru.Debugf(\"got sub error: %s, bailing\", err.Error())\n\t\t\tclose(canceller)\n\t\t\terrs <- errors.Wrap(err, 1)\n\t\t\treturn\n\t\t}\n\t}\n\n\tdone <- true\n\tru.Debugf(\"done sent!\")\n}\n<commit_msg>More resumable debug, retry when last block responds with 308<commit_after>package uploader\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/dustin\/go-humanize\"\n\t\"github.com\/go-errors\/errors\"\n\t\"github.com\/itchio\/go-itchio\"\n\t\"github.com\/itchio\/wharf\/counter\"\n\t\"github.com\/itchio\/wharf\/pwr\"\n\t\"github.com\/itchio\/wharf\/splitfunc\"\n\t\"github.com\/itchio\/wharf\/timeout\"\n)\n\nvar seed = 0\n\nfunc fromEnv(envName string, defaultValue int) int {\n\tv := os.Getenv(envName)\n\tif v != \"\" {\n\t\tiv, err := strconv.Atoi(v)\n\t\tif err == nil {\n\t\t\tlog.Printf(\"Override set: %s = %d\", envName, iv)\n\t\t\treturn iv\n\t\t}\n\t}\n\treturn defaultValue\n}\n\nvar resumableMaxRetries = fromEnv(\"WHARF_MAX_RETRIES\", 15)\nvar resumableConnectTimeout = time.Duration(fromEnv(\"WHARF_CONNECT_TIMEOUT\", 30)) * time.Second\nvar resumableIdleTimeout = time.Duration(fromEnv(\"WHARF_IDLE_TIMEOUT\", 60)) * time.Second\n\n\/\/ ResumableUpload keeps track of an upload and reports back on its progress\ntype ResumableUpload struct {\n\tc *itchio.Client\n\n\tTotalBytes int64\n\tUploadedBytes int64\n\tOnProgress func()\n\n\t\/\/ resumable URL as per GCS\n\tuploadURL string\n\n\t\/\/ where data is written so we can update counts\n\twriteCounter io.Writer\n\n\t\/\/ need to flush to squeeze all the data out\n\tbufferedWriter *bufio.Writer\n\n\t\/\/ need to close so reader end of pipe gets EOF\n\tpipeWriter io.Closer\n\n\tid 
int\n\tconsumer *pwr.StateConsumer\n}\n\n\/\/ Close flushes all intermediary buffers and closes the connection\nfunc (ru *ResumableUpload) Close() error {\n\tvar err error\n\n\tru.Debugf(\"flushing buffered writer, %d written\", ru.TotalBytes)\n\n\terr = ru.bufferedWriter.Flush()\n\tif err != nil {\n\t\treturn errors.Wrap(err, 1)\n\t}\n\n\tru.Debugf(\"closing pipe writer\")\n\n\terr = ru.pipeWriter.Close()\n\tif err != nil {\n\t\treturn errors.Wrap(err, 1)\n\t}\n\n\tru.Debugf(\"closed pipe writer\")\n\tru.Debugf(\"everything closed! uploadedbytes = %d, totalbytes = %d\", ru.UploadedBytes, ru.TotalBytes)\n\n\treturn nil\n}\n\n\/\/ Write is our implementation of io.Writer\nfunc (ru *ResumableUpload) Write(p []byte) (int, error) {\n\treturn ru.writeCounter.Write(p)\n}\n\nfunc NewResumableUpload(uploadURL string, done chan bool, errs chan error, consumer *pwr.StateConsumer) (*ResumableUpload, error) {\n\tru := &ResumableUpload{}\n\tru.uploadURL = uploadURL\n\tru.id = seed\n\tseed++\n\tru.consumer = consumer\n\tru.c = itchio.ClientWithKey(\"x\")\n\tru.c.HTTPClient = timeout.NewClient(resumableConnectTimeout, resumableIdleTimeout)\n\n\tpipeR, pipeW := io.Pipe()\n\n\tru.pipeWriter = pipeW\n\n\t\/\/ TODO: make configurable?\n\tconst bufferSize = 32 * 1024 * 1024\n\n\tbufferedWriter := bufio.NewWriterSize(pipeW, bufferSize)\n\tru.bufferedWriter = bufferedWriter\n\n\tonWrite := func(count int64) {\n\t\t\/\/ ru.Debugf(\"onwrite %d\", count)\n\t\tru.TotalBytes = count\n\t\tif ru.OnProgress != nil {\n\t\t\tru.OnProgress()\n\t\t}\n\t}\n\tru.writeCounter = counter.NewWriterCallback(onWrite, bufferedWriter)\n\n\tgo ru.uploadChunks(pipeR, done, errs)\n\n\treturn ru, nil\n}\n\nfunc (ru *ResumableUpload) Debugf(f string, args ...interface{}) {\n\tru.consumer.Debugf(\"[upload %d] %s\", ru.id, fmt.Sprintf(f, args...))\n}\n\nconst minChunkSize = 256 * 1024 \/\/ 256KB\nconst maxChunkGroup = 64\nconst maxSendBuf = maxChunkGroup * minChunkSize \/\/ 16MB\n\ntype blockItem struct {\n\tbuf []byte\n\tisLast bool\n}\n\ntype netError struct {\n\terr error\n}\n\nfunc (ne *netError) Error() string {\n\treturn fmt.Sprintf(\"network error: %s\", ne.err.Error())\n}\n\nfunc (ru *ResumableUpload) uploadChunks(reader io.Reader, done chan bool, errs chan error) {\n\tvar offset int64 = 0\n\n\tsendBuf := make([]byte, 0, maxSendBuf)\n\treqBlocks := make(chan blockItem, maxChunkGroup)\n\n\tcanceller := make(chan bool)\n\n\tdoSendBytesOnce := func(buf []byte, isLast bool) error {\n\t\t\/\/ measure the chunk actually passed in, not the outer sendBuf\n\t\tbuflen := int64(len(buf))\n\t\tru.Debugf(\"uploading chunk of %d bytes\", buflen)\n\n\t\tbody := bytes.NewReader(buf)\n\t\tcountingReader := counter.NewReaderCallback(func(count int64) {\n\t\t\tru.UploadedBytes = offset + count\n\t\t\tif ru.OnProgress != nil {\n\t\t\t\tru.OnProgress()\n\t\t\t}\n\t\t}, body)\n\n\t\treq, err := http.NewRequest(\"PUT\", ru.uploadURL, countingReader)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, 1)\n\t\t}\n\n\t\tstart := offset\n\t\tend := start + buflen - 1\n\t\tcontentRange := fmt.Sprintf(\"bytes %d-%d\/*\", offset, end)\n\t\tru.Debugf(\"uploading %d-%d, last? 
%v\", start, end, isLast)\n\n\t\tif isLast {\n\t\t\tcontentRange = fmt.Sprintf(\"bytes %d-%d\/%d\", offset, end, offset+buflen)\n\t\t}\n\n\t\treq.Header.Set(\"content-range\", contentRange)\n\n\t\tstartTime := time.Now()\n\n\t\tres, err := ru.c.Do(req)\n\t\tif err != nil {\n\t\t\tru.Debugf(\"while uploading %d-%d: \\n%s\", start, end, err.Error())\n\t\t\treturn &netError{err}\n\t\t}\n\n\t\tru.Debugf(\"server replied in %s, with status %s\", time.Since(startTime), res.Status)\n\t\tfor k, v := range res.Header {\n\t\t\tru.Debugf(\"[Reply header] %s: %s\", k, v)\n\t\t}\n\n\t\tif res.StatusCode != 200 && res.StatusCode != 308 {\n\t\t\tru.Debugf(\"uh oh, got HTTP %s\", res.Status)\n\t\t\tresb, _ := ioutil.ReadAll(res.Body)\n\t\t\tru.Debugf(\"server said %s\", string(resb))\n\t\t\terr = fmt.Errorf(\"HTTP %d while uploading\", res.StatusCode)\n\n\t\t\t\/\/ retry requests that return these, see full list\n\t\t\t\/\/ at https:\/\/cloud.google.com\/storage\/docs\/xml-api\/resumable-upload\n\t\t\t\/\/ see also https:\/\/github.com\/itchio\/butler\/issues\/71\n\t\t\tif res.StatusCode == 408 \/* Request Timeout *\/ ||\n\t\t\t\tres.StatusCode == 500 \/* Internal Server Error *\/ ||\n\t\t\t\tres.StatusCode == 502 \/* Bad Gateway *\/ ||\n\t\t\t\tres.StatusCode == 503 \/* Service Unavailable *\/ ||\n\t\t\t\tres.StatusCode == 504 \/* Gateway Timeout *\/ {\n\t\t\t\treturn &netError{err}\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\tif res.StatusCode == 308 && isLast {\n\t\t\tru.Debugf(\"Got 308 on last, retrying...\")\n\t\t\terr = fmt.Errorf(\"HTTP %d while sending last block, that's no good.\", res.StatusCode)\n\t\t\treturn &netError{err}\n\t\t}\n\n\t\toffset += buflen\n\t\tru.Debugf(\"%s uploaded, at %s\", humanize.Bytes(uint64(offset)), res.Status)\n\t\treturn nil\n\t}\n\n\tdoSendBytes := func(buf []byte, isLast bool) error {\n\t\ttries := 1\n\n\t\tfor tries < resumableMaxRetries {\n\t\t\terr := doSendBytesOnce(buf, isLast)\n\t\t\tif err != nil {\n\t\t\t\tif ne, ok := err.(*netError); ok {\n\t\t\t\t\tdelay := tries * tries\n\t\t\t\t\tru.consumer.PauseProgress()\n\t\t\t\t\tru.consumer.Infof(\"\")\n\t\t\t\t\tru.consumer.Infof(\"%s\", ne.Error())\n\t\t\t\t\tru.consumer.Infof(\"Sleeping %d seconds then retrying\", delay)\n\t\t\t\t\ttime.Sleep(time.Second * time.Duration(delay))\n\t\t\t\t\tru.consumer.ResumeProgress()\n\t\t\t\t\ttries++\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\treturn errors.Wrap(err, 1)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\treturn fmt.Errorf(\"Too many network errors, giving up.\")\n\t}\n\n\ts := bufio.NewScanner(reader)\n\ts.Buffer(make([]byte, minChunkSize), 0)\n\ts.Split(splitfunc.New(minChunkSize))\n\n\t\/\/ we need two buffers to know when we're at EOF,\n\t\/\/ for sizes that are an exact multiple of minChunkSize\n\tbuf1 := make([]byte, 0, minChunkSize)\n\tbuf2 := make([]byte, 0, minChunkSize)\n\n\tsubDone := make(chan bool)\n\tsubErrs := make(chan error)\n\n\tru.Debugf(\"kicking off sender\")\n\n\tgo func() {\n\t\tisLast := false\n\n\t\tfor !isLast {\n\t\t\tsendBuf = sendBuf[:0]\n\n\t\t\tfor len(sendBuf) < maxSendBuf && !isLast {\n\t\t\t\tvar item blockItem\n\t\t\t\tif len(sendBuf) == 0 {\n\t\t\t\t\tru.Debugf(\"sender blocking receive\")\n\t\t\t\t\tselect {\n\t\t\t\t\tcase item = <-reqBlocks:\n\t\t\t\t\t\t\/\/ cool\n\t\t\t\t\tcase <-canceller:\n\t\t\t\t\t\tru.Debugf(\"send cancelled\")\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tru.Debugf(\"sender non-blocking receive\")\n\t\t\t\t\tselect {\n\t\t\t\t\tcase item = 
<-reqBlocks:\n\t\t\t\t\t\t\/\/ cool\n\t\t\t\t\tcase <-canceller:\n\t\t\t\t\t\tru.Debugf(\"send cancelled\")\n\t\t\t\t\t\treturn\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tru.Debugf(\"sent faster than scanned, uploading smaller chunk\")\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif item.isLast {\n\t\t\t\t\tisLast = true\n\t\t\t\t}\n\n\t\t\t\tsendBuf = append(sendBuf, item.buf...)\n\t\t\t}\n\n\t\t\tif len(sendBuf) > 0 {\n\t\t\t\terr := doSendBytes(sendBuf, isLast)\n\t\t\t\tif err != nil {\n\t\t\t\t\tru.Debugf(\"send error, bailing out\")\n\t\t\t\t\tsubErrs <- errors.Wrap(err, 1)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tsubDone <- true\n\t\tru.Debugf(\"sender done\")\n\t}()\n\n\tscannedBufs := make(chan []byte)\n\tusedBufs := make(chan bool)\n\n\tgo func() {\n\t\tfor s.Scan() {\n\t\t\tselect {\n\t\t\tcase scannedBufs <- s.Bytes():\n\t\t\t\t\/\/ woo\n\t\t\tcase <-canceller:\n\t\t\t\tru.Debugf(\"scan cancelled (1)\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase <-usedBufs:\n\t\t\t\t\/\/ woo\n\t\t\tcase <-canceller:\n\t\t\t\tru.Debugf(\"scan cancelled (2)\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tclose(scannedBufs)\n\t}()\n\n\t\/\/ break patch into chunks of minChunkSize, signal last block\n\tgo func() {\n\t\tfor scannedBuf := range scannedBufs {\n\t\t\tbuf2 = append(buf2[:0], buf1...)\n\t\t\tbuf1 = append(buf1[:0], scannedBuf...)\n\t\t\tusedBufs <- true\n\n\t\t\t\/\/ all but first iteration\n\t\t\tif len(buf2) > 0 {\n\t\t\t\tselect {\n\t\t\t\tcase reqBlocks <- blockItem{buf: append([]byte{}, buf2...), isLast: false}:\n\t\t\t\t\t\/\/ okay cool let's go c'mon\n\t\t\t\tcase <-canceller:\n\t\t\t\t\tru.Debugf(\"scan cancelled (3)\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\terr := s.Err()\n\t\tif err != nil {\n\t\t\tru.Debugf(\"scanner error :(\")\n\t\t\tsubErrs <- errors.Wrap(err, 1)\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase reqBlocks <- blockItem{buf: append([]byte{}, buf1...), isLast: true}:\n\t\tcase <-canceller:\n\t\t\tru.Debugf(\"scan cancelled (right near the finish line)\")\n\t\t\treturn\n\t\t}\n\n\t\tsubDone <- true\n\t\tru.Debugf(\"scanner done\")\n\t}()\n\n\tfor i := 0; i < 2; i++ {\n\t\tselect {\n\t\tcase <-subDone:\n\t\t\t\/\/ woo!\n\t\tcase err := <-subErrs:\n\t\t\tru.Debugf(\"got sub error: %s, bailing\", err.Error())\n\t\t\tclose(canceller)\n\t\t\terrs <- errors.Wrap(err, 1)\n\t\t\treturn\n\t\t}\n\t}\n\n\tdone <- true\n\tru.Debugf(\"done sent!\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package usagerecord provides the \/subscription_items\/{SUBSCRIPTION_ITEM_ID}\/usage_records APIs\npackage usagerecord\n\nimport (\n\t\"net\/http\"\n\n\tstripe \"github.com\/stripe\/stripe-go\"\n)\n\n\/\/ Client is used to invoke \/plans APIs.\ntype Client struct {\n\tB stripe.Backend\n\tKey string\n}\n\n\/\/ New creates a new usage record.\nfunc New(params *stripe.UsageRecordParams) (*stripe.UsageRecord, error) {\n\treturn getC().New(params)\n}\n\n\/\/ New creates a new usage record.\nfunc (c Client) New(params *stripe.UsageRecordParams) (*stripe.UsageRecord, error) {\n\tpath := stripe.FormatURLPath(\"\/subscription_items\/%s\/usage_records\", stripe.StringValue(params.SubscriptionItem))\n\trecord := &stripe.UsageRecord{}\n\terr := c.B.Call(http.MethodPost, path, c.Key, params, record)\n\treturn record, err\n}\n\nfunc getC() Client {\n\treturn Client{stripe.GetBackend(stripe.APIBackend), stripe.Key}\n}\n<commit_msg>Correct comment on `usagerecord`'s `Client` struct<commit_after>\/\/ Package usagerecord provides the 
\/subscription_items\/{SUBSCRIPTION_ITEM_ID}\/usage_records APIs\npackage usagerecord\n\nimport (\n\t\"net\/http\"\n\n\tstripe \"github.com\/stripe\/stripe-go\"\n)\n\n\/\/ Client is used to invoke APIs related to usage records.\ntype Client struct {\n\tB stripe.Backend\n\tKey string\n}\n\n\/\/ New creates a new usage record.\nfunc New(params *stripe.UsageRecordParams) (*stripe.UsageRecord, error) {\n\treturn getC().New(params)\n}\n\n\/\/ New creates a new usage record.\nfunc (c Client) New(params *stripe.UsageRecordParams) (*stripe.UsageRecord, error) {\n\tpath := stripe.FormatURLPath(\"\/subscription_items\/%s\/usage_records\", stripe.StringValue(params.SubscriptionItem))\n\trecord := &stripe.UsageRecord{}\n\terr := c.B.Call(http.MethodPost, path, c.Key, params, record)\n\treturn record, err\n}\n\nfunc getC() Client {\n\treturn Client{stripe.GetBackend(stripe.APIBackend), stripe.Key}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/go:generate struct-markdown\n\/\/go:generate mapstructure-to-hcl2 -type ExportConfig\n\npackage common\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\/sha1\"\n\t\"crypto\/sha256\"\n\t\"crypto\/sha512\"\n\t\"fmt\"\n\t\"hash\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/packer\/builder\/vsphere\/driver\"\n\t\"github.com\/hashicorp\/packer\/common\"\n\t\"github.com\/hashicorp\/packer\/helper\/multistep\"\n\t\"github.com\/hashicorp\/packer\/packer\"\n\t\"github.com\/hashicorp\/packer\/template\/interpolate\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/vmware\/govmomi\/nfc\"\n\t\"github.com\/vmware\/govmomi\/vim25\/soap\"\n\t\"github.com\/vmware\/govmomi\/vim25\/types\"\n)\n\n\/\/ You may optionally export an ovf from VSphere to the instance running Packer.\n\/\/\n\/\/ Example usage:\n\/\/\n\/\/ ```json\n\/\/ ...\n\/\/ \"vm_name\": \"example-ubuntu\",\n\/\/ ...\n\/\/ \"export\": {\n\/\/ \"force\": true,\n\/\/ \"output_directory\": \".\/output_vsphere\"\n\/\/ },\n\/\/ ```\n\/\/ The above configuration would create the following files:\n\/\/\n\/\/ ```text\n\/\/ .\/output_vsphere\/example-ubuntu-disk-0.vmdk\n\/\/ .\/output_vsphere\/example-ubuntu.mf\n\/\/ .\/output_vsphere\/example-ubuntu.ovf\n\/\/ ```\ntype ExportConfig struct {\n\t\/\/ name of the ovf. defaults to the name of the VM\n\tName string `mapstructure:\"name\"`\n\t\/\/ overwrite ovf if it exists\n\tForce bool `mapstructure:\"force\"`\n\t\/\/ include iso and img image files that are attached to the VM\n\tImages bool `mapstructure:\"images\"`\n\t\/\/ generate manifest using sha1, sha256, sha512. Defaults to 'sha256'. Use 'none' for no manifest.\n\tManifest string `mapstructure:\"manifest\"`\n\t\/\/ Directory on the computer running Packer to export files to\n\tOutputDir OutputConfig `mapstructure:\",squash\"`\n\t\/\/ Advanced ovf export options. 
Options can include:\n\t\/\/ * mac - MAC address is exported for all ethernet devices\n\t\/\/ * uuid - UUID is exported for all virtual machines\n\t\/\/ * extraconfig - all extra configuration options are exported for a virtual machine\n\t\/\/ * nodevicesubtypes - resource subtypes for CD\/DVD drives, floppy drives, and serial and parallel ports are not exported\n\t\/\/\n\t\/\/ For example, adding the following export config option would output the mac addresses for all Ethernet devices in the ovf file:\n\t\/\/\n\t\/\/ ```json\n\t\/\/ ...\n\t\/\/ \"export\": {\n\t\/\/ \"options\": [\"mac\"]\n\t\/\/ },\n\t\/\/ ```\n\tOptions []string `mapstructure:\"options\"`\n}\n\nvar sha = map[string]func() hash.Hash{\n\t\"none\": nil,\n\t\"sha1\": sha1.New,\n\t\"sha256\": sha256.New,\n\t\"sha512\": sha512.New,\n}\n\nfunc (c *ExportConfig) Prepare(ctx *interpolate.Context, lc *LocationConfig, pc *common.PackerConfig) []error {\n\tvar errs *packer.MultiError\n\n\terrs = packer.MultiErrorAppend(errs, c.OutputDir.Prepare(ctx, pc)...)\n\n\t\/\/ manifest should default to sha256\n\tif c.Manifest == \"\" {\n\t\tc.Manifest = \"sha256\"\n\t}\n\tif _, ok := sha[c.Manifest]; !ok {\n\t\terrs = packer.MultiErrorAppend(errs, fmt.Errorf(\"unknown hash: %s. available options include available options being 'none', 'sha1', 'sha256', 'sha512'\", c.Manifest))\n\t}\n\n\tif c.Name == \"\" {\n\t\tc.Name = lc.VMName\n\t}\n\ttarget := getTarget(c.OutputDir.OutputDir, c.Name)\n\tif !c.Force {\n\t\tif _, err := os.Stat(target); err == nil {\n\t\t\terrs = packer.MultiErrorAppend(errs, fmt.Errorf(\"file already exists: %s\", target))\n\t\t}\n\t}\n\n\tif err := os.MkdirAll(c.OutputDir.OutputDir, 0750); err != nil {\n\t\terrs = packer.MultiErrorAppend(errs, errors.Wrap(err, \"unable to make directory for export\"))\n\t}\n\n\tif errs != nil && len(errs.Errors) > 0 {\n\t\treturn errs.Errors\n\t}\n\n\treturn nil\n}\n\nfunc getTarget(dir string, name string) string {\n\treturn filepath.Join(dir, name+\".ovf\")\n}\n\ntype StepExport struct {\n\tName string\n\tForce bool\n\tImages bool\n\tManifest string\n\tOutputDir string\n\tOptions []string\n\tmf bytes.Buffer\n}\n\nfunc (s *StepExport) Cleanup(multistep.StateBag) {\n}\n\nfunc (s *StepExport) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {\n\tui := state.Get(\"ui\").(packer.Ui)\n\tvm := state.Get(\"vm\").(*driver.VirtualMachine)\n\n\tui.Message(\"Starting export...\")\n\tlease, err := vm.Export()\n\tif err != nil {\n\t\tstate.Put(\"error\", errors.Wrap(err, \"error exporting vm\"))\n\t\treturn multistep.ActionHalt\n\t}\n\n\tinfo, err := lease.Wait(ctx, nil)\n\tif err != nil {\n\t\tstate.Put(\"error\", err)\n\t\treturn multistep.ActionHalt\n\t}\n\n\tu := lease.StartUpdater(ctx, info)\n\tdefer u.Done()\n\n\tcdp := types.OvfCreateDescriptorParams{\n\t\tName: s.Name,\n\t}\n\n\tm := vm.NewOvfManager()\n\tif len(s.Options) > 0 {\n\t\texportOptions, err := vm.GetOvfExportOptions(m)\n\t\tif err != nil {\n\t\t\tstate.Put(\"error\", err)\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\t\tvar unknown []string\n\t\tfor _, option := range s.Options {\n\t\t\tfound := false\n\t\t\tfor _, exportOpt := range exportOptions {\n\t\t\t\tif exportOpt.Option == option {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tunknown = append(unknown, option)\n\t\t\t}\n\t\t\tcdp.ExportOption = append(cdp.ExportOption, option)\n\t\t}\n\n\t\t\/\/ only printing error message because the unknown options are just ignored by vcenter\n\t\tif len(unknown) > 0 
{\n\t\t\tui.Error(fmt.Sprintf(\"unknown export options %s\", strings.Join(unknown, \",\")))\n\t\t}\n\t}\n\n\tfor _, i := range info.Items {\n\t\tif !s.include(&i) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif !strings.HasPrefix(i.Path, s.Name) {\n\t\t\ti.Path = s.Name + \"-\" + i.Path\n\t\t}\n\n\t\tui.Message(\"Downloading: \" + i.File().Path)\n\t\terr = s.Download(ctx, lease, i)\n\t\tif err != nil {\n\t\t\tstate.Put(\"error\", err)\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\n\t\tui.Message(\"Exporting file: \" + i.File().Path)\n\t\tcdp.OvfFiles = append(cdp.OvfFiles, i.File())\n\t}\n\n\tif err = lease.Complete(ctx); err != nil {\n\t\tstate.Put(\"error\", errors.Wrap(err, \"unable to complete lease\"))\n\t\treturn multistep.ActionHalt\n\t}\n\n\tdesc, err := vm.CreateDescriptor(m, cdp)\n\tif err != nil {\n\t\tstate.Put(\"error\", errors.Wrap(err, \"unable to create descriptor\"))\n\t\treturn multistep.ActionHalt\n\t}\n\n\ttarget := getTarget(s.OutputDir, s.Name)\n\tfile, err := os.Create(target)\n\tif err != nil {\n\t\tstate.Put(\"error\", errors.Wrap(err, \"unable to create file: \"+target))\n\t\treturn multistep.ActionHalt\n\t}\n\n\tvar w io.Writer = file\n\th, ok := s.newHash()\n\tif ok {\n\t\tw = io.MultiWriter(file, h)\n\t}\n\n\tui.Message(\"Writing ovf...\")\n\t_, err = io.WriteString(w, desc.OvfDescriptor)\n\tif err != nil {\n\t\tstate.Put(\"error\", errors.Wrap(err, \"unable to write descriptor\"))\n\t\treturn multistep.ActionHalt\n\t}\n\n\tif err = file.Close(); err != nil {\n\t\tstate.Put(\"error\", errors.Wrap(err, \"unable to close descriptor\"))\n\t\treturn multistep.ActionHalt\n\t}\n\n\tif s.Manifest == \"none\" {\n\t\t\/\/ manifest does not need to be created, return\n\t\treturn multistep.ActionContinue\n\t}\n\n\tui.Message(\"Creating manifest...\")\n\ts.addHash(filepath.Base(target), h)\n\n\tfile, err = os.Create(filepath.Join(s.OutputDir, s.Name+\".mf\"))\n\tif err != nil {\n\t\tstate.Put(\"error\", errors.Wrap(err, \"unable to create manifest\"))\n\t\treturn multistep.ActionHalt\n\t}\n\n\t_, err = io.Copy(file, &s.mf)\n\tif err != nil {\n\t\tstate.Put(\"error\", errors.Wrap(err, \"unable to write manifest\"))\n\t\treturn multistep.ActionHalt\n\t}\n\n\terr = file.Close()\n\tif err != nil {\n\t\tstate.Put(\"error\", errors.Wrap(err, \"unable to close file\"))\n\t\treturn multistep.ActionHalt\n\t}\n\n\tui.Message(\"Finished exporting...\")\n\treturn multistep.ActionContinue\n}\n\nfunc (s *StepExport) include(item *nfc.FileItem) bool {\n\tif s.Images {\n\t\treturn true\n\t}\n\n\treturn filepath.Ext(item.Path) == \".vmdk\"\n}\n\nfunc (s *StepExport) newHash() (hash.Hash, bool) {\n\t\/\/ check if function is nil to handle the 'none' case\n\tif h, ok := sha[s.Manifest]; ok && h != nil {\n\t\treturn h(), true\n\t}\n\n\treturn nil, false\n}\n\nfunc (s *StepExport) addHash(p string, h hash.Hash) {\n\t_, _ = fmt.Fprintf(&s.mf, \"%s(%s)= %x\\n\", strings.ToUpper(s.Manifest), p, h.Sum(nil))\n}\n\nfunc (s *StepExport) Download(ctx context.Context, lease *nfc.Lease, item nfc.FileItem) error {\n\tpath := filepath.Join(s.OutputDir, item.Path)\n\topts := soap.Download{}\n\n\tif h, ok := s.newHash(); ok {\n\t\topts.Writer = h\n\t\tdefer s.addHash(item.Path, h)\n\t}\n\n\treturn lease.DownloadFile(ctx, path, item, opts)\n}\n<commit_msg>fix file size descriptor (#9568)<commit_after>\/\/go:generate struct-markdown\n\/\/go:generate mapstructure-to-hcl2 -type ExportConfig\n\npackage common\n\nimport 
(\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\/sha1\"\n\t\"crypto\/sha256\"\n\t\"crypto\/sha512\"\n\t\"fmt\"\n\t\"hash\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/packer\/builder\/vsphere\/driver\"\n\t\"github.com\/hashicorp\/packer\/common\"\n\t\"github.com\/hashicorp\/packer\/helper\/multistep\"\n\t\"github.com\/hashicorp\/packer\/packer\"\n\t\"github.com\/hashicorp\/packer\/template\/interpolate\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/vmware\/govmomi\/nfc\"\n\t\"github.com\/vmware\/govmomi\/vim25\/soap\"\n\t\"github.com\/vmware\/govmomi\/vim25\/types\"\n)\n\n\/\/ You may optionally export an ovf from VSphere to the instance running Packer.\n\/\/\n\/\/ Example usage:\n\/\/\n\/\/ ```json\n\/\/ ...\n\/\/ \"vm_name\": \"example-ubuntu\",\n\/\/ ...\n\/\/ \"export\": {\n\/\/ \"force\": true,\n\/\/ \"output_directory\": \".\/output_vsphere\"\n\/\/ },\n\/\/ ```\n\/\/ The above configuration would create the following files:\n\/\/\n\/\/ ```text\n\/\/ .\/output_vsphere\/example-ubuntu-disk-0.vmdk\n\/\/ .\/output_vsphere\/example-ubuntu.mf\n\/\/ .\/output_vsphere\/example-ubuntu.ovf\n\/\/ ```\ntype ExportConfig struct {\n\t\/\/ name of the ovf. defaults to the name of the VM\n\tName string `mapstructure:\"name\"`\n\t\/\/ overwrite ovf if it exists\n\tForce bool `mapstructure:\"force\"`\n\t\/\/ include iso and img image files that are attached to the VM\n\tImages bool `mapstructure:\"images\"`\n\t\/\/ generate manifest using sha1, sha256, sha512. Defaults to 'sha256'. Use 'none' for no manifest.\n\tManifest string `mapstructure:\"manifest\"`\n\t\/\/ Directory on the computer running Packer to export files to\n\tOutputDir OutputConfig `mapstructure:\",squash\"`\n\t\/\/ Advanced ovf export options. Options can include:\n\t\/\/ * mac - MAC address is exported for all ethernet devices\n\t\/\/ * uuid - UUID is exported for all virtual machines\n\t\/\/ * extraconfig - all extra configuration options are exported for a virtual machine\n\t\/\/ * nodevicesubtypes - resource subtypes for CD\/DVD drives, floppy drives, and serial and parallel ports are not exported\n\t\/\/\n\t\/\/ For example, adding the following export config option would output the mac addresses for all Ethernet devices in the ovf file:\n\t\/\/\n\t\/\/ ```json\n\t\/\/ ...\n\t\/\/ \"export\": {\n\t\/\/ \"options\": [\"mac\"]\n\t\/\/ },\n\t\/\/ ```\n\tOptions []string `mapstructure:\"options\"`\n}\n\nvar sha = map[string]func() hash.Hash{\n\t\"none\": nil,\n\t\"sha1\": sha1.New,\n\t\"sha256\": sha256.New,\n\t\"sha512\": sha512.New,\n}\n\nfunc (c *ExportConfig) Prepare(ctx *interpolate.Context, lc *LocationConfig, pc *common.PackerConfig) []error {\n\tvar errs *packer.MultiError\n\n\terrs = packer.MultiErrorAppend(errs, c.OutputDir.Prepare(ctx, pc)...)\n\n\t\/\/ manifest should default to sha256\n\tif c.Manifest == \"\" {\n\t\tc.Manifest = \"sha256\"\n\t}\n\tif _, ok := sha[c.Manifest]; !ok {\n\t\terrs = packer.MultiErrorAppend(errs, fmt.Errorf(\"unknown hash: %s. 
available options are 'none', 'sha1', 'sha256', 'sha512'\", c.Manifest))\n\t}\n\n\tif c.Name == \"\" {\n\t\tc.Name = lc.VMName\n\t}\n\ttarget := getTarget(c.OutputDir.OutputDir, c.Name)\n\tif !c.Force {\n\t\tif _, err := os.Stat(target); err == nil {\n\t\t\terrs = packer.MultiErrorAppend(errs, fmt.Errorf(\"file already exists: %s\", target))\n\t\t}\n\t}\n\n\tif err := os.MkdirAll(c.OutputDir.OutputDir, 0750); err != nil {\n\t\terrs = packer.MultiErrorAppend(errs, errors.Wrap(err, \"unable to make directory for export\"))\n\t}\n\n\tif errs != nil && len(errs.Errors) > 0 {\n\t\treturn errs.Errors\n\t}\n\n\treturn nil\n}\n\nfunc getTarget(dir string, name string) string {\n\treturn filepath.Join(dir, name+\".ovf\")\n}\n\ntype StepExport struct {\n\tName string\n\tForce bool\n\tImages bool\n\tManifest string\n\tOutputDir string\n\tOptions []string\n\tmf bytes.Buffer\n}\n\nfunc (s *StepExport) Cleanup(multistep.StateBag) {\n}\n\nfunc (s *StepExport) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {\n\tui := state.Get(\"ui\").(packer.Ui)\n\tvm := state.Get(\"vm\").(*driver.VirtualMachine)\n\n\tui.Message(\"Starting export...\")\n\tlease, err := vm.Export()\n\tif err != nil {\n\t\tstate.Put(\"error\", errors.Wrap(err, \"error exporting vm\"))\n\t\treturn multistep.ActionHalt\n\t}\n\n\tinfo, err := lease.Wait(ctx, nil)\n\tif err != nil {\n\t\tstate.Put(\"error\", err)\n\t\treturn multistep.ActionHalt\n\t}\n\n\tu := lease.StartUpdater(ctx, info)\n\tdefer u.Done()\n\n\tcdp := types.OvfCreateDescriptorParams{\n\t\tName: s.Name,\n\t}\n\n\tm := vm.NewOvfManager()\n\tif len(s.Options) > 0 {\n\t\texportOptions, err := vm.GetOvfExportOptions(m)\n\t\tif err != nil {\n\t\t\tstate.Put(\"error\", err)\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\t\tvar unknown []string\n\t\tfor _, option := range s.Options {\n\t\t\tfound := false\n\t\t\tfor _, exportOpt := range exportOptions {\n\t\t\t\tif exportOpt.Option == option {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tunknown = append(unknown, option)\n\t\t\t}\n\t\t\tcdp.ExportOption = append(cdp.ExportOption, option)\n\t\t}\n\n\t\t\/\/ only printing error message because the unknown options are just ignored by vcenter\n\t\tif len(unknown) > 0 {\n\t\t\tui.Error(fmt.Sprintf(\"unknown export options %s\", strings.Join(unknown, \",\")))\n\t\t}\n\t}\n\n\tfor _, i := range info.Items {\n\t\tif !s.include(&i) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif !strings.HasPrefix(i.Path, s.Name) {\n\t\t\ti.Path = s.Name + \"-\" + i.Path\n\t\t}\n\n\t\tfile := i.File()\n\n\t\tui.Message(\"Downloading: \" + file.Path)\n\t\tsize, err := s.Download(ctx, lease, i)\n\t\tif err != nil {\n\t\t\tstate.Put(\"error\", err)\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\n\t\t\/\/ Fix file size descriptor\n\t\tfile.Size = size\n\n\t\tui.Message(\"Exporting file: \" + file.Path)\n\t\tcdp.OvfFiles = append(cdp.OvfFiles, file)\n\t}\n\n\tif err = lease.Complete(ctx); err != nil {\n\t\tstate.Put(\"error\", errors.Wrap(err, \"unable to complete lease\"))\n\t\treturn multistep.ActionHalt\n\t}\n\n\tdesc, err := vm.CreateDescriptor(m, cdp)\n\tif err != nil {\n\t\tstate.Put(\"error\", errors.Wrap(err, \"unable to create descriptor\"))\n\t\treturn multistep.ActionHalt\n\t}\n\n\ttarget := getTarget(s.OutputDir, s.Name)\n\tfile, err := os.Create(target)\n\tif err != nil {\n\t\tstate.Put(\"error\", errors.Wrap(err, \"unable to create file: \"+target))\n\t\treturn multistep.ActionHalt\n\t}\n\n\tvar w io.Writer = file\n\th, ok 
:= s.newHash()\n\tif ok {\n\t\tw = io.MultiWriter(file, h)\n\t}\n\n\tui.Message(\"Writing ovf...\")\n\t_, err = io.WriteString(w, desc.OvfDescriptor)\n\tif err != nil {\n\t\tstate.Put(\"error\", errors.Wrap(err, \"unable to write descriptor\"))\n\t\treturn multistep.ActionHalt\n\t}\n\n\tif err = file.Close(); err != nil {\n\t\tstate.Put(\"error\", errors.Wrap(err, \"unable to close descriptor\"))\n\t\treturn multistep.ActionHalt\n\t}\n\n\tif s.Manifest == \"none\" {\n\t\t\/\/ manifest does not need to be created, return\n\t\treturn multistep.ActionContinue\n\t}\n\n\tui.Message(\"Creating manifest...\")\n\ts.addHash(filepath.Base(target), h)\n\n\tfile, err = os.Create(filepath.Join(s.OutputDir, s.Name+\".mf\"))\n\tif err != nil {\n\t\tstate.Put(\"error\", errors.Wrap(err, \"unable to create manifest\"))\n\t\treturn multistep.ActionHalt\n\t}\n\n\t_, err = io.Copy(file, &s.mf)\n\tif err != nil {\n\t\tstate.Put(\"error\", errors.Wrap(err, \"unable to write manifest\"))\n\t\treturn multistep.ActionHalt\n\t}\n\n\terr = file.Close()\n\tif err != nil {\n\t\tstate.Put(\"error\", errors.Wrap(err, \"unable to close file\"))\n\t\treturn multistep.ActionHalt\n\t}\n\n\tui.Message(\"Finished exporting...\")\n\treturn multistep.ActionContinue\n}\n\nfunc (s *StepExport) include(item *nfc.FileItem) bool {\n\tif s.Images {\n\t\treturn true\n\t}\n\n\treturn filepath.Ext(item.Path) == \".vmdk\"\n}\n\nfunc (s *StepExport) newHash() (hash.Hash, bool) {\n\t\/\/ check if function is nil to handle the 'none' case\n\tif h, ok := sha[s.Manifest]; ok && h != nil {\n\t\treturn h(), true\n\t}\n\n\treturn nil, false\n}\n\nfunc (s *StepExport) addHash(p string, h hash.Hash) {\n\t_, _ = fmt.Fprintf(&s.mf, \"%s(%s)= %x\\n\", strings.ToUpper(s.Manifest), p, h.Sum(nil))\n}\n\nfunc (s *StepExport) Download(ctx context.Context, lease *nfc.Lease, item nfc.FileItem) (int64, error) {\n\tpath := filepath.Join(s.OutputDir, item.Path)\n\topts := soap.Download{}\n\n\tif h, ok := s.newHash(); ok {\n\t\topts.Writer = h\n\t\tdefer s.addHash(item.Path, h)\n\t}\n\n\terr := lease.DownloadFile(ctx, path, item, opts)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tf, err := os.Stat(path)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn f.Size(), err\n}\n<|endoftext|>"} {"text":"<commit_before>package mysql\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/vault\/helper\/strutil\"\n\t\"github.com\/hashicorp\/vault\/logical\"\n\t\"github.com\/hashicorp\/vault\/logical\/framework\"\n)\n\nconst SecretCredsType = \"creds\"\n\n\/\/ Default Revoke and Drop user SQL statment\n\/\/ Revoking permissions for the user is done before the\n\/\/ drop, because MySQL explicitly documents that open user connections\n\/\/ will not be closed. 
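(This caveat is documented under DROP USER in the MySQL reference manual.) 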
By revoking all grants, at least we ensure\n\/\/ that the open connection is useless.\n\/\/ Dropping the user will only affect the next connection\n\/\/ This is not a prepared statement because not all commands are supported\n\/\/ 1295: This command is not supported in the prepared statement protocol yet\n\/\/ Reference https:\/\/mariadb.com\/kb\/en\/mariadb\/prepare-statement\/\nconst defaultRevokeSQL = `\nREVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%'; \nDROP USER '{{name}}'@'%'\n`\n\nfunc secretCreds(b *backend) *framework.Secret {\n\treturn &framework.Secret{\n\t\tType: SecretCredsType,\n\t\tFields: map[string]*framework.FieldSchema{\n\t\t\t\"username\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeString,\n\t\t\t\tDescription: \"Username\",\n\t\t\t},\n\n\t\t\t\"password\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeString,\n\t\t\t\tDescription: \"Password\",\n\t\t\t},\n\n\t\t\t\"role\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeString,\n\t\t\t\tDescription: \"Role\",\n\t\t\t},\n\t\t},\n\n\t\tRenew: b.secretCredsRenew,\n\t\tRevoke: b.secretCredsRevoke,\n\t}\n}\n\nfunc (b *backend) secretCredsRenew(\n\treq *logical.Request, d *framework.FieldData) (*logical.Response, error) {\n\t\/\/ Get the lease information\n\tlease, err := b.Lease(req.Storage)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif lease == nil {\n\t\tlease = &configLease{}\n\t}\n\n\tf := framework.LeaseExtend(lease.Lease, lease.LeaseMax, b.System())\n\treturn f(req, d)\n}\n\nfunc (b *backend) secretCredsRevoke(\n\treq *logical.Request, d *framework.FieldData) (*logical.Response, error) {\n\n\t\/\/ Get the username from the internal data\n\tusernameRaw, ok := req.Secret.InternalData[\"username\"]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"secret is missing username internal data\")\n\t}\n\tusername, ok := usernameRaw.(string)\n\n\t\/\/ Get our connection\n\tdb, err := b.DB(req.Storage)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Get the role name\n\t\/\/ we may not always have role data in the secret InternalData\n\t\/\/ so don't exit if the roleNameRaw fails. 
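(An older lease, for example, may predate the role field.) 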
Instead it is set\n\t\/\/ as an empty string\n\tvar roleName string\n\troleNameRaw, ok := req.Secret.InternalData[\"role\"]\n\tif !ok {\n\t\troleName = \"\"\n\t} else {\n\t\troleName, ok = roleNameRaw.(string)\n\t}\n\t\/\/ init default revoke sql string.\n\t\/\/ this will replaced by a user provided one if one exists\n\t\/\/ otherwise this is what will be used when lease is revoked\n\trevokeSQL := defaultRevokeSQL\n\n\t\/\/ if we were successful in finding a role name\n\t\/\/ create role entry from that name\n\tif roleName != \"\" {\n\t\trole, err = b.Role(req.Storage, roleName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t\/\/ Check for a revokeSQL string\n\t\t\/\/ if one exists use that instead of the default\n\t\tif role.RevokeSQL != \"\" {\n\t\t\trevokeSQL = role.RevokeSQL\n\t\t}\n\t}\n\n\t\/\/ Start a transaction\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer tx.Rollback()\n\n\tfor _, query := range strutil.ParseArbitraryStringSlice(revokeSQL, \";\") {\n\t\tquery = strings.TrimSpace(query)\n\t\tif len(query) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ This is not a prepared statement because not all commands are supported\n\t\t\/\/ 1295: This command is not supported in the prepared statement protocol yet\n\t\t\/\/ Reference https:\/\/mariadb.com\/kb\/en\/mariadb\/prepare-statement\/\n\t\tquery = strings.Replace(query, \"{{name}}\", username, -1)\n\t\t_, err = tx.Exec(query)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t}\n\n\t\/\/ Commit the transaction\n\tif err := tx.Commit(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn nil, nil\n}\n<commit_msg>fixed an incorrect assignment<commit_after>package mysql\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/vault\/helper\/strutil\"\n\t\"github.com\/hashicorp\/vault\/logical\"\n\t\"github.com\/hashicorp\/vault\/logical\/framework\"\n)\n\nconst SecretCredsType = \"creds\"\n\n\/\/ Default Revoke and Drop user SQL statment\n\/\/ Revoking permissions for the user is done before the\n\/\/ drop, because MySQL explicitly documents that open user connections\n\/\/ will not be closed. 
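(This caveat is documented under DROP USER in the MySQL reference manual.) 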
By revoking all grants, at least we ensure\n\/\/ that the open connection is useless.\n\/\/ Dropping the user will only affect the next connection\n\/\/ This is not a prepared statement because not all commands are supported\n\/\/ 1295: This command is not supported in the prepared statement protocol yet\n\/\/ Reference https:\/\/mariadb.com\/kb\/en\/mariadb\/prepare-statement\/\nconst defaultRevokeSQL = `\nREVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%'; \nDROP USER '{{name}}'@'%'\n`\n\nfunc secretCreds(b *backend) *framework.Secret {\n\treturn &framework.Secret{\n\t\tType: SecretCredsType,\n\t\tFields: map[string]*framework.FieldSchema{\n\t\t\t\"username\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeString,\n\t\t\t\tDescription: \"Username\",\n\t\t\t},\n\n\t\t\t\"password\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeString,\n\t\t\t\tDescription: \"Password\",\n\t\t\t},\n\n\t\t\t\"role\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeString,\n\t\t\t\tDescription: \"Role\",\n\t\t\t},\n\t\t},\n\n\t\tRenew: b.secretCredsRenew,\n\t\tRevoke: b.secretCredsRevoke,\n\t}\n}\n\nfunc (b *backend) secretCredsRenew(\n\treq *logical.Request, d *framework.FieldData) (*logical.Response, error) {\n\t\/\/ Get the lease information\n\tlease, err := b.Lease(req.Storage)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif lease == nil {\n\t\tlease = &configLease{}\n\t}\n\n\tf := framework.LeaseExtend(lease.Lease, lease.LeaseMax, b.System())\n\treturn f(req, d)\n}\n\nfunc (b *backend) secretCredsRevoke(\n\treq *logical.Request, d *framework.FieldData) (*logical.Response, error) {\n\n\t\/\/ Get the username from the internal data\n\tusernameRaw, ok := req.Secret.InternalData[\"username\"]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"secret is missing username internal data\")\n\t}\n\tusername, ok := usernameRaw.(string)\n\n\t\/\/ Get our connection\n\tdb, err := b.DB(req.Storage)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Get the role name\n\t\/\/ we may not always have role data in the secret InternalData\n\t\/\/ so don't exit if the roleNameRaw fails. 
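(An older lease, for example, may predate the role field.) 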
Instead it is set\n\t\/\/ as an empty string.\n\tvar roleName string\n\troleNameRaw, ok := req.Secret.InternalData[\"role\"]\n\tif !ok {\n\t\troleName = \"\"\n\t} else {\n\t\troleName, ok = roleNameRaw.(string)\n\t}\n\t\/\/ init default revoke sql string.\n\t\/\/ this will replaced by a user provided one if one exists\n\t\/\/ otherwise this is what will be used when lease is revoked\n\trevokeSQL := defaultRevokeSQL\n\n\t\/\/ if we were successful in finding a role name\n\t\/\/ create role entry from that name\n\tif roleName != \"\" {\n\t\trole, err := b.Role(req.Storage, roleName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t\/\/ Check for a revokeSQL string\n\t\t\/\/ if one exists use that instead of the default\n\t\tif role.RevokeSQL != \"\" {\n\t\t\trevokeSQL = role.RevokeSQL\n\t\t}\n\t}\n\n\t\/\/ Start a transaction\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer tx.Rollback()\n\n\tfor _, query := range strutil.ParseArbitraryStringSlice(revokeSQL, \";\") {\n\t\tquery = strings.TrimSpace(query)\n\t\tif len(query) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ This is not a prepared statement because not all commands are supported\n\t\t\/\/ 1295: This command is not supported in the prepared statement protocol yet\n\t\t\/\/ Reference https:\/\/mariadb.com\/kb\/en\/mariadb\/prepare-statement\/\n\t\tquery = strings.Replace(query, \"{{name}}\", username, -1)\n\t\t_, err = tx.Exec(query)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t}\n\n\t\/\/ Commit the transaction\n\tif err := tx.Commit(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn nil, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package pdb\n\nimport (\n\t\"unicode\"\n\n\t\"github.com\/TuftsBCB\/seq\"\n)\n\nfunc (m Model) seqAtomsGuess() []*Residue {\n\tseqres := m.Chain.Sequence\n\tmapping := make([]*Residue, len(seqres))\n\tif len(seqres) != len(m.Residues) {\n\t\t\/\/ This is a last ditch effort. Use the ATOM sequence number as an\n\t\t\/\/ index into the SEQRES residues.\n\t\tfor _, r := range m.Residues {\n\t\t\tsi := r.SequenceNum - 1\n\t\t\tif si >= 0 && si < len(seqres) && seqres[si] == r.Name {\n\t\t\t\tmapping[si] = r\n\t\t\t}\n\t\t}\n\t\treturn mapping\n\t}\n\n\tfor i, r := range m.Residues {\n\t\tif i < len(seqres) && seqres[i] == r.Name {\n\t\t\tmapping[i] = r\n\t\t}\n\t}\n\treturn mapping\n}\n\nfunc (m Model) seqAtomsAlign() []*Residue {\n\tseqres := m.Chain.Sequence\n\tatomResidues := make([]seq.Residue, len(m.Residues))\n\tfor i, r := range m.Residues {\n\t\tatomResidues[i] = r.Name\n\t}\n\taligned := seq.NeedlemanWunsch(seqres, atomResidues)\n\n\tmapped := make([]*Residue, len(seqres))\n\tatomi := 0\n\tfor i, r := range aligned.B {\n\t\tif r == '-' {\n\t\t\tmapped[i] = nil\n\t\t} else {\n\t\t\tmapped[i] = m.Residues[atomi]\n\t\t\tatomi++\n\t\t}\n\t}\n\treturn mapped\n}\n\n\/\/ Attempts to accomplish the same thing as seqAtomsWithMissing, but instead\n\/\/ of mapping one residue at a time, we map *chunks* at a time. That is,\n\/\/ a chunk is any group of contiguous residues.\nfunc (m Model) seqAtomsChunksMerge() []*Residue {\n\tseqres := m.Chain.Sequence\n\n\t\/\/ Check to make sure that the total number of missing residues, plus the\n\t\/\/ total number of ATOM record residues equal the total number of residues\n\t\/\/ in the SEQRES records. 
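That is, every SEQRES residue must be accounted for exactly once, either as an observed (ATOM) residue or as a missing one (e.g. a 100-residue SEQRES chain with 88 ATOM residues needs exactly 12 missing entries). 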
Otherwise, the merge will fail.\n\tif len(seqres) != len(m.Chain.Missing)+len(m.Residues) {\n\t\treturn m.seqAtomsAlign()\n\t}\n\n\tresult := make([]*Residue, len(seqres))\n\tmchunks := chunk(m.Chain.Missing, m.Residues)\n\tfchunks := chunk(m.Residues, m.Chain.Missing)\n\n\t\/\/ If the PDB file is corrupted, a merge will fail.\n\t\/\/ So we fall back to alignment.\n\tif ok := merge(result, 0, seqres, nil, mchunks, fchunks); !ok {\n\t\treturn m.seqAtomsAlign()\n\t}\n\n\t\/\/ X out any residues that don't have ATOM records.\n\tfor i := range result {\n\t\tif result[i].Atoms == nil {\n\t\t\tresult[i] = nil\n\t\t}\n\t}\n\treturn result\n}\n\nfunc merge(result []*Residue, end int, seqres []seq.Residue,\n\tchunkToMerge []*Residue, mchunks, fchunks [][]*Residue) bool {\n\n\tif chunkToMerge != nil {\n\t\ti := 0\n\t\tfor ; i < len(chunkToMerge); i++ {\n\t\t\tif chunkToMerge[i].Name != seqres[end+i] {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tresult[end+i] = chunkToMerge[i]\n\t\t}\n\t\tend += i\n\t}\n\tswitch {\n\tcase len(mchunks) == 0 && len(fchunks) == 0:\n\t\treturn true\n\tcase len(mchunks) == 0:\n\t\treturn merge(result, end, seqres, fchunks[0], nil, fchunks[1:])\n\tcase len(fchunks) == 0:\n\t\treturn merge(result, end, seqres, mchunks[0], mchunks[1:], nil)\n\t}\n\n\t\/\/ This is a little weird. We really want to alternate chunks, since\n\t\/\/ a chunk is split presumably where there are holes.\n\t\/\/ But, we don't force it.\n\t\/\/ So we express a preference for it by trying the alternate first, and\n\t\/\/ fallback to similar chunk second.\n\tswitch {\n\tcase len(chunkToMerge) > 0 && chunkToMerge[0].Atoms == nil:\n\t\t\/\/ The current chunk is from missing, so prefer filled.\n\t\treturn merge(result, end, seqres, fchunks[0], mchunks, fchunks[1:]) ||\n\t\t\tmerge(result, end, seqres, mchunks[0], mchunks[1:], fchunks)\n\tcase len(chunkToMerge) > 0 && chunkToMerge[0].Atoms != nil:\n\t\t\/\/ The current chunk is from filled, so prefer missing.\n\t\treturn merge(result, end, seqres, mchunks[0], mchunks[1:], fchunks) ||\n\t\t\tmerge(result, end, seqres, fchunks[0], mchunks, fchunks[1:])\n\t}\n\n\t\/\/ doesn't matter what we do here.\n\t\/\/ (this is the beginning)\n\treturn merge(result, end, seqres, mchunks[0], mchunks[1:], fchunks) ||\n\t\tmerge(result, end, seqres, fchunks[0], mchunks, fchunks[1:])\n}\n\n\/\/ chunk splits up residues in a list of contiguous segments.\nfunc chunk(residues []*Residue, other []*Residue) [][]*Residue {\n\tif len(residues) == 0 {\n\t\treturn nil\n\t}\n\n\tchunks := make([][]*Residue, 0)\n\n\tcur := make([]*Residue, 1)\n\tlast := residues[0]\n\tcur[0] = last\n\tfor i := 1; i < len(residues); i++ {\n\t\tr := residues[i]\n\n\t\tif last.isContiguous(r, other) {\n\t\t\tcur = append(cur, r)\n\t\t\tlast = r\n\t\t\tcontinue\n\t\t}\n\n\t\tchunks = append(chunks, cur)\n\t\tcur = make([]*Residue, 1)\n\t\tcur[0] = r\n\t\tlast = r\n\t}\n\tchunks = append(chunks, cur)\n\n\treturn chunks\n}\n\nfunc (a Residue) less(b Residue) bool {\n\treturn a.SequenceNum < b.SequenceNum ||\n\t\t(a.SequenceNum == b.SequenceNum && a.InsertionCode < b.InsertionCode)\n}\n\nfunc (a Residue) equals(b Residue) bool {\n\treturn a.SequenceNum == b.SequenceNum && a.InsertionCode == b.InsertionCode\n}\n\nfunc (a *Residue) isContiguous(b *Residue, other []*Residue) bool {\n\t\/\/ If any residue in \"other\" is next to a or b, then we cannot claim\n\t\/\/ contiguity.\n\tfor i := range other {\n\t\tif a.SequenceNum == other[i].SequenceNum {\n\t\t\treturn false\n\t\t}\n\t\tif b.SequenceNum == other[i].SequenceNum 
{\n\t\t\treturn false\n\t\t}\n\t}\n\treturn a.isNext(b)\n}\n\nfunc (a *Residue) isNext(b *Residue) bool {\n\tasn, bsn := a.SequenceNum, b.SequenceNum\n\n\t\/\/ We make the assumption that any two residues with the same sequence\n\t\/\/ number are always contiguous.\n\t\/\/ Note that this has problems. Consider the case with two ATOM records\n\t\/\/ with sequence numbers 36 and 37, but a missing residue with sequence\n\t\/\/ number 36 and insertion code A. So it should go 36 -> 36A -> 37.\n\t\/\/ To remedy this, the caller must make sure that all missing (or filled)\n\t\/\/ residues don't have the same sequence number.\n\tif asn == bsn {\n\t\treturn true\n\t}\n\tif asn+1 == bsn || asn-1 == bsn {\n\t\treturn true\n\t}\n\treturn false\n}\n\ntype residues []*Residue\n\nfunc (rs residues) String() string {\n\tbs := make([]byte, len(rs))\n\tfor i, r := range rs {\n\t\tswitch {\n\t\tcase r == nil:\n\t\t\tbs[i] = '-'\n\t\tcase r.Atoms == nil:\n\t\t\tbs[i] = byte(unicode.ToLower(rune(r.Name)))\n\t\tdefault:\n\t\t\tbs[i] = byte(r.Name)\n\t\t}\n\t}\n\treturn string(bs)\n}\n\ntype gappedResidues []*Residue\n\nfunc (rs gappedResidues) String() string {\n\tbs := make([]byte, len(rs))\n\tfor i, r := range rs {\n\t\tif r == nil || r.Atoms == nil {\n\t\t\tbs[i] = '-'\n\t\t} else {\n\t\t\tbs[i] = byte(r.Name)\n\t\t}\n\t}\n\treturn string(bs)\n}\n<commit_msg>Work with the new interface to Needleman-Wunsch.<commit_after>package pdb\n\nimport (\n\t\"unicode\"\n\n\t\"github.com\/TuftsBCB\/seq\"\n)\n\nfunc (m Model) seqAtomsGuess() []*Residue {\n\tseqres := m.Chain.Sequence\n\tmapping := make([]*Residue, len(seqres))\n\tif len(seqres) != len(m.Residues) {\n\t\t\/\/ This is a last ditch effort. Use the ATOM sequence number as an\n\t\t\/\/ index into the SEQRES residues.\n\t\tfor _, r := range m.Residues {\n\t\t\tsi := r.SequenceNum - 1\n\t\t\tif si >= 0 && si < len(seqres) && seqres[si] == r.Name {\n\t\t\t\tmapping[si] = r\n\t\t\t}\n\t\t}\n\t\treturn mapping\n\t}\n\n\tfor i, r := range m.Residues {\n\t\tif i < len(seqres) && seqres[i] == r.Name {\n\t\t\tmapping[i] = r\n\t\t}\n\t}\n\treturn mapping\n}\n\nfunc (m Model) seqAtomsAlign() []*Residue {\n\tseqres := m.Chain.Sequence\n\tatomResidues := make([]seq.Residue, len(m.Residues))\n\tfor i, r := range m.Residues {\n\t\tatomResidues[i] = r.Name\n\t}\n\taligned := seq.NeedlemanWunsch(seqres, atomResidues, seq.MatBlosum62)\n\n\tmapped := make([]*Residue, len(seqres))\n\tatomi := 0\n\tfor i, r := range aligned.B {\n\t\tif r == '-' {\n\t\t\tmapped[i] = nil\n\t\t} else {\n\t\t\tmapped[i] = m.Residues[atomi]\n\t\t\tatomi++\n\t\t}\n\t}\n\treturn mapped\n}\n\n\/\/ Attempts to accomplish the same thing as seqAtomsWithMissing, but instead\n\/\/ of mapping one residue at a time, we map *chunks* at a time. That is,\n\/\/ a chunk is any group of contiguous residues.\nfunc (m Model) seqAtomsChunksMerge() []*Residue {\n\tseqres := m.Chain.Sequence\n\n\t\/\/ Check to make sure that the total number of missing residues, plus the\n\t\/\/ total number of ATOM record residues equal the total number of residues\n\t\/\/ in the SEQRES records. 
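That is, every SEQRES residue must be accounted for exactly once, either as an observed (ATOM) residue or as a missing one (e.g. a 100-residue SEQRES chain with 88 ATOM residues needs exactly 12 missing entries). 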
Otherwise, the merge will fail.\n\tif len(seqres) != len(m.Chain.Missing)+len(m.Residues) {\n\t\treturn m.seqAtomsAlign()\n\t}\n\n\tresult := make([]*Residue, len(seqres))\n\tmchunks := chunk(m.Chain.Missing, m.Residues)\n\tfchunks := chunk(m.Residues, m.Chain.Missing)\n\n\t\/\/ If the PDB file is corrupted, a merge will fail.\n\t\/\/ So we fall back to alignment.\n\tif ok := merge(result, 0, seqres, nil, mchunks, fchunks); !ok {\n\t\treturn m.seqAtomsAlign()\n\t}\n\n\t\/\/ X out any residues that don't have ATOM records.\n\tfor i := range result {\n\t\tif result[i].Atoms == nil {\n\t\t\tresult[i] = nil\n\t\t}\n\t}\n\treturn result\n}\n\nfunc merge(result []*Residue, end int, seqres []seq.Residue,\n\tchunkToMerge []*Residue, mchunks, fchunks [][]*Residue) bool {\n\n\tif chunkToMerge != nil {\n\t\ti := 0\n\t\tfor ; i < len(chunkToMerge); i++ {\n\t\t\tif chunkToMerge[i].Name != seqres[end+i] {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tresult[end+i] = chunkToMerge[i]\n\t\t}\n\t\tend += i\n\t}\n\tswitch {\n\tcase len(mchunks) == 0 && len(fchunks) == 0:\n\t\treturn true\n\tcase len(mchunks) == 0:\n\t\treturn merge(result, end, seqres, fchunks[0], nil, fchunks[1:])\n\tcase len(fchunks) == 0:\n\t\treturn merge(result, end, seqres, mchunks[0], mchunks[1:], nil)\n\t}\n\n\t\/\/ This is a little weird. We really want to alternate chunks, since\n\t\/\/ a chunk is split presumably where there are holes.\n\t\/\/ But, we don't force it.\n\t\/\/ So we express a preference for it by trying the alternate first, and\n\t\/\/ fallback to similar chunk second.\n\tswitch {\n\tcase len(chunkToMerge) > 0 && chunkToMerge[0].Atoms == nil:\n\t\t\/\/ The current chunk is from missing, so prefer filled.\n\t\treturn merge(result, end, seqres, fchunks[0], mchunks, fchunks[1:]) ||\n\t\t\tmerge(result, end, seqres, mchunks[0], mchunks[1:], fchunks)\n\tcase len(chunkToMerge) > 0 && chunkToMerge[0].Atoms != nil:\n\t\t\/\/ The current chunk is from filled, so prefer missing.\n\t\treturn merge(result, end, seqres, mchunks[0], mchunks[1:], fchunks) ||\n\t\t\tmerge(result, end, seqres, fchunks[0], mchunks, fchunks[1:])\n\t}\n\n\t\/\/ doesn't matter what we do here.\n\t\/\/ (this is the beginning)\n\treturn merge(result, end, seqres, mchunks[0], mchunks[1:], fchunks) ||\n\t\tmerge(result, end, seqres, fchunks[0], mchunks, fchunks[1:])\n}\n\n\/\/ chunk splits up residues in a list of contiguous segments.\nfunc chunk(residues []*Residue, other []*Residue) [][]*Residue {\n\tif len(residues) == 0 {\n\t\treturn nil\n\t}\n\n\tchunks := make([][]*Residue, 0)\n\n\tcur := make([]*Residue, 1)\n\tlast := residues[0]\n\tcur[0] = last\n\tfor i := 1; i < len(residues); i++ {\n\t\tr := residues[i]\n\n\t\tif last.isContiguous(r, other) {\n\t\t\tcur = append(cur, r)\n\t\t\tlast = r\n\t\t\tcontinue\n\t\t}\n\n\t\tchunks = append(chunks, cur)\n\t\tcur = make([]*Residue, 1)\n\t\tcur[0] = r\n\t\tlast = r\n\t}\n\tchunks = append(chunks, cur)\n\n\treturn chunks\n}\n\nfunc (a Residue) less(b Residue) bool {\n\treturn a.SequenceNum < b.SequenceNum ||\n\t\t(a.SequenceNum == b.SequenceNum && a.InsertionCode < b.InsertionCode)\n}\n\nfunc (a Residue) equals(b Residue) bool {\n\treturn a.SequenceNum == b.SequenceNum && a.InsertionCode == b.InsertionCode\n}\n\nfunc (a *Residue) isContiguous(b *Residue, other []*Residue) bool {\n\t\/\/ If any residue in \"other\" is next to a or b, then we cannot claim\n\t\/\/ contiguity.\n\tfor i := range other {\n\t\tif a.SequenceNum == other[i].SequenceNum {\n\t\t\treturn false\n\t\t}\n\t\tif b.SequenceNum == other[i].SequenceNum 
{\n\t\t\treturn false\n\t\t}\n\t}\n\treturn a.isNext(b)\n}\n\nfunc (a *Residue) isNext(b *Residue) bool {\n\tasn, bsn := a.SequenceNum, b.SequenceNum\n\n\t\/\/ We make the assumption that any two residues with the same sequence\n\t\/\/ number are always contiguous.\n\t\/\/ Note that this has problems. Consider the case with two ATOM records\n\t\/\/ with sequence numbers 36 and 37, but a missing residue with sequence\n\t\/\/ number 36 and insertion code A. So it should go 36 -> 36A -> 37.\n\t\/\/ To remedy this, the caller must make sure that all missing (or filled)\n\t\/\/ residues don't have the same sequence number.\n\tif asn == bsn {\n\t\treturn true\n\t}\n\tif asn+1 == bsn || asn-1 == bsn {\n\t\treturn true\n\t}\n\treturn false\n}\n\ntype residues []*Residue\n\nfunc (rs residues) String() string {\n\tbs := make([]byte, len(rs))\n\tfor i, r := range rs {\n\t\tswitch {\n\t\tcase r == nil:\n\t\t\tbs[i] = '-'\n\t\tcase r.Atoms == nil:\n\t\t\tbs[i] = byte(unicode.ToLower(rune(r.Name)))\n\t\tdefault:\n\t\t\tbs[i] = byte(r.Name)\n\t\t}\n\t}\n\treturn string(bs)\n}\n\ntype gappedResidues []*Residue\n\nfunc (rs gappedResidues) String() string {\n\tbs := make([]byte, len(rs))\n\tfor i, r := range rs {\n\t\tif r == nil || r.Atoms == nil {\n\t\t\tbs[i] = '-'\n\t\t} else {\n\t\t\tbs[i] = byte(r.Name)\n\t\t}\n\t}\n\treturn string(bs)\n}\n<|endoftext|>"} {"text":"<commit_before>package checklog\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"testing\"\n)\n\n\/\/ Format implements fmt.Formatter.\nfunc (s *state) Format(f fmt.State, c rune) {\n\tif s == nil {\n\t\tfmt.Fprintf(f, \"<nil>\")\n\t\treturn\n\t}\n\tfmt.Printf(\"%\"+string(c), *s)\n}\n\nfunc TestLoadStateIfFileNotExist(t *testing.T) {\n\tfile := \"testdata\/file_not_found\"\n\ts, err := loadState(file)\n\tif err != nil {\n\t\tt.Errorf(\"loadState(%q) = %v; want nil\", file, err)\n\t}\n\tif s != nil {\n\t\tt.Errorf(\"loadState(%q) = %v; want nil\", file, *s)\n\t}\n}\n\nfunc TestLoadStateIfAccessDenied(t *testing.T) {\n\tfile := \"testdata\/file.txt\/any\"\n\ts, err := loadState(file)\n\tif err == nil {\n\t\tt.Errorf(\"loadState(%q) = %v; want an error\", file, s)\n\t}\n}\n\nfunc TestSaveStateIfFileNotExist(t *testing.T) {\n\tfile := \"testdata\/file_will_create\"\n\tdefer func() {\n\t\terr := os.Remove(file)\n\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\n\ts := &state{\n\t\tSkipBytes: 15,\n\t\tInode: 150,\n\t}\n\ttestSaveLoadState(t, file, s)\n}\n\nfunc TestSaveStateOverwrittenIfFileExist(t *testing.T) {\n\tfile := \"testdata\/state_overwritten\"\n\tdefer func() {\n\t\terr := os.Remove(file)\n\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\n\terr := ioutil.WriteFile(file, []byte(`{\"skip_bytes\": 10, \"inode\": 100}`), 0644)\n\tif err != nil {\n\t\tt.Errorf(\"WriteFile: %v\", err)\n\t\treturn\n\t}\n\ts := &state{\n\t\tSkipBytes: 15,\n\t\tInode: 150,\n\t}\n\ttestSaveLoadState(t, file, s)\n}\n\nfunc testSaveLoadState(t *testing.T, file string, s *state) {\n\tt.Helper()\n\n\tif err := saveState(file, s); err != nil {\n\t\tt.Errorf(\"saveState(%v) = %v; want nil\", file, *s)\n\t\treturn\n\t}\n\ts1, err := loadState(file)\n\tif err != nil {\n\t\tt.Errorf(\"loadState: %v\", err)\n\t\treturn\n\t}\n\tif !reflect.DeepEqual(s, s1) {\n\t\tt.Errorf(\"saveState(%v) -> loadState() = %v\", s, s1)\n\t}\n}\n\nfunc TestSaveStateIfAccessDenied(t *testing.T) {\n\tfile := \"testdata\/readonly\/state\"\n\tdir := filepath.Dir(file)\n\tdefer 
func() {\n\t\tif err := os.Chmod(dir, 0755); err != nil {\n\t\t\tt.Fatalf(\"Chmod: %v\", err)\n\t\t}\n\t\terr := os.RemoveAll(dir)\n\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\tt.Fatalf(\"RemoveAll: %v\", err)\n\t\t}\n\t}()\n\n\tif err := os.MkdirAll(dir, 0700); err != nil {\n\t\tt.Errorf(\"MkdirAll: %v\", err)\n\t\treturn\n\t}\n\tdata := []byte(`{\"skip_bytes\": 10, \"inode\": 100}`)\n\tif err := ioutil.WriteFile(file, data, 0644); err != nil {\n\t\tt.Errorf(\"WriteFile: %v\", err)\n\t\treturn\n\t}\n\tif err := os.Chmod(dir, 0500); err != nil {\n\t\tt.Errorf(\"Chmod: %v\", err)\n\t\treturn\n\t}\n\ts := &state{\n\t\tSkipBytes: 15,\n\t\tInode: 150,\n\t}\n\tsaveState(file, s) \/\/ an error can be ignored in this case.\n\tdata1, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\tt.Errorf(\"ReadFile: %v\", err)\n\t\treturn\n\t}\n\tif !bytes.Equal(data1, data) {\n\t\tt.Errorf(\"saveState into readonly directory should keep original contents: result = %s\", data1)\n\t}\n}\n<commit_msg>[check-log] add a TODO comment<commit_after>package checklog\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"testing\"\n)\n\n\/\/ Format implements fmt.Formatter.\nfunc (s *state) Format(f fmt.State, c rune) {\n\tif s == nil {\n\t\tfmt.Fprintf(f, \"<nil>\")\n\t\treturn\n\t}\n\tfmt.Printf(\"%\"+string(c), *s)\n}\n\nfunc TestLoadStateIfFileNotExist(t *testing.T) {\n\tfile := \"testdata\/file_not_found\"\n\ts, err := loadState(file)\n\tif err != nil {\n\t\tt.Errorf(\"loadState(%q) = %v; want nil\", file, err)\n\t}\n\tif s != nil {\n\t\tt.Errorf(\"loadState(%q) = %v; want nil\", file, *s)\n\t}\n}\n\nfunc TestLoadStateIfAccessDenied(t *testing.T) {\n\tfile := \"testdata\/file.txt\/any\"\n\ts, err := loadState(file)\n\tif err == nil {\n\t\tt.Errorf(\"loadState(%q) = %v; want an error\", file, s)\n\t}\n}\n\n\/\/ TODO(lufia): We might be better to test a condition too that file is exist but loadState can't read it.\n\nfunc TestSaveStateIfFileNotExist(t *testing.T) {\n\tfile := \"testdata\/file_will_create\"\n\tdefer func() {\n\t\terr := os.Remove(file)\n\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\n\ts := &state{\n\t\tSkipBytes: 15,\n\t\tInode: 150,\n\t}\n\ttestSaveLoadState(t, file, s)\n}\n\nfunc TestSaveStateOverwrittenIfFileExist(t *testing.T) {\n\tfile := \"testdata\/state_overwritten\"\n\tdefer func() {\n\t\terr := os.Remove(file)\n\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\n\terr := ioutil.WriteFile(file, []byte(`{\"skip_bytes\": 10, \"inode\": 100}`), 0644)\n\tif err != nil {\n\t\tt.Errorf(\"WriteFile: %v\", err)\n\t\treturn\n\t}\n\ts := &state{\n\t\tSkipBytes: 15,\n\t\tInode: 150,\n\t}\n\ttestSaveLoadState(t, file, s)\n}\n\nfunc testSaveLoadState(t *testing.T, file string, s *state) {\n\tt.Helper()\n\n\tif err := saveState(file, s); err != nil {\n\t\tt.Errorf(\"saveState(%v) = %v; want nil\", file, *s)\n\t\treturn\n\t}\n\ts1, err := loadState(file)\n\tif err != nil {\n\t\tt.Errorf(\"loadState: %v\", err)\n\t\treturn\n\t}\n\tif !reflect.DeepEqual(s, s1) {\n\t\tt.Errorf(\"saveState(%v) -> loadState() = %v\", s, s1)\n\t}\n}\n\nfunc TestSaveStateIfAccessDenied(t *testing.T) {\n\tfile := \"testdata\/readonly\/state\"\n\tdir := filepath.Dir(file)\n\tdefer func() {\n\t\tif err := os.Chmod(dir, 0755); err != nil {\n\t\t\tt.Fatalf(\"Chmod: %v\", err)\n\t\t}\n\t\terr := os.RemoveAll(dir)\n\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\tt.Fatalf(\"RemoveAll: %v\", err)\n\t\t}\n\t}()\n\n\tif 
err := os.MkdirAll(dir, 0700); err != nil {\n\t\tt.Errorf(\"MkdirAll: %v\", err)\n\t\treturn\n\t}\n\tdata := []byte(`{\"skip_bytes\": 10, \"inode\": 100}`)\n\tif err := ioutil.WriteFile(file, data, 0644); err != nil {\n\t\tt.Errorf(\"WriteFile: %v\", err)\n\t\treturn\n\t}\n\tif err := os.Chmod(dir, 0500); err != nil {\n\t\tt.Errorf(\"Chmod: %v\", err)\n\t\treturn\n\t}\n\ts := &state{\n\t\tSkipBytes: 15,\n\t\tInode: 150,\n\t}\n\tsaveState(file, s) \/\/ an error can be ignored in this case.\n\tdata1, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\tt.Errorf(\"ReadFile: %v\", err)\n\t\treturn\n\t}\n\tif !bytes.Equal(data1, data) {\n\t\tt.Errorf(\"saveState into readonly directory should keep original contents: result = %s\", data1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build ignore\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/bgv\/workerpool\"\n)\n\nfunc main() {\n\t\/\/ Number of workers, and Size of the job queue\n\tsimplepool := workerpool.New(10, 50)\n\n\t\/\/ create and submit 100 jobs to the pool\n\tfor i := 0; i < 100; i++ {\n\t\tcount := i\n\n\t\tsimplepool.JobQueue <- func() {\n\t\t\tfmt.Printf(\"I am job! Number %d\\n\", count)\n\t\t}\n\t}\n\n\t\/\/ Wait for all jobs to finish and stop the workers\n\tsimplepool.Stop()\n}\n<commit_msg>Update example<commit_after>\/\/ +build ignore\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/bgv\/workerpool\"\n)\n\nfunc main() {\n\t\/\/ Number of workers, and Size of the job queue\n\tsimplepool := workerpool.New(10, 50)\n\n\t\/\/ create and submit 100 jobs to the pool\n\tfor i := 0; i < 100; i++ {\n\t\tcount := i\n\n\t\tsimplepool.AddJob(func() {\n\t\t\tfmt.Printf(\"I am job! Number %d\\n\", count)\n\t\t\tsimplepool.JobDone()\n\t\t})\n\t}\n\n\t\/\/ Wait for all jobs to finish and stop the workers.\n\tsimplepool.Stop()\n}\n<|endoftext|>"} {"text":"<commit_before>package volumes\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/docker\/daemon\/graphdriver\"\n\t\"github.com\/docker\/docker\/utils\"\n)\n\ntype Repository struct {\n\tconfigPath string\n\tdriver graphdriver.Driver\n\tvolumes map[string]*Volume\n\tlock sync.Mutex\n}\n\nfunc NewRepository(configPath string, driver graphdriver.Driver) (*Repository, error) {\n\tabspath, err := filepath.Abs(configPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Create the config path\n\tif err := os.MkdirAll(abspath, 0700); err != nil && !os.IsExist(err) {\n\t\treturn nil, err\n\t}\n\n\trepo := &Repository{\n\t\tdriver: driver,\n\t\tconfigPath: abspath,\n\t\tvolumes: make(map[string]*Volume),\n\t}\n\n\treturn repo, repo.restore()\n}\n\nfunc (r *Repository) newVolume(path string, writable bool) (*Volume, error) {\n\tvar (\n\t\tisBindMount bool\n\t\terr error\n\t\tid = utils.GenerateRandomID()\n\t)\n\tif path != \"\" {\n\t\tisBindMount = true\n\t}\n\n\tif path == \"\" {\n\t\tpath, err = r.createNewVolumePath(id)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpath = filepath.Clean(path)\n\n\tpath, err = filepath.EvalSymlinks(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tv := &Volume{\n\t\tID: id,\n\t\tPath: path,\n\t\trepository: r,\n\t\tWritable: writable,\n\t\tcontainers: make(map[string]struct{}),\n\t\tconfigPath: r.configPath + \"\/\" + id,\n\t\tIsBindMount: isBindMount,\n\t}\n\n\tif err := v.initialize(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn v, r.add(v)\n}\n\nfunc (r *Repository) restore() error {\n\tdir, err := 
ioutil.ReadDir(r.configPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, v := range dir {\n\t\tid := v.Name()\n\t\tpath, err := r.driver.Get(id, \"\")\n\t\tif err != nil {\n\t\t\tlog.Debugf(\"Could not find volume for %s: %v\", id, err)\n\t\t\tcontinue\n\t\t}\n\t\tvol := &Volume{\n\t\t\tID: id,\n\t\t\tconfigPath: r.configPath + \"\/\" + id,\n\t\t\tcontainers: make(map[string]struct{}),\n\t\t\tPath: path,\n\t\t}\n\t\tif err := vol.FromDisk(); err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\tlog.Debugf(\"Error restoring volume: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := vol.initialize(); err != nil {\n\t\t\t\tlog.Debugf(\"%s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif err := r.add(vol); err != nil {\n\t\t\tlog.Debugf(\"Error restoring volume: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (r *Repository) Get(path string) *Volume {\n\tr.lock.Lock()\n\tvol := r.get(path)\n\tr.lock.Unlock()\n\treturn vol\n}\n\nfunc (r *Repository) get(path string) *Volume {\n\tpath, err := filepath.EvalSymlinks(path)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn r.volumes[filepath.Clean(path)]\n}\n\nfunc (r *Repository) Add(volume *Volume) error {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\treturn r.add(volume)\n}\n\nfunc (r *Repository) add(volume *Volume) error {\n\tif vol := r.get(volume.Path); vol != nil {\n\t\treturn fmt.Errorf(\"Volume exists: %s\", volume.ID)\n\t}\n\tr.volumes[volume.Path] = volume\n\treturn nil\n}\n\nfunc (r *Repository) Remove(volume *Volume) {\n\tr.lock.Lock()\n\tr.remove(volume)\n\tr.lock.Unlock()\n}\n\nfunc (r *Repository) remove(volume *Volume) {\n\tdelete(r.volumes, volume.Path)\n}\n\nfunc (r *Repository) Delete(path string) error {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\tpath, err := filepath.EvalSymlinks(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvolume := r.get(filepath.Clean(path))\n\tif volume == nil {\n\t\treturn fmt.Errorf(\"Volume %s does not exist\", path)\n\t}\n\n\tif volume.IsBindMount {\n\t\treturn fmt.Errorf(\"Volume %s is a bind-mount and cannot be removed\", volume.Path)\n\t}\n\tcontainers := volume.Containers()\n\tif len(containers) > 0 {\n\t\treturn fmt.Errorf(\"Volume %s is being used and cannot be removed: used by containers %s\", volume.Path, containers)\n\t}\n\n\tif err := os.RemoveAll(volume.configPath); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.driver.Remove(volume.ID); err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tr.remove(volume)\n\treturn nil\n}\n\nfunc (r *Repository) createNewVolumePath(id string) (string, error) {\n\tif err := r.driver.Create(id, \"\"); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tpath, err := r.driver.Get(id, \"\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Driver %s failed to get volume rootfs %s: %v\", r.driver, id, err)\n\t}\n\n\treturn path, nil\n}\n\nfunc (r *Repository) FindOrCreateVolume(path string, writable bool) (*Volume, error) {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tif path == \"\" {\n\t\treturn r.newVolume(path, writable)\n\t}\n\n\tif v := r.get(path); v != nil {\n\t\treturn v, nil\n\t}\n\n\treturn r.newVolume(path, writable)\n}\n<commit_msg>Clean config path of bind mount volume<commit_after>package volumes\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/docker\/daemon\/graphdriver\"\n\t\"github.com\/docker\/docker\/utils\"\n)\n\ntype Repository struct {\n\tconfigPath string\n\tdriver graphdriver.Driver\n\tvolumes 
map[string]*Volume\n\tlock sync.Mutex\n}\n\nfunc NewRepository(configPath string, driver graphdriver.Driver) (*Repository, error) {\n\tabspath, err := filepath.Abs(configPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Create the config path\n\tif err := os.MkdirAll(abspath, 0700); err != nil && !os.IsExist(err) {\n\t\treturn nil, err\n\t}\n\n\trepo := &Repository{\n\t\tdriver: driver,\n\t\tconfigPath: abspath,\n\t\tvolumes: make(map[string]*Volume),\n\t}\n\n\treturn repo, repo.restore()\n}\n\nfunc (r *Repository) newVolume(path string, writable bool) (*Volume, error) {\n\tvar (\n\t\tisBindMount bool\n\t\terr error\n\t\tid = utils.GenerateRandomID()\n\t)\n\tif path != \"\" {\n\t\tisBindMount = true\n\t}\n\n\tif path == \"\" {\n\t\tpath, err = r.createNewVolumePath(id)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpath = filepath.Clean(path)\n\n\tpath, err = filepath.EvalSymlinks(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tv := &Volume{\n\t\tID: id,\n\t\tPath: path,\n\t\trepository: r,\n\t\tWritable: writable,\n\t\tcontainers: make(map[string]struct{}),\n\t\tconfigPath: r.configPath + \"\/\" + id,\n\t\tIsBindMount: isBindMount,\n\t}\n\n\tif err := v.initialize(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn v, r.add(v)\n}\n\nfunc (r *Repository) restore() error {\n\tdir, err := ioutil.ReadDir(r.configPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, v := range dir {\n\t\tid := v.Name()\n\t\tpath, err := r.driver.Get(id, \"\")\n\t\tif err != nil {\n\t\t\tlog.Debugf(\"Could not find volume for %s: %v\", id, err)\n\t\t\tcontinue\n\t\t}\n\t\tvol := &Volume{\n\t\t\tID: id,\n\t\t\tconfigPath: r.configPath + \"\/\" + id,\n\t\t\tcontainers: make(map[string]struct{}),\n\t\t\tPath: path,\n\t\t}\n\t\tif err := vol.FromDisk(); err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\tlog.Debugf(\"Error restoring volume: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := vol.initialize(); err != nil {\n\t\t\t\tlog.Debugf(\"%s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif err := r.add(vol); err != nil {\n\t\t\tlog.Debugf(\"Error restoring volume: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (r *Repository) Get(path string) *Volume {\n\tr.lock.Lock()\n\tvol := r.get(path)\n\tr.lock.Unlock()\n\treturn vol\n}\n\nfunc (r *Repository) get(path string) *Volume {\n\tpath, err := filepath.EvalSymlinks(path)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn r.volumes[filepath.Clean(path)]\n}\n\nfunc (r *Repository) Add(volume *Volume) error {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\treturn r.add(volume)\n}\n\nfunc (r *Repository) add(volume *Volume) error {\n\tif vol := r.get(volume.Path); vol != nil {\n\t\treturn fmt.Errorf(\"Volume exists: %s\", volume.ID)\n\t}\n\tr.volumes[volume.Path] = volume\n\treturn nil\n}\n\nfunc (r *Repository) Remove(volume *Volume) {\n\tr.lock.Lock()\n\tr.remove(volume)\n\tr.lock.Unlock()\n}\n\nfunc (r *Repository) remove(volume *Volume) {\n\tdelete(r.volumes, volume.Path)\n}\n\nfunc (r *Repository) Delete(path string) error {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\tpath, err := filepath.EvalSymlinks(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvolume := r.get(filepath.Clean(path))\n\tif volume == nil {\n\t\treturn fmt.Errorf(\"Volume %s does not exist\", path)\n\t}\n\n\tcontainers := volume.Containers()\n\tif len(containers) > 0 {\n\t\treturn fmt.Errorf(\"Volume %s is being used and cannot be removed: used by containers %s\", volume.Path, containers)\n\t}\n\n\tif err := os.RemoveAll(volume.configPath); err != nil 
{\n\t\treturn err\n\t}\n\n\tif volume.IsBindMount {\n\t\treturn nil\n\t}\n\n\tif err := r.driver.Remove(volume.ID); err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tr.remove(volume)\n\treturn nil\n}\n\nfunc (r *Repository) createNewVolumePath(id string) (string, error) {\n\tif err := r.driver.Create(id, \"\"); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tpath, err := r.driver.Get(id, \"\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Driver %s failed to get volume rootfs %s: %v\", r.driver, id, err)\n\t}\n\n\treturn path, nil\n}\n\nfunc (r *Repository) FindOrCreateVolume(path string, writable bool) (*Volume, error) {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tif path == \"\" {\n\t\treturn r.newVolume(path, writable)\n\t}\n\n\tif v := r.get(path); v != nil {\n\t\treturn v, nil\n\t}\n\n\treturn r.newVolume(path, writable)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage storage\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\t\"gopkg.in\/mgo.v2\"\n)\n\nvar (\n\tsession *mgo.Session\n\tmut sync.RWMutex\n)\n\n\/\/ Storage holds the connection with the database.\ntype Storage struct {\n\tsession *mgo.Session\n\tdbname string\n}\n\n\/\/ Collection represents a database collection. It embeds mgo.Collection for\n\/\/ operations, and holds a session to MongoDB. The user may close the session\n\/\/ using the method close.\ntype Collection struct {\n\t*mgo.Collection\n}\n\n\/\/ Close closes the session with the database.\nfunc (c *Collection) Close() {\n\tc.Collection.Database.Session.Close()\n}\n\nfunc open(addr, dbname string) (*Storage, error) {\n\tif session == nil {\n\t\tvar err error\n\t\tmut.Lock()\n\t\tsession, err = mgo.Dial(addr)\n\t\tmut.Unlock()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"mongodb: %s\", err)\n\t\t}\n\t}\n\tcopy := session.Clone()\n\tstorage := &Storage{session: copy, dbname: dbname}\n\treturn storage, nil\n}\n\n\/\/ Open dials to the MongoDB database, and return the connection (represented\n\/\/ by the type Storage).\n\/\/\n\/\/ addr is a MongoDB connection URI, and dbname is the name of the database.\n\/\/\n\/\/ This function returns a pointer to a Storage, or a non-nil error in case of\n\/\/ any failure.\nfunc Open(addr, dbname string) (storage *Storage, err error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tstorage, err = open(addr, dbname)\n\t\t}\n\t}()\n\tif err = session.Ping(); err != nil {\n\t\tmut.Lock()\n\t\tsession = nil\n\t\tmut.Unlock()\n\t}\n\treturn open(addr, dbname)\n}\n\n\/\/ Close closes the storage, releasing the connection.\nfunc (s *Storage) Close() {\n\ts.session.Close()\n}\n\n\/\/ Collection returns a collection by its name.\n\/\/\n\/\/ If the collection does not exist, MongoDB will create it.\nfunc (s *Storage) Collection(name string) *Collection {\n\treturn &Collection{s.session.DB(s.dbname).C(name)}\n}\n<commit_msg>db\/storage: further improvements in db connection<commit_after>\/\/ Copyright 2015 tsuru authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage storage\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\t\"gopkg.in\/mgo.v2\"\n)\n\nvar (\n\tsession *mgo.Session\n\tsessionLock sync.RWMutex\n)\n\n\/\/ Storage holds the connection with the database.\ntype Storage struct {\n\tsession *mgo.Session\n\tdbname string\n}\n\n\/\/ Collection represents a database collection. It embeds mgo.Collection for\n\/\/ operations, and holds a session to MongoDB. The user may close the session\n\/\/ using the method close.\ntype Collection struct {\n\t*mgo.Collection\n}\n\n\/\/ Close closes the session with the database.\nfunc (c *Collection) Close() {\n\tc.Collection.Database.Session.Close()\n}\n\nfunc open(addr, dbname string) (*mgo.Session, error) {\n\tdialInfo, err := mgo.ParseURL(addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdialInfo.FailFast = true\n\tsession, err := mgo.DialWithInfo(dialInfo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsession.SetSyncTimeout(10 * time.Second)\n\tsession.SetSocketTimeout(1 * time.Minute)\n\tsession.SetMode(mgo.Monotonic, true)\n\treturn session, nil\n}\n\n\/\/ Open dials to the MongoDB database, and return the connection (represented\n\/\/ by the type Storage).\n\/\/\n\/\/ addr is a MongoDB connection URI, and dbname is the name of the database.\n\/\/\n\/\/ This function returns a pointer to a Storage, or a non-nil error in case of\n\/\/ any failure.\nfunc Open(addr, dbname string) (storage *Storage, err error) {\n\tsessionLock.RLock()\n\tif session == nil {\n\t\tsessionLock.RUnlock()\n\t\tsessionLock.Lock()\n\t\tif session == nil {\n\t\t\tsession, err = open(addr, dbname)\n\t\t}\n\t\tsessionLock.Unlock()\n\t} else {\n\t\tsessionLock.RUnlock()\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\tstorage = &Storage{\n\t\tsession: session.Copy(),\n\t\tdbname: dbname,\n\t}\n\treturn\n}\n\n\/\/ Close closes the storage, releasing the connection.\nfunc (s *Storage) Close() {\n\ts.session.Close()\n}\n\n\/\/ Collection returns a collection by its name.\n\/\/\n\/\/ If the collection does not exist, MongoDB will create it.\nfunc (s *Storage) Collection(name string) *Collection {\n\treturn &Collection{s.session.DB(s.dbname).C(name)}\n}\n<|endoftext|>"} {"text":"<commit_before>package portal\n\nimport (\n\t\"github.com\/Cepave\/open-falcon-backend\/modules\/fe\/http\/base\"\n\tevent \"github.com\/Cepave\/open-falcon-backend\/modules\/fe\/model\/falcon_portal\"\n)\n\ntype PortalController struct {\n\tbase.BaseController\n}\n\nfunc (this *PortalController) GetEventCases() {\n\tbaseResp := this.BasicRespGen()\n\t_, err := this.SessionCheck()\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tstartTime, _ := this.GetInt64(\"startTime\", 0)\n\tendTime, _ := this.GetInt64(\"endTime\", 0)\n\tprioprity, _ := this.GetInt(\"prioprity\", -1)\n\tstatus := this.GetString(\"status\", \"ALL\")\n\tprocessStatus := this.GetString(\"process_status\", \"ALL\")\n\tmetrics := this.GetString(\"metrics\", \"ALL\")\n\n\tusername := this.GetString(\"cName\", \"\")\n\tlimitNum, _ := this.GetInt(\"limit\", 0)\n\telimit, _ := this.GetInt(\"elimit\", 0)\n\tcaseId := this.GetString(\"caseId\", \"\")\n\tevents, err := event.GetEventCases(startTime, endTime, prioprity, status, processStatus, limitNum, elimit, username, metrics, caseId)\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tbaseResp.Data[\"eventCases\"] = 
events\n\tthis.ServeApiJson(baseResp)\n\treturn\n}\n\nfunc (this *PortalController) GetEvent() {\n\tbaseResp := this.BasicRespGen()\n\t_, err := this.SessionCheck()\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tstartTime, _ := this.GetInt64(\"startTime\", 0)\n\tendTime, _ := this.GetInt64(\"endTime\", 0)\n\tstatus := this.GetString(\"status\", \"ALL\")\n\tlimit, _ := this.GetInt(\"limit\", 0)\n\tcaseId := this.GetString(\"caseId\", \"\")\n\tevents, err := event.GetEvents(startTime, endTime, status, limit, caseId)\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tbaseResp.Data[\"events\"] = events\n\tthis.ServeApiJson(baseResp)\n\treturn\n}\n\n\/\/will be deprecated\nfunc (this *PortalController) ColseCase() {\n\tbaseResp := this.BasicRespGen()\n\t_, err := this.SessionCheck()\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tusername := this.GetString(\"cName\", \"\")\n\tclosedNote := this.GetString(\"closedNote\", \"\")\n\tid := this.GetString(\"id\", \"xxx\")\n\tswitch {\n\tcase id == \"xxx\":\n\t\tthis.ResposeError(baseResp, \"You didn't pick any event id\")\n\t\treturn\n\tcase closedNote == \"\":\n\t\tthis.ResposeError(baseResp, \"You cannot skip the closed note\")\n\t\treturn\n\t}\n\terr = event.CloseEvent(username, closedNote, id)\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tthis.ServeApiJson(baseResp)\n\treturn\n}\n\nfunc (this *PortalController) AddNote() {\n\tbaseResp := this.BasicRespGen()\n\t_, err := this.SessionCheck()\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tusername := this.GetString(\"cName\", \"\")\n\tnote := this.GetString(\"note\", \"\")\n\tid := this.GetString(\"id\", \"xxx\")\n\tstatus := this.GetString(\"status\", \"\")\n\tbossId := this.GetString(\"caseId\", \"\")\n\tswitch {\n\tcase id == \"xxx\":\n\t\tthis.ResposeError(baseResp, \"You didn't pick any event id\")\n\t\treturn\n\tcase note == \"\":\n\t\tthis.ResposeError(baseResp, \"You cannot skip the closed note\")\n\t\treturn\n\t}\n\terr = event.AddNote(username, note, id, status, bossId)\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tthis.ServeApiJson(baseResp)\n\treturn\n}\n\nfunc (this *PortalController) BatchUpdateNote() {\n\tbaseResp := this.BasicRespGen()\n\t_, err := this.SessionCheck()\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tusername := this.GetString(\"cName\", \"\")\n\tnote := this.GetString(\"note\", \"\")\n\tids := this.GetString(\"ids\", \"[]\")\n\tstatus := this.GetString(\"status\", \"ignored\")\n\tif status == \"ignored\" && note == \"\" {\n\t\tnote = \"ignored by ignored api.\"\n\t}\n\tbossId := this.GetString(\"caseIds\", \"\")\n\tswitch {\n\tcase ids == \"[]\":\n\t\tthis.ResposeError(baseResp, \"You didn't pick any event id\")\n\t\treturn\n\tcase note == \"\":\n\t\tthis.ResposeError(baseResp, \"You cannot skip the closed note\")\n\t\treturn\n\t}\n\terr = event.AddNote(username, note, ids, status, bossId)\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tthis.ServeApiJson(baseResp)\n\treturn\n}\n\nfunc (this *PortalController) GetNotes() {\n\tbaseResp := this.BasicRespGen()\n\t_, err := this.SessionCheck()\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tid := this.GetString(\"id\", \"xxx\")\n\tlimitNum, _ := this.GetInt(\"limit\", 0)\n\tstartTime, _ := 
this.GetInt64(\"startTime\", 0)\n\tendTime, _ := this.GetInt64(\"endTime\", 0)\n\tfilterIgnored, _ := this.GetBool(\"filterIgnored\", false)\n\tif id == \"xxx\" {\n\t\tthis.ResposeError(baseResp, \"You didn't pick any event id\")\n\t\treturn\n\t}\n\tnotes, err := event.GetNotes(id, limitNum, startTime, endTime, filterIgnored)\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tbaseResp.Data[\"notes\"] = notes\n\tthis.ServeApiJson(baseResp)\n\treturn\n}\n\nfunc (this *PortalController) GetNote() {\n\tbaseResp := this.BasicRespGen()\n\t_, err := this.SessionCheck()\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tid, _ := this.GetInt64(\"id\", 0)\n\tif id == 0 {\n\t\tthis.ResposeError(baseResp, \"You didn't pick any note id\")\n\t\treturn\n\t}\n\tnote, err := event.GetNote(id)\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tbaseResp.Data[\"note\"] = note\n\tthis.ServeApiJson(baseResp)\n\treturn\n}\n\nfunc (this *PortalController) CountNumOfTlp() {\n\tbaseResp := this.BasicRespGen()\n\t_, err := this.SessionCheck()\n\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t} else {\n\t\tnumberOfteam, err := event.CountNumOfTlp()\n\t\tif err != nil {\n\t\t\tthis.ResposeError(baseResp, err.Error())\n\t\t\treturn\n\t\t}\n\t\tbaseResp.Data[\"count\"] = numberOfteam\n\t}\n\tthis.ServeApiJson(baseResp)\n\treturn\n}\n<commit_msg>support returning the newest notes list in the response when a note is updated on an alarm case<commit_after>package portal\n\nimport (\n\t\"github.com\/Cepave\/open-falcon-backend\/modules\/fe\/http\/base\"\n\tevent \"github.com\/Cepave\/open-falcon-backend\/modules\/fe\/model\/falcon_portal\"\n)\n\ntype PortalController struct {\n\tbase.BaseController\n}\n\nfunc (this *PortalController) GetEventCases() {\n\tbaseResp := this.BasicRespGen()\n\t_, err := this.SessionCheck()\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tstartTime, _ := this.GetInt64(\"startTime\", 0)\n\tendTime, _ := this.GetInt64(\"endTime\", 0)\n\tprioprity, _ := this.GetInt(\"prioprity\", -1)\n\tstatus := this.GetString(\"status\", \"ALL\")\n\tprocessStatus := this.GetString(\"process_status\", \"ALL\")\n\tmetrics := this.GetString(\"metrics\", \"ALL\")\n\n\tusername := this.GetString(\"cName\", \"\")\n\tlimitNum, _ := this.GetInt(\"limit\", 0)\n\telimit, _ := this.GetInt(\"elimit\", 0)\n\tcaseId := this.GetString(\"caseId\", \"\")\n\tevents, err := event.GetEventCases(startTime, endTime, prioprity, status, processStatus, limitNum, elimit, username, metrics, caseId)\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tbaseResp.Data[\"eventCases\"] = events\n\tthis.ServeApiJson(baseResp)\n\treturn\n}\n\nfunc (this *PortalController) GetEvent() {\n\tbaseResp := this.BasicRespGen()\n\t_, err := this.SessionCheck()\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tstartTime, _ := this.GetInt64(\"startTime\", 0)\n\tendTime, _ := this.GetInt64(\"endTime\", 0)\n\tstatus := this.GetString(\"status\", \"ALL\")\n\tlimit, _ := this.GetInt(\"limit\", 0)\n\tcaseId := this.GetString(\"caseId\", \"\")\n\tevents, err := event.GetEvents(startTime, endTime, status, limit, caseId)\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tbaseResp.Data[\"events\"] = events\n\tthis.ServeApiJson(baseResp)\n\treturn\n}\n\n\/\/will be deprecated\nfunc (this 
*PortalController) ColseCase() {\n\tbaseResp := this.BasicRespGen()\n\t_, err := this.SessionCheck()\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tusername := this.GetString(\"cName\", \"\")\n\tclosedNote := this.GetString(\"closedNote\", \"\")\n\tid := this.GetString(\"id\", \"xxx\")\n\tswitch {\n\tcase id == \"xxx\":\n\t\tthis.ResposeError(baseResp, \"You didn't pick any event id\")\n\t\treturn\n\tcase closedNote == \"\":\n\t\tthis.ResposeError(baseResp, \"You cannot skip the closed note\")\n\t\treturn\n\t}\n\terr = event.CloseEvent(username, closedNote, id)\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tthis.ServeApiJson(baseResp)\n\treturn\n}\n\nfunc (this *PortalController) AddNote() {\n\tbaseResp := this.BasicRespGen()\n\t_, err := this.SessionCheck()\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tusername := this.GetString(\"cName\", \"\")\n\tnote := this.GetString(\"note\", \"\")\n\tid := this.GetString(\"id\", \"xxx\")\n\tstatus := this.GetString(\"status\", \"\")\n\tbossId := this.GetString(\"caseId\", \"\")\n\tswitch {\n\tcase id == \"xxx\":\n\t\tthis.ResposeError(baseResp, \"You didn't pick any event id\")\n\t\treturn\n\tcase note == \"\":\n\t\tthis.ResposeError(baseResp, \"You cannot skip the closed note\")\n\t\treturn\n\t}\n\terr = event.AddNote(username, note, id, status, bossId)\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tnotes, err := event.GetNotes(id, 0, 0, 0, false)\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tbaseResp.Data[\"notes\"] = notes\n\tthis.ServeApiJson(baseResp)\n\treturn\n}\n\nfunc (this *PortalController) BatchUpdateNote() {\n\tbaseResp := this.BasicRespGen()\n\t_, err := this.SessionCheck()\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tusername := this.GetString(\"cName\", \"\")\n\tnote := this.GetString(\"note\", \"\")\n\tids := this.GetString(\"ids\", \"[]\")\n\tstatus := this.GetString(\"status\", \"ignored\")\n\tif status == \"ignored\" && note == \"\" {\n\t\tnote = \"ignored by ignored api.\"\n\t}\n\tbossId := this.GetString(\"caseIds\", \"\")\n\tswitch {\n\tcase ids == \"[]\":\n\t\tthis.ResposeError(baseResp, \"You didn't pick any event id\")\n\t\treturn\n\tcase note == \"\":\n\t\tthis.ResposeError(baseResp, \"You cannot skip the closed note\")\n\t\treturn\n\t}\n\terr = event.AddNote(username, note, ids, status, bossId)\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tthis.ServeApiJson(baseResp)\n\treturn\n}\n\nfunc (this *PortalController) GetNotes() {\n\tbaseResp := this.BasicRespGen()\n\t_, err := this.SessionCheck()\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tid := this.GetString(\"id\", \"xxx\")\n\tlimitNum, _ := this.GetInt(\"limit\", 0)\n\tstartTime, _ := this.GetInt64(\"startTime\", 0)\n\tendTime, _ := this.GetInt64(\"endTime\", 0)\n\tfilterIgnored, _ := this.GetBool(\"filterIgnored\", false)\n\tif id == \"xxx\" {\n\t\tthis.ResposeError(baseResp, \"You didn't pick any event id\")\n\t\treturn\n\t}\n\tnotes, err := event.GetNotes(id, limitNum, startTime, endTime, filterIgnored)\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tbaseResp.Data[\"notes\"] = notes\n\tthis.ServeApiJson(baseResp)\n\treturn\n}\n\nfunc (this *PortalController) GetNote() {\n\tbaseResp := this.BasicRespGen()\n\t_, err := 
this.SessionCheck()\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tid, _ := this.GetInt64(\"id\", 0)\n\tif id == 0 {\n\t\tthis.ResposeError(baseResp, \"You didn't pick any note id\")\n\t\treturn\n\t}\n\tnote, err := event.GetNote(id)\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t}\n\tbaseResp.Data[\"note\"] = note\n\tthis.ServeApiJson(baseResp)\n\treturn\n}\n\nfunc (this *PortalController) CountNumOfTlp() {\n\tbaseResp := this.BasicRespGen()\n\t_, err := this.SessionCheck()\n\n\tif err != nil {\n\t\tthis.ResposeError(baseResp, err.Error())\n\t\treturn\n\t} else {\n\t\tnumberOfteam, err := event.CountNumOfTlp()\n\t\tif err != nil {\n\t\t\tthis.ResposeError(baseResp, err.Error())\n\t\t\treturn\n\t\t}\n\t\tbaseResp.Data[\"count\"] = numberOfteam\n\t}\n\tthis.ServeApiJson(baseResp)\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package engine\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\tkeybase1 \"github.com\/keybase\/client\/protocol\/go\"\n)\n\n\/\/ Test changing the passphrase when user knows current\n\/\/ passphrase.\nfunc TestPassphraseChangeKnown(t *testing.T) {\n\ttc := SetupEngineTest(t, \"PassphraseChange\")\n\tdefer tc.Cleanup()\n\n\tu := CreateAndSignupFakeUser(tc, \"login\")\n\tnewPassphrase := \"password\"\n\targ := &keybase1.PassphraseChangeArg{\n\t\tOldPassphrase: u.Passphrase,\n\t\tPassphrase:    newPassphrase,\n\t}\n\n\t\/\/ using an empty secret ui to make sure existing pp doesn't come from ui prompt:\n\tctx := &Context{\n\t\tSecretUI: &libkb.TestSecretUI{},\n\t}\n\teng := NewPassphraseChange(arg, tc.G)\n\tif err := RunEngine(eng, ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err := tc.G.LoginState().VerifyPlaintextPassphrase(newPassphrase)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = tc.G.LoginState().VerifyPlaintextPassphrase(u.Passphrase)\n\tif err == nil {\n\t\tt.Fatal(\"old passphrase passed verification\")\n\t}\n}\n\n\/\/ Test changing the passphrase when user knows current\n\/\/ passphrase, prompt for it.\nfunc TestPassphraseChangeKnownPrompt(t *testing.T) {\n\ttc := SetupEngineTest(t, \"PassphraseChange\")\n\tdefer tc.Cleanup()\n\n\tu := CreateAndSignupFakeUser(tc, \"login\")\n\n\t\/\/ clear the passphrase stream cache to force a prompt\n\t\/\/ for the existing passphrase.\n\ttc.G.LoginState().Account(func(a *libkb.Account) {\n\t\ta.ClearStreamCache()\n\t}, \"clear stream cache\")\n\n\tnewPassphrase := \"password\"\n\targ := &keybase1.PassphraseChangeArg{\n\t\tPassphrase: newPassphrase,\n\t}\n\tsecui := u.NewSecretUI()\n\tctx := &Context{\n\t\tSecretUI: secui,\n\t}\n\teng := NewPassphraseChange(arg, tc.G)\n\tif err := RunEngine(eng, ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err := tc.G.LoginState().VerifyPlaintextPassphrase(newPassphrase)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = tc.G.LoginState().VerifyPlaintextPassphrase(u.Passphrase)\n\tif err == nil {\n\t\tt.Fatal(\"old passphrase passed verification\")\n\t}\n\n\tif !secui.CalledGetKBPassphrase {\n\t\tt.Errorf(\"get kb passphrase not called\")\n\t}\n}\n\n\/\/ Test changing the passphrase after logging in via pubkey.\nfunc TestPassphraseChangeAfterPubkeyLogin(t *testing.T) {\n\ttc := SetupEngineTest(t, \"PassphraseChange\")\n\tdefer tc.Cleanup()\n\n\tu := CreateAndSignupFakeUser(tc, \"login\")\n\n\t\/\/ this should do a pubkey login\n\tLogout(tc)\n\n\tsecui := u.NewSecretUI()\n\tu.LoginWithSecretUI(secui, tc.G)\n\tif !secui.CalledGetSecret {\n\t\tt.Errorf(\"get secret 
not called\")\n\t}\n\n\tnewPassphrase := \"password\"\n\targ := &keybase1.PassphraseChangeArg{\n\t\tPassphrase: newPassphrase,\n\t}\n\tctx := &Context{\n\t\tSecretUI: secui,\n\t}\n\teng := NewPassphraseChange(arg, tc.G)\n\tif err := RunEngine(eng, ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err := tc.G.LoginState().VerifyPlaintextPassphrase(newPassphrase)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = tc.G.LoginState().VerifyPlaintextPassphrase(u.Passphrase)\n\tif err == nil {\n\t\tt.Fatal(\"old passphrase passed verification\")\n\t}\n\n}\n\n\/\/ Test changing the passphrase when previous pp stream available.\nfunc TestPassphraseChangeKnownNotSupplied(t *testing.T) {\n\ttc := SetupEngineTest(t, \"PassphraseChange\")\n\tdefer tc.Cleanup()\n\n\tu := CreateAndSignupFakeUser(tc, \"login\")\n\tnewPassphrase := \"password\"\n\targ := &keybase1.PassphraseChangeArg{\n\t\tPassphrase: newPassphrase,\n\t}\n\tsecui := &libkb.TestSecretUI{}\n\tctx := &Context{\n\t\tSecretUI: secui,\n\t}\n\teng := NewPassphraseChange(arg, tc.G)\n\tif err := RunEngine(eng, ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err := tc.G.LoginState().VerifyPlaintextPassphrase(newPassphrase)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = tc.G.LoginState().VerifyPlaintextPassphrase(u.Passphrase)\n\tif err == nil {\n\t\tt.Fatal(\"old passphrase passed verification\")\n\t}\n\n\tif secui.CalledGetKBPassphrase {\n\t\tt.Errorf(\"get kb passphrase called\")\n\t}\n}\n\n\/\/ Test changing the passphrase when user forgets current\n\/\/ passphrase.\nfunc TestPassphraseChangeUnknown(t *testing.T) {\n\ttc := SetupEngineTest(t, \"PassphraseChange\")\n\tdefer tc.Cleanup()\n\n\tu := CreateAndSignupFakeUser(tc, \"login\")\n\n\t\/\/ this has a flaw: the passphrase stream cache is available.\n\t\/\/ it is being used to unlock the secret key to generate the\n\t\/\/ change passphrase proof.\n\t\/\/\n\n\tnewPassphrase := \"password\"\n\targ := &keybase1.PassphraseChangeArg{\n\t\tPassphrase: newPassphrase,\n\t\tForce: true,\n\t}\n\tctx := &Context{\n\t\tSecretUI: &libkb.TestSecretUI{},\n\t}\n\teng := NewPassphraseChange(arg, tc.G)\n\tif err := RunEngine(eng, ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err := tc.G.LoginState().VerifyPlaintextPassphrase(newPassphrase)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = tc.G.LoginState().VerifyPlaintextPassphrase(u.Passphrase)\n\tif err == nil {\n\t\tt.Fatal(\"old passphrase passed verification\")\n\t}\n}\n\n\/\/ Test changing the passphrase when user forgets current\n\/\/ passphrase and there's no passphrase stream cache.\n\/\/ No backup key available.\nfunc TestPassphraseChangeUnknownNoPSCache(t *testing.T) {\n\ttc := SetupEngineTest(t, \"PassphraseChange\")\n\tdefer tc.Cleanup()\n\n\tCreateAndSignupFakeUser(tc, \"login\")\n\n\ttc.G.LoginState().Account(func(a *libkb.Account) {\n\t\ta.ClearStreamCache()\n\t}, \"clear stream cache\")\n\n\tnewPassphrase := \"password\"\n\targ := &keybase1.PassphraseChangeArg{\n\t\tPassphrase: newPassphrase,\n\t\tForce: true,\n\t}\n\tctx := &Context{\n\t\tSecretUI: &libkb.TestSecretUI{},\n\t}\n\teng := NewPassphraseChange(arg, tc.G)\n\terr := RunEngine(eng, ctx)\n\tif err == nil {\n\t\tt.Fatal(\"passphrase change should have failed\")\n\t}\n\tif _, ok := err.(libkb.NoBackupKeysError); !ok {\n\t\tt.Fatalf(\"unexpected error: %s (%T)\", err, err)\n\t}\n}\n\n\/\/ Test changing the passphrase when user forgets current\n\/\/ passphrase and there's no passphrase stream cache.\n\/\/ Backup key exists\nfunc 
TestPassphraseChangeUnknownBackupKey(t *testing.T) {\n\ttc := SetupEngineTest(t, \"PassphraseChange\")\n\tdefer tc.Cleanup()\n\n\tu := CreateAndSignupFakeUser(tc, \"login\")\n\n\tctx := &Context{\n\t\tLogUI: tc.G.UI.GetLogUI(),\n\t\tLoginUI: libkb.TestLoginUI{},\n\t\tSecretUI: &libkb.TestSecretUI{},\n\t}\n\tbeng := NewBackup(tc.G)\n\tif err := RunEngine(beng, ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tbackupPassphrase := beng.Passphrase()\n\tctx.SecretUI = &libkb.TestSecretUI{BackupPassphrase: backupPassphrase}\n\n\ttc.G.LoginState().Account(func(a *libkb.Account) {\n\t\ta.ClearStreamCache()\n\t}, \"clear stream cache\")\n\n\tnewPassphrase := \"password\"\n\targ := &keybase1.PassphraseChangeArg{\n\t\tPassphrase: newPassphrase,\n\t\tForce: true,\n\t}\n\teng := NewPassphraseChange(arg, tc.G)\n\tif err := RunEngine(eng, ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err := tc.G.LoginState().VerifyPlaintextPassphrase(newPassphrase)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = tc.G.LoginState().VerifyPlaintextPassphrase(u.Passphrase)\n\tif err == nil {\n\t\tt.Fatal(\"old passphrase passed verification\")\n\t}\n}\n\n\/\/ Test changing the passphrase when user forgets current\n\/\/ passphrase and is logged out, but has a backup key.\nfunc TestPassphraseChangeLoggedOutBackupKey(t *testing.T) {\n\ttc := SetupEngineTest(t, \"PassphraseChange\")\n\tdefer tc.Cleanup()\n\n\tu := CreateAndSignupFakeUser(tc, \"login\")\n\n\tctx := &Context{\n\t\tLogUI: tc.G.UI.GetLogUI(),\n\t\tLoginUI: libkb.TestLoginUI{},\n\t\tSecretUI: &libkb.TestSecretUI{},\n\t}\n\tbeng := NewBackup(tc.G)\n\tif err := RunEngine(beng, ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tbackupPassphrase := beng.Passphrase()\n\tctx.SecretUI = &libkb.TestSecretUI{BackupPassphrase: backupPassphrase}\n\n\tLogout(tc)\n\n\tnewPassphrase := \"password\"\n\targ := &keybase1.PassphraseChangeArg{\n\t\tPassphrase: newPassphrase,\n\t\tForce: true,\n\t}\n\teng := NewPassphraseChange(arg, tc.G)\n\tif err := RunEngine(eng, ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err := tc.G.LoginState().VerifyPlaintextPassphrase(newPassphrase)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = tc.G.LoginState().VerifyPlaintextPassphrase(u.Passphrase)\n\tif err == nil {\n\t\tt.Fatal(\"old passphrase passed verification\")\n\t}\n}\n<commit_msg>testing ability to load secret keys after changing passphrase. 
fails when backup key is used.<commit_after>package engine\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\tkeybase1 \"github.com\/keybase\/client\/protocol\/go\"\n)\n\n\/\/ Test changing the passphrase when user knows current\n\/\/ passphrase.\nfunc TestPassphraseChangeKnown(t *testing.T) {\n\ttc := SetupEngineTest(t, \"PassphraseChange\")\n\tdefer tc.Cleanup()\n\n\tu := CreateAndSignupFakeUser(tc, \"login\")\n\tnewPassphrase := \"password\"\n\targ := &keybase1.PassphraseChangeArg{\n\t\tOldPassphrase: u.Passphrase,\n\t\tPassphrase: newPassphrase,\n\t}\n\n\t\/\/ using an empty secret ui to make sure existing pp doesn't come from ui prompt:\n\tctx := &Context{\n\t\tSecretUI: &libkb.TestSecretUI{},\n\t}\n\teng := NewPassphraseChange(arg, tc.G)\n\tif err := RunEngine(eng, ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err := tc.G.LoginState().VerifyPlaintextPassphrase(newPassphrase)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = tc.G.LoginState().VerifyPlaintextPassphrase(u.Passphrase)\n\tif err == nil {\n\t\tt.Fatal(\"old passphrase passed verification\")\n\t}\n\n\tu.Passphrase = newPassphrase\n\tassertLoadSecretKeys(tc, u, \"passphrase change known\")\n}\n\n\/\/ Test changing the passphrase when user knows current\n\/\/ passphrase, prompt for it.\nfunc TestPassphraseChangeKnownPrompt(t *testing.T) {\n\ttc := SetupEngineTest(t, \"PassphraseChange\")\n\tdefer tc.Cleanup()\n\n\tu := CreateAndSignupFakeUser(tc, \"login\")\n\n\t\/\/ clear the passphrase stream cache to force a prompt\n\t\/\/ for the existing passphrase.\n\ttc.G.LoginState().Account(func(a *libkb.Account) {\n\t\ta.ClearStreamCache()\n\t}, \"clear stream cache\")\n\n\tnewPassphrase := \"password\"\n\targ := &keybase1.PassphraseChangeArg{\n\t\tPassphrase: newPassphrase,\n\t}\n\tsecui := u.NewSecretUI()\n\tctx := &Context{\n\t\tSecretUI: secui,\n\t}\n\teng := NewPassphraseChange(arg, tc.G)\n\tif err := RunEngine(eng, ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err := tc.G.LoginState().VerifyPlaintextPassphrase(newPassphrase)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = tc.G.LoginState().VerifyPlaintextPassphrase(u.Passphrase)\n\tif err == nil {\n\t\tt.Fatal(\"old passphrase passed verification\")\n\t}\n\n\tif !secui.CalledGetKBPassphrase {\n\t\tt.Errorf(\"get kb passphrase not called\")\n\t}\n\n\tu.Passphrase = newPassphrase\n\tassertLoadSecretKeys(tc, u, \"passphrase change known prompt\")\n}\n\n\/\/ Test changing the passphrase after logging in via pubkey.\nfunc TestPassphraseChangeAfterPubkeyLogin(t *testing.T) {\n\ttc := SetupEngineTest(t, \"PassphraseChange\")\n\tdefer tc.Cleanup()\n\n\tu := CreateAndSignupFakeUser(tc, \"login\")\n\n\t\/\/ this should do a pubkey login\n\tLogout(tc)\n\n\tsecui := u.NewSecretUI()\n\tu.LoginWithSecretUI(secui, tc.G)\n\tif !secui.CalledGetSecret {\n\t\tt.Errorf(\"get secret not called\")\n\t}\n\n\tnewPassphrase := \"password\"\n\targ := &keybase1.PassphraseChangeArg{\n\t\tPassphrase: newPassphrase,\n\t}\n\tctx := &Context{\n\t\tSecretUI: secui,\n\t}\n\teng := NewPassphraseChange(arg, tc.G)\n\tif err := RunEngine(eng, ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err := tc.G.LoginState().VerifyPlaintextPassphrase(newPassphrase)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = tc.G.LoginState().VerifyPlaintextPassphrase(u.Passphrase)\n\tif err == nil {\n\t\tt.Fatal(\"old passphrase passed verification\")\n\t}\n\n\tu.Passphrase = newPassphrase\n\tassertLoadSecretKeys(tc, u, \"passphrase change after pubkey login\")\n}\n\n\/\/ Test 
changing the passphrase when previous pp stream available.\nfunc TestPassphraseChangeKnownNotSupplied(t *testing.T) {\n\ttc := SetupEngineTest(t, \"PassphraseChange\")\n\tdefer tc.Cleanup()\n\n\tu := CreateAndSignupFakeUser(tc, \"login\")\n\tnewPassphrase := \"password\"\n\targ := &keybase1.PassphraseChangeArg{\n\t\tPassphrase: newPassphrase,\n\t}\n\tsecui := &libkb.TestSecretUI{}\n\tctx := &Context{\n\t\tSecretUI: secui,\n\t}\n\teng := NewPassphraseChange(arg, tc.G)\n\tif err := RunEngine(eng, ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err := tc.G.LoginState().VerifyPlaintextPassphrase(newPassphrase)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = tc.G.LoginState().VerifyPlaintextPassphrase(u.Passphrase)\n\tif err == nil {\n\t\tt.Fatal(\"old passphrase passed verification\")\n\t}\n\n\tif secui.CalledGetKBPassphrase {\n\t\tt.Errorf(\"get kb passphrase called\")\n\t}\n\n\tu.Passphrase = newPassphrase\n\tassertLoadSecretKeys(tc, u, \"passphrase change known, not supplied\")\n}\n\n\/\/ Test changing the passphrase when user forgets current\n\/\/ passphrase.\nfunc TestPassphraseChangeUnknown(t *testing.T) {\n\ttc := SetupEngineTest(t, \"PassphraseChange\")\n\tdefer tc.Cleanup()\n\n\tu := CreateAndSignupFakeUser(tc, \"login\")\n\n\t\/\/ this has a flaw: the passphrase stream cache is available.\n\t\/\/ it is being used to unlock the secret key to generate the\n\t\/\/ change passphrase proof.\n\t\/\/\n\n\tnewPassphrase := \"password\"\n\targ := &keybase1.PassphraseChangeArg{\n\t\tPassphrase: newPassphrase,\n\t\tForce: true,\n\t}\n\tctx := &Context{\n\t\tSecretUI: &libkb.TestSecretUI{},\n\t}\n\teng := NewPassphraseChange(arg, tc.G)\n\tif err := RunEngine(eng, ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err := tc.G.LoginState().VerifyPlaintextPassphrase(newPassphrase)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = tc.G.LoginState().VerifyPlaintextPassphrase(u.Passphrase)\n\tif err == nil {\n\t\tt.Fatal(\"old passphrase passed verification\")\n\t}\n\n\tu.Passphrase = newPassphrase\n\tassertLoadSecretKeys(tc, u, \"passphrase change unknown\")\n}\n\n\/\/ Test changing the passphrase when user forgets current\n\/\/ passphrase and there's no passphrase stream cache.\n\/\/ No backup key available.\nfunc TestPassphraseChangeUnknownNoPSCache(t *testing.T) {\n\ttc := SetupEngineTest(t, \"PassphraseChange\")\n\tdefer tc.Cleanup()\n\n\tu := CreateAndSignupFakeUser(tc, \"login\")\n\n\ttc.G.LoginState().Account(func(a *libkb.Account) {\n\t\ta.ClearStreamCache()\n\t}, \"clear stream cache\")\n\n\tnewPassphrase := \"password\"\n\targ := &keybase1.PassphraseChangeArg{\n\t\tPassphrase: newPassphrase,\n\t\tForce: true,\n\t}\n\tctx := &Context{\n\t\tSecretUI: &libkb.TestSecretUI{},\n\t}\n\teng := NewPassphraseChange(arg, tc.G)\n\terr := RunEngine(eng, ctx)\n\tif err == nil {\n\t\tt.Fatal(\"passphrase change should have failed\")\n\t}\n\tif _, ok := err.(libkb.NoBackupKeysError); !ok {\n\t\tt.Fatalf(\"unexpected error: %s (%T)\", err, err)\n\t}\n\n\tassertLoadSecretKeys(tc, u, \"passphrase change unknown, no ps cache\")\n}\n\n\/\/ Test changing the passphrase when user forgets current\n\/\/ passphrase and there's no passphrase stream cache.\n\/\/ Backup key exists\nfunc TestPassphraseChangeUnknownBackupKey(t *testing.T) {\n\ttc := SetupEngineTest(t, \"PassphraseChange\")\n\tdefer tc.Cleanup()\n\n\tu := CreateAndSignupFakeUser(tc, \"login\")\n\n\tctx := &Context{\n\t\tLogUI: tc.G.UI.GetLogUI(),\n\t\tLoginUI: libkb.TestLoginUI{},\n\t\tSecretUI: &libkb.TestSecretUI{},\n\t}\n\tbeng := 
NewBackup(tc.G)\n\tif err := RunEngine(beng, ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tbackupPassphrase := beng.Passphrase()\n\tctx.SecretUI = &libkb.TestSecretUI{BackupPassphrase: backupPassphrase}\n\n\ttc.G.LoginState().Account(func(a *libkb.Account) {\n\t\ta.ClearStreamCache()\n\t}, \"clear stream cache\")\n\n\tnewPassphrase := \"password\"\n\targ := &keybase1.PassphraseChangeArg{\n\t\tPassphrase: newPassphrase,\n\t\tForce: true,\n\t}\n\teng := NewPassphraseChange(arg, tc.G)\n\tif err := RunEngine(eng, ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err := tc.G.LoginState().VerifyPlaintextPassphrase(newPassphrase)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = tc.G.LoginState().VerifyPlaintextPassphrase(u.Passphrase)\n\tif err == nil {\n\t\tt.Fatal(\"old passphrase passed verification\")\n\t}\n\n\tu.Passphrase = newPassphrase\n\tassertLoadSecretKeys(tc, u, \"passphrase change unknown, backup key\")\n}\n\nfunc assertLoadSecretKeys(tc libkb.TestContext, u *FakeUser, msg string) {\n\tme, err := libkb.LoadMe(libkb.LoadUserArg{})\n\tif err != nil {\n\t\ttc.T.Fatalf(\"%s: %s\", msg, err)\n\t}\n\tskarg := libkb.SecretKeyArg{\n\t\tMe: me,\n\t\tKeyType: libkb.DeviceSigningKeyType,\n\t}\n\tsigKey, _, err := tc.G.Keyrings.GetSecretKeyWithPrompt(nil, skarg, u.NewSecretUI(), \"testing sig\")\n\tif err != nil {\n\t\ttc.T.Fatalf(\"%s: %s\", msg, err)\n\t}\n\tif sigKey == nil {\n\t\ttc.T.Fatalf(\"%s: got nil signing key\", msg)\n\t}\n\n\tskarg.KeyType = libkb.DeviceEncryptionKeyType\n\tencKey, _, err := tc.G.Keyrings.GetSecretKeyWithPrompt(nil, skarg, u.NewSecretUI(), \"testing enc\")\n\tif err != nil {\n\t\ttc.T.Fatalf(\"%s: %s\", msg, err)\n\t}\n\tif encKey == nil {\n\t\ttc.T.Fatalf(\"%s: got nil encryption key\", msg)\n\t}\n}\n\n\/\/ Test changing the passphrase when user forgets current\n\/\/ passphrase and is logged out, but has a backup key.\nfunc TestPassphraseChangeLoggedOutBackupKey(t *testing.T) {\n\ttc := SetupEngineTest(t, \"PassphraseChange\")\n\tdefer tc.Cleanup()\n\n\tu := CreateAndSignupFakeUser(tc, \"login\")\n\n\tassertLoadSecretKeys(tc, u, \"logged out w\/ backup key, before passphrase change\")\n\n\tctx := &Context{\n\t\tLogUI: tc.G.UI.GetLogUI(),\n\t\tLoginUI: libkb.TestLoginUI{},\n\t\tSecretUI: &libkb.TestSecretUI{},\n\t}\n\tbeng := NewBackup(tc.G)\n\tif err := RunEngine(beng, ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tbackupPassphrase := beng.Passphrase()\n\tctx.SecretUI = &libkb.TestSecretUI{BackupPassphrase: backupPassphrase}\n\n\tLogout(tc)\n\n\tnewPassphrase := \"password\"\n\targ := &keybase1.PassphraseChangeArg{\n\t\tPassphrase: newPassphrase,\n\t\tForce: true,\n\t}\n\teng := NewPassphraseChange(arg, tc.G)\n\tif err := RunEngine(eng, ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err := tc.G.LoginState().VerifyPlaintextPassphrase(newPassphrase)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = tc.G.LoginState().VerifyPlaintextPassphrase(u.Passphrase)\n\tif err == nil {\n\t\tt.Fatal(\"old passphrase passed verification\")\n\t}\n\n\tu.Passphrase = newPassphrase\n\tassertLoadSecretKeys(tc, u, \"logged out w\/ backup key, after passphrase change\")\n}\n<|endoftext|>"} {"text":"<commit_before>package zigbee\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"code.google.com\/p\/gogoprotobuf\/proto\"\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"github.com\/ninjasphere\/go-zigbee\/gateway\"\n)\n\ntype ZStackGateway struct {\n\t*ZStackServer\n\tpendingResponses map[uint32]*pendingGatewayResponse\n\tzoneStateListeners map[uint64][]chan 
*gateway.DevZoneStatusChangeInd\n\tattributeReportListeners map[uint64][]chan *gateway.GwAttributeReportingInd\n}\n\ntype zStackGatewayCommand interface {\n\tproto.Message\n\tGetCmdId() gateway.GwCmdIdT\n}\n\ntype pendingGatewayResponse struct {\n\tresponse zStackGatewayCommand\n\tfinished chan error\n}\n\nfunc (s *ZStackGateway) OnZoneState(addr uint64) chan *gateway.DevZoneStatusChangeInd {\n\tc := make(chan *gateway.DevZoneStatusChangeInd)\n\n\tif _, ok := s.zoneStateListeners[addr]; !ok {\n\t\ts.zoneStateListeners[addr] = []chan *gateway.DevZoneStatusChangeInd{}\n\t}\n\n\ts.zoneStateListeners[addr] = append(s.zoneStateListeners[addr], c)\n\n\treturn c\n}\n\nfunc (s *ZStackGateway) OnAttributeReport(addr uint64) chan *gateway.GwAttributeReportingInd {\n\tc := make(chan *gateway.GwAttributeReportingInd)\n\n\tif _, ok := s.attributeReportListeners[addr]; !ok {\n\t\ts.attributeReportListeners[addr] = []chan *gateway.GwAttributeReportingInd{}\n\t}\n\n\ts.attributeReportListeners[addr] = append(s.attributeReportListeners[addr], c)\n\n\treturn c\n}\n\nfunc (s *ZStackGateway) waitForSequenceResponse(sequenceNumber uint32, response zStackGatewayCommand, timeoutDuration time.Duration) error {\n\t\/\/ We accept uint32 as thats what comes back from protobuf\n\tlog.Debugf(\"Waiting for sequence %d\", sequenceNumber)\n\t_, exists := s.pendingResponses[sequenceNumber]\n\tif exists {\n\t\ts.pendingResponses[sequenceNumber].finished <- fmt.Errorf(\"Another command with the same sequence id (%d) has been sent.\", sequenceNumber)\n\t}\n\n\tpending := &pendingGatewayResponse{\n\t\tresponse: response,\n\t\tfinished: make(chan error),\n\t}\n\ts.pendingResponses[sequenceNumber] = pending\n\n\ttimeout := make(chan bool, 1)\n\tgo func() {\n\t\ttime.Sleep(timeoutDuration)\n\t\ttimeout <- true\n\t}()\n\n\tvar err error\n\n\tselect {\n\tcase error := <-pending.finished:\n\t\terr = error\n\tcase <-timeout:\n\t\terr = fmt.Errorf(\"The request timed out after %s\", timeoutDuration)\n\t}\n\n\ts.pendingResponses[sequenceNumber] = nil\n\n\treturn err\n}\n\n\/\/ SendAsyncCommand sends a command that requires an async response from the device, using ZCL SequenceNumber\nfunc (s *ZStackGateway) SendAsyncCommand(request zStackGatewayCommand, response zStackGatewayCommand, timeout time.Duration) error {\n\tconfirmation := &gateway.GwZigbeeGenericCnf{}\n\n\t\/\/\tspew.Dump(\"sending\", request)\n\n\terr := s.SendCommand(request, confirmation)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/spew.Dump(confirmation)\n\n\tif confirmation.Status.String() != \"STATUS_SUCCESS\" {\n\t\treturn fmt.Errorf(\"Invalid confirmation status: %s\", confirmation.Status.String())\n\t}\n\n\treturn s.waitForSequenceResponse(*confirmation.SequenceNumber, response, timeout)\n}\n\n\/\/ SendCommand sends a protobuf Message to the Z-Stack server, and waits for the response\nfunc (s *ZStackGateway) SendCommand(request zStackGatewayCommand, response zStackGatewayCommand) error {\n\n\treturn s.sendCommand(\n\t\t&zStackCommand{\n\t\t\tmessage: request,\n\t\t\tcommandID: uint8(request.GetCmdId()),\n\t\t},\n\t\t&zStackCommand{\n\t\t\tmessage: response,\n\t\t\tcommandID: uint8(response.GetCmdId()),\n\t\t},\n\t)\n\n}\n\nfunc (s *ZStackGateway) onIncomingCommand(commandID uint8, bytes *[]byte) {\n\n\t\/\/bytes := <-s.Incoming\n\n\tlog.Debugf(\"gateway: Got gateway message %s\", gateway.GwCmdIdT_name[int32(commandID)])\n\n\t\/\/commandID := uint8((*bytes)[1])\n\n\tif commandID == uint8(gateway.GwCmdIdT_GW_ATTRIBUTE_REPORTING_IND) {\n\t\tlog.Debugf(\"gateway: 
Parsing as GwAttributeReportingInd\")\n\t\treport := &gateway.GwAttributeReportingInd{}\n\t\terr := proto.Unmarshal(*bytes, report)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"gateway: Could not read attribute report: %s, %v\", err, *bytes)\n\t\t\treturn\n\t\t}\n\n\t\tif log.IsDebugEnabled() {\n\t\t\tspew.Dump(\"Got attribute report\", report)\n\t\t}\n\n\t\tif listeners, ok := s.attributeReportListeners[*report.SrcAddress.IeeeAddr]; ok {\n\t\t\tfor _, listener := range listeners {\n\t\t\t\tgo func(l chan *gateway.GwAttributeReportingInd) {\n\t\t\t\t\tl <- report\n\t\t\t\t}(listener)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Debugf(\"gateway: Received an unhandled attribute report from % X : %v\", *report.SrcAddress.IeeeAddr, report)\n\t\t}\n\n\t\treturn\n\t}\n\n\tif commandID == uint8(gateway.GwCmdIdT_DEV_ZONE_STATUS_CHANGE_IND) {\n\n\t\tlog.Debugf(\"gateway: Parsing as GwDevZoneStatusChangeInd\")\n\n\t\tzoneStatus := &gateway.DevZoneStatusChangeInd{}\n\t\terr := proto.Unmarshal(*bytes, zoneStatus)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"gateway: Could not read zone status change: %s, %v\", err, *bytes)\n\t\t\treturn\n\t\t}\n\n\t\tif log.IsDebugEnabled() {\n\t\t\tspew.Dump(\"Got zone status change\", zoneStatus)\n\t\t}\n\n\t\tif listeners, ok := s.zoneStateListeners[*zoneStatus.SrcAddress.IeeeAddr]; ok {\n\t\t\tfor _, listener := range listeners {\n\t\t\t\tgo func(l chan *gateway.DevZoneStatusChangeInd) {\n\t\t\t\t\tl <- zoneStatus\n\t\t\t\t}(listener)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Debugf(\"gateway: Received an unhandled zone status change from % X : %v\", *zoneStatus.SrcAddress.IeeeAddr, zoneStatus)\n\t\t}\n\n\t\treturn\n\t}\n\n\tvar sequenceNumber uint32\n\n\tif commandID == uint8(gateway.GwCmdIdT_ZIGBEE_GENERIC_CNF) {\n\t\tlog.Debugf(\"gateway: Parsing as GwZigbeeGenericCnf\")\n\t\tmessage := &gateway.GwZigbeeGenericCnf{}\n\t\terr := proto.Unmarshal(*bytes, message)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"gateway: Could not read generic confirmation: %s, %v\", err, *bytes)\n\t\t\treturn\n\t\t}\n\n\t\tsequenceNumber = *message.SequenceNumber\n\n\t} else {\n\t\tlog.Debugf(\"gateway: Parsing as GwZigbeeGenericRspInd\")\n\t\tmessage := &gateway.GwZigbeeGenericRspInd{} \/\/ Not always this, but it will always give us the sequence number?\n\t\terr := proto.Unmarshal(*bytes, message)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"gateway: Could not get sequence number from incoming gateway message : %s, %v\", err, *bytes)\n\t\t\treturn\n\t\t}\n\n\t\tsequenceNumber = *message.SequenceNumber\n\t}\n\n\tlog.Debugf(\"gateway: Got an incoming gateway message, sequence:%d\", sequenceNumber)\n\n\tif sequenceNumber == 0 {\n\t\tlog.Debugf(\"gateway: Failed to get a sequence number from an incoming gateway message: %x\", bytes)\n\t}\n\n\tpending := s.pendingResponses[sequenceNumber]\n\n\tif pending == nil {\n\t\tlog.Infof(\"gateway: Received response to sequence number %d but we aren't listening for it\", sequenceNumber)\n\t} else {\n\n\t\tif uint8(pending.response.GetCmdId()) != commandID {\n\t\t\tpending.finished <- fmt.Errorf(\"Wrong ZCL response type. 
Wanted: 0x%X Received: 0x%X\", uint8(pending.response.GetCmdId()), commandID)\n\t\t}\n\t\tpending.finished <- proto.Unmarshal(*bytes, pending.response)\n\t}\n\n}\n\nfunc ConnectToGatewayServer(hostname string, port int) (*ZStackGateway, error) {\n\tserver, err := connectToServer(\"Gateway\", uint8(gateway.ZStackGwSysIdT_RPC_SYS_PB_GW), hostname, port)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgateway := &ZStackGateway{\n\t\tZStackServer: server,\n\t\tpendingResponses: make(map[uint32]*pendingGatewayResponse),\n\t\tzoneStateListeners: make(map[uint64][]chan *gateway.DevZoneStatusChangeInd),\n\t\tattributeReportListeners: make(map[uint64][]chan *gateway.GwAttributeReportingInd),\n\t}\n\n\tserver.onIncoming = func(commandID uint8, bytes *[]byte) {\n\t\tgateway.onIncomingCommand(commandID, bytes)\n\t}\n\n\treturn gateway, nil\n}\n<commit_msg>Add bound cluster listeners, clean up listeners in general<commit_after>package zigbee\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"code.google.com\/p\/gogoprotobuf\/proto\"\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"github.com\/ninjasphere\/go-zigbee\/gateway\"\n)\n\ntype ZStackGateway struct {\n\t*ZStackServer\n\tpendingResponses map[uint32]*pendingGatewayResponse\n\tzoneStateListeners []zoneStateListener\n\tattributeReportListeners []attributeReportListener\n\tboundClustersListeners []boundClusterListener\n}\n\ntype zStackGatewayCommand interface {\n\tproto.Message\n\tGetCmdId() gateway.GwCmdIdT\n}\n\ntype pendingGatewayResponse struct {\n\tresponse zStackGatewayCommand\n\tfinished chan error\n}\n\ntype attributeReportListener struct {\n\taddress uint64\n\tendpoint uint32\n\tchannel chan *gateway.GwAttributeReportingInd\n}\n\ntype zoneStateListener struct {\n\taddress uint64\n\tendpoint uint32\n\tchannel chan *gateway.DevZoneStatusChangeInd\n}\n\nfunc (s *ZStackGateway) OnZoneState(addr uint64, endpoint uint32) chan *gateway.DevZoneStatusChangeInd {\n\tlistener := zoneStateListener{addr, endpoint, make(chan *gateway.DevZoneStatusChangeInd)}\n\n\ts.zoneStateListeners = append(s.zoneStateListeners, listener)\n\n\treturn listener.channel\n}\n\ntype boundClusterListener struct {\n\taddress uint64\n\tendpoint uint32\n\tcluster uint32\n\tchannel chan *gateway.GwZclFrameReceiveInd\n}\n\nfunc (s *ZStackGateway) OnBoundCluster(addr uint64, endpoint uint32, cluster uint32) chan *gateway.GwZclFrameReceiveInd {\n\tlistener := boundClusterListener{addr, endpoint, cluster, make(chan *gateway.GwZclFrameReceiveInd)}\n\n\ts.boundClustersListeners = append(s.boundClustersListeners, listener)\n\n\treturn listener.channel\n}\n\nfunc (s *ZStackGateway) waitForSequenceResponse(sequenceNumber uint32, response zStackGatewayCommand, timeoutDuration time.Duration) error {\n\t\/\/ We accept uint32 as thats what comes back from protobuf\n\tlog.Debugf(\"Waiting for sequence %d\", sequenceNumber)\n\t_, exists := s.pendingResponses[sequenceNumber]\n\tif exists {\n\t\ts.pendingResponses[sequenceNumber].finished <- fmt.Errorf(\"Another command with the same sequence id (%d) has been sent.\", sequenceNumber)\n\t}\n\n\tpending := &pendingGatewayResponse{\n\t\tresponse: response,\n\t\tfinished: make(chan error),\n\t}\n\ts.pendingResponses[sequenceNumber] = pending\n\n\ttimeout := make(chan bool, 1)\n\tgo func() {\n\t\ttime.Sleep(timeoutDuration)\n\t\ttimeout <- true\n\t}()\n\n\tvar err error\n\n\tselect {\n\tcase error := <-pending.finished:\n\t\terr = error\n\tcase <-timeout:\n\t\terr = fmt.Errorf(\"The request timed out after %s\", 
timeoutDuration)\n\t}\n\n\ts.pendingResponses[sequenceNumber] = nil\n\n\treturn err\n}\n\n\/\/ SendAsyncCommand sends a command that requires an async response from the device, using ZCL SequenceNumber\nfunc (s *ZStackGateway) SendAsyncCommand(request zStackGatewayCommand, response zStackGatewayCommand, timeout time.Duration) error {\n\tconfirmation := &gateway.GwZigbeeGenericCnf{}\n\n\t\/\/\tspew.Dump(\"sending\", request)\n\n\terr := s.SendCommand(request, confirmation)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/spew.Dump(confirmation)\n\n\tif confirmation.Status.String() != \"STATUS_SUCCESS\" {\n\t\treturn fmt.Errorf(\"Invalid confirmation status: %s\", confirmation.Status.String())\n\t}\n\n\treturn s.waitForSequenceResponse(*confirmation.SequenceNumber, response, timeout)\n}\n\n\/\/ SendCommand sends a protobuf Message to the Z-Stack server, and waits for the response\nfunc (s *ZStackGateway) SendCommand(request zStackGatewayCommand, response zStackGatewayCommand) error {\n\n\treturn s.sendCommand(\n\t\t&zStackCommand{\n\t\t\tmessage: request,\n\t\t\tcommandID: uint8(request.GetCmdId()),\n\t\t},\n\t\t&zStackCommand{\n\t\t\tmessage: response,\n\t\t\tcommandID: uint8(response.GetCmdId()),\n\t\t},\n\t)\n\n}\n\nfunc (s *ZStackGateway) onIncomingCommand(commandID uint8, bytes *[]byte) {\n\n\t\/\/bytes := <-s.Incoming\n\n\tlog.Debugf(\"gateway: Got gateway message %s\", gateway.GwCmdIdT_name[int32(commandID)])\n\n\t\/\/commandID := uint8((*bytes)[1])\n\n\tif commandID == uint8(gateway.GwCmdIdT_GW_ATTRIBUTE_REPORTING_IND) {\n\t\tlog.Debugf(\"gateway: Parsing as GwAttributeReportingInd\")\n\t\treport := &gateway.GwAttributeReportingInd{}\n\t\terr := proto.Unmarshal(*bytes, report)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"gateway: Could not read attribute report: %s, %v\", err, *bytes)\n\t\t\treturn\n\t\t}\n\n\t\tif log.IsDebugEnabled() {\n\t\t\tspew.Dump(\"Got attribute report\", report)\n\t\t}\n\n\t\tif len(s.attributeReportListeners) > 0 {\n\t\t\tfor _, listener := range s.attributeReportListeners {\n\t\t\t\tif listener.address == *report.SrcAddress.IeeeAddr && listener.endpoint == *report.SrcAddress.EndpointId {\n\n\t\t\t\t\tgo func(l chan *gateway.GwAttributeReportingInd) {\n\t\t\t\t\t\tl <- report\n\t\t\t\t\t}(listener.channel)\n\n\t\t\t\t}\n\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Debugf(\"gateway: Received an unhandled attribute report from % X : %v\", *report.SrcAddress.IeeeAddr, report)\n\t\t}\n\n\t\treturn\n\t}\n\n\tif commandID == uint8(gateway.GwCmdIdT_GW_ZCL_FRAME_RECEIVE_IND) {\n\n\t\tlog.Debugf(\"gateway: Parsing as GwZclFrameReceiveInd\")\n\n\t\tframe := &gateway.GwZclFrameReceiveInd{}\n\t\terr := proto.Unmarshal(*bytes, frame)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"gateway: Could not read GwZclFrameReceiveInd: %s, %v\", err, *bytes)\n\t\t\treturn\n\t\t}\n\n\t\tif log.IsDebugEnabled() {\n\t\t\tspew.Dump(\"Got zcl frame (bound cluster?)\", frame)\n\t\t}\n\n\t\thandled := false\n\n\t\tfor _, listener := range s.boundClustersListeners {\n\t\t\tif listener.address == *frame.SrcAddress.IeeeAddr && listener.endpoint == *frame.SrcAddress.EndpointId && listener.cluster == *frame.ClusterId {\n\n\t\t\t\tgo func(l chan *gateway.GwZclFrameReceiveInd) {\n\t\t\t\t\thandled = true\n\t\t\t\t\tl <- frame\n\t\t\t\t}(listener.channel)\n\n\t\t\t} else {\n\t\t\t\tlog.Infof(\"Didn't match % X:% X, %d:%d, %d, %d\", listener.address, *frame.SrcAddress.IeeeAddr, listener.endpoint, *frame.SrcAddress.EndpointId, listener.cluster, *frame.ClusterId)\n\t\t\t}\n\n\t\t}\n\n\t\tif !handled 
{\n\t\t\tlog.Debugf(\"gateway: Received an unhandled zcl frame from % X : %v\", *frame.SrcAddress.IeeeAddr, frame)\n\t\t}\n\n\t\treturn\n\t}\n\n\tif commandID == uint8(gateway.GwCmdIdT_DEV_ZONE_STATUS_CHANGE_IND) {\n\n\t\tlog.Debugf(\"gateway: Parsing as GwDevZoneStatusChangeInd\")\n\n\t\tzoneStatus := &gateway.DevZoneStatusChangeInd{}\n\t\terr := proto.Unmarshal(*bytes, zoneStatus)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"gateway: Could not read zone status change: %s, %v\", err, *bytes)\n\t\t\treturn\n\t\t}\n\n\t\tif log.IsDebugEnabled() {\n\t\t\tspew.Dump(\"Got zone status change\", zoneStatus)\n\t\t}\n\n\t\tif len(s.zoneStateListeners) > 0 {\n\t\t\tfor _, listener := range s.zoneStateListeners {\n\t\t\t\tif listener.address == *zoneStatus.SrcAddress.IeeeAddr && listener.endpoint == *zoneStatus.SrcAddress.EndpointId {\n\n\t\t\t\t\tgo func(l chan *gateway.DevZoneStatusChangeInd) {\n\t\t\t\t\t\tl <- zoneStatus\n\t\t\t\t\t}(listener.channel)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Debugf(\"gateway: Received an unhandled zone status change from % X : %v\", *zoneStatus.SrcAddress.IeeeAddr, zoneStatus)\n\t\t}\n\n\t\treturn\n\t}\n\n\tvar sequenceNumber uint32\n\n\tif commandID == uint8(gateway.GwCmdIdT_ZIGBEE_GENERIC_CNF) {\n\t\tlog.Debugf(\"gateway: Parsing as GwZigbeeGenericCnf\")\n\t\tmessage := &gateway.GwZigbeeGenericCnf{}\n\t\terr := proto.Unmarshal(*bytes, message)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"gateway: Could not read generic confirmation: %s, %v\", err, *bytes)\n\t\t\treturn\n\t\t}\n\n\t\tsequenceNumber = *message.SequenceNumber\n\n\t} else {\n\t\tlog.Debugf(\"gateway: Parsing as GwZigbeeGenericRspInd\")\n\t\tmessage := &gateway.GwZigbeeGenericRspInd{} \/\/ Not always this, but it will always give us the sequence number?\n\t\terr := proto.Unmarshal(*bytes, message)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"gateway: Could not get sequence number from incoming gateway message : %s, %v\", err, *bytes)\n\t\t\treturn\n\t\t}\n\n\t\tsequenceNumber = *message.SequenceNumber\n\t}\n\n\tlog.Debugf(\"gateway: Got an incoming gateway message, sequence:%d\", sequenceNumber)\n\n\tif sequenceNumber == 0 {\n\t\tlog.Debugf(\"gateway: Failed to get a sequence number from an incoming gateway message: %x\", bytes)\n\t}\n\n\tpending := s.pendingResponses[sequenceNumber]\n\n\tif pending == nil {\n\t\tlog.Infof(\"gateway: Received response to sequence number %d but we aren't listening for it\", sequenceNumber)\n\t} else {\n\n\t\tif uint8(pending.response.GetCmdId()) != commandID {\n\t\t\tpending.finished <- fmt.Errorf(\"Wrong ZCL response type. 
Wanted: 0x%X Received: 0x%X\", uint8(pending.response.GetCmdId()), commandID)\n\t\t}\n\t\tpending.finished <- proto.Unmarshal(*bytes, pending.response)\n\t}\n\n}\n\nfunc ConnectToGatewayServer(hostname string, port int) (*ZStackGateway, error) {\n\tserver, err := connectToServer(\"Gateway\", uint8(gateway.ZStackGwSysIdT_RPC_SYS_PB_GW), hostname, port)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgateway := &ZStackGateway{\n\t\tZStackServer:             server,\n\t\tpendingResponses:         map[uint32]*pendingGatewayResponse{},\n\t\tzoneStateListeners:       []zoneStateListener{},\n\t\tattributeReportListeners: []attributeReportListener{},\n\t\tboundClustersListeners:   []boundClusterListener{},\n\t}\n\n\tserver.onIncoming = func(commandID uint8, bytes *[]byte) {\n\t\tgateway.onIncomingCommand(commandID, bytes)\n\t}\n\n\treturn gateway, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage topotools\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"sync\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"vitess.io\/vitess\/go\/vt\/concurrency\"\n\t\"vitess.io\/vitess\/go\/vt\/logutil\"\n\t\"vitess.io\/vitess\/go\/vt\/topo\"\n\t\"vitess.io\/vitess\/go\/vt\/topo\/topoproto\"\n\n\ttopodatapb \"vitess.io\/vitess\/go\/vt\/proto\/topodata\"\n)\n\n\/\/ RebuildKeyspace rebuilds the serving graph data while locking out other changes.\nfunc RebuildKeyspace(ctx context.Context, log logutil.Logger, ts *topo.Server, keyspace string, cells []string) (err error) {\n\tctx, unlock, lockErr := ts.LockKeyspace(ctx, keyspace, \"RebuildKeyspace\")\n\tif lockErr != nil {\n\t\treturn lockErr\n\t}\n\tdefer unlock(&err)\n\n\treturn RebuildKeyspaceLocked(ctx, log, ts, keyspace, cells)\n}\n\n\/\/ findCellsForRebuild will find all the cells in the given keyspace\n\/\/ and create an entry in the map for them\nfunc findCellsForRebuild(ki *topo.KeyspaceInfo, shardMap map[string]*topo.ShardInfo, cells []string, srvKeyspaceMap map[string]*topodatapb.SrvKeyspace) {\n\tfor _, si := range shardMap {\n\t\tfor _, cell := range si.Cells {\n\t\t\tif !topo.InCellList(cell, cells) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, ok := srvKeyspaceMap[cell]; !ok {\n\t\t\t\tsrvKeyspaceMap[cell] = &topodatapb.SrvKeyspace{\n\t\t\t\t\tShardingColumnName: ki.ShardingColumnName,\n\t\t\t\t\tShardingColumnType: ki.ShardingColumnType,\n\t\t\t\t\tServedFrom:         ki.ComputeCellServedFrom(cell),\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ RebuildKeyspaceLocked should only be used with an action lock on the keyspace\n\/\/ - otherwise the consistency of the serving graph data can't be\n\/\/ guaranteed.\n\/\/\n\/\/ Take data from the global keyspace and rebuild the local serving\n\/\/ copies in each cell.\nfunc RebuildKeyspaceLocked(ctx context.Context, log logutil.Logger, ts *topo.Server, keyspace string, cells []string) error {\n\tlog.Infof(\"rebuildKeyspace %v\", keyspace)\n\tif err := topo.CheckKeyspaceLocked(ctx, keyspace); err != nil {\n\t\treturn err\n\t}\n\n\tki, err := ts.GetKeyspace(ctx, 
keyspace)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tshards, err := ts.FindAllShardsInKeyspace(ctx, keyspace)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Build the list of cells to work on: we get the union\n\t\/\/ of all the Cells of all the Shards, limited to the provided cells.\n\t\/\/\n\t\/\/ srvKeyspaceMap is a map:\n\t\/\/ key: cell\n\t\/\/ value: topo.SrvKeyspace object being built\n\tsrvKeyspaceMap := make(map[string]*topodatapb.SrvKeyspace)\n\tfindCellsForRebuild(ki, shards, cells, srvKeyspaceMap)\n\n\t\/\/ Then we add the cells from the keyspaces we might be 'ServedFrom'.\n\tfor _, ksf := range ki.ServedFroms {\n\t\tservedFromShards, err := ts.FindAllShardsInKeyspace(ctx, ksf.Keyspace)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfindCellsForRebuild(ki, servedFromShards, cells, srvKeyspaceMap)\n\t}\n\n\t\/\/ for each entry in the srvKeyspaceMap map, we do the following:\n\t\/\/ - get the Shard structures for each shard \/ cell\n\t\/\/ - if not present, build an empty one from global Shard\n\t\/\/ - compute the union of the db types (replica, master, ...)\n\t\/\/ - sort the shards in the list by range\n\t\/\/ - check the ranges are compatible (no hole, covers everything)\n\tfor cell, srvKeyspace := range srvKeyspaceMap {\n\t\tfor _, si := range shards {\n\t\t\tservedTypes := si.GetServedTypesPerCell(cell)\n\n\t\t\t\/\/ for each type this shard is supposed to serve,\n\t\t\t\/\/ add it to srvKeyspace.Partitions\n\t\t\tfor _, tabletType := range servedTypes {\n\t\t\t\tpartition := topoproto.SrvKeyspaceGetPartition(srvKeyspace, tabletType)\n\t\t\t\tif partition == nil {\n\t\t\t\t\tpartition = &topodatapb.SrvKeyspace_KeyspacePartition{\n\t\t\t\t\t\tServedType: tabletType,\n\t\t\t\t\t}\n\t\t\t\t\tsrvKeyspace.Partitions = append(srvKeyspace.Partitions, partition)\n\t\t\t\t}\n\t\t\t\tpartition.ShardReferences = append(partition.ShardReferences, &topodatapb.ShardReference{\n\t\t\t\t\tName: si.ShardName(),\n\t\t\t\t\tKeyRange: si.KeyRange,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\tif err := orderAndCheckPartitions(cell, srvKeyspace); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ And then finally save the keyspace objects, in parallel.\n\trec := concurrency.AllErrorRecorder{}\n\twg := sync.WaitGroup{}\n\tfor cell, srvKeyspace := range srvKeyspaceMap {\n\t\twg.Add(1)\n\t\tgo func(cell string, srvKeyspace *topodatapb.SrvKeyspace) {\n\t\t\tdefer wg.Done()\n\t\t\tlog.Infof(\"updating keyspace serving graph in cell %v for %v\", cell, keyspace)\n\t\t\tif err := ts.UpdateSrvKeyspace(ctx, cell, keyspace, srvKeyspace); err != nil {\n\t\t\t\trec.RecordError(fmt.Errorf(\"writing serving data failed: %v\", err))\n\t\t\t}\n\t\t}(cell, srvKeyspace)\n\t}\n\twg.Wait()\n\treturn rec.Error()\n}\n\n\/\/ orderAndCheckPartitions will re-order the partition list, and check\n\/\/ it's correct.\nfunc orderAndCheckPartitions(cell string, srvKeyspace *topodatapb.SrvKeyspace) error {\n\t\/\/ now check them all\n\tfor _, partition := range srvKeyspace.Partitions {\n\t\ttabletType := partition.ServedType\n\t\ttopoproto.ShardReferenceArray(partition.ShardReferences).Sort()\n\n\t\t\/\/ check the first Start is MinKey, the last End is MaxKey,\n\t\t\/\/ and the values in between match: End[i] == Start[i+1]\n\t\tfirst := partition.ShardReferences[0]\n\t\tif first.KeyRange != nil && len(first.KeyRange.Start) != 0 {\n\t\t\treturn fmt.Errorf(\"keyspace partition for %v in cell %v does not start with min key\", tabletType, cell)\n\t\t}\n\t\tlast := partition.ShardReferences[len(partition.ShardReferences)-1]\n\t\tif 
last.KeyRange != nil && len(last.KeyRange.End) != 0 {\n\t\t\treturn fmt.Errorf(\"keyspace partition for %v in cell %v does not end with max key\", tabletType, cell)\n\t\t}\n\t\tfor i := range partition.ShardReferences[0 : len(partition.ShardReferences)-1] {\n\t\t\tcurrShard := partition.ShardReferences[i]\n\t\t\tnextShard := partition.ShardReferences[i+1]\n\t\t\tcurrHasKeyRange := currShard.KeyRange != nil\n\t\t\tnextHasKeyRange := nextShard.KeyRange != nil\n\t\t\tif currHasKeyRange != nextHasKeyRange {\n\t\t\t\treturn fmt.Errorf(\"shards with inconsistent KeyRanges for %v in cell %v. shards: %v, %v\", tabletType, cell, currShard, nextShard)\n\t\t\t}\n\t\t\tif !currHasKeyRange {\n\t\t\t\t\/\/ this is the custom sharding case, all KeyRanges must be nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif bytes.Equal(currShard.KeyRange.End, nextShard.KeyRange.Start) {\n\t\t\t\treturn fmt.Errorf(\"non-contiguous KeyRange values for %v in cell %v at shard %v to %v: %v != %v\", tabletType, cell, i, i+1, hex.EncodeToString(currShard.KeyRange.End), hex.EncodeToString(nextShard.KeyRange.Start))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>staticcheck: topotools package<commit_after>\/*\nCopyright 2017 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage topotools\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"sync\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"vitess.io\/vitess\/go\/vt\/concurrency\"\n\t\"vitess.io\/vitess\/go\/vt\/logutil\"\n\t\"vitess.io\/vitess\/go\/vt\/topo\"\n\t\"vitess.io\/vitess\/go\/vt\/topo\/topoproto\"\n\n\ttopodatapb \"vitess.io\/vitess\/go\/vt\/proto\/topodata\"\n)\n\n\/\/ RebuildKeyspace rebuilds the serving graph data while locking out other changes.\nfunc RebuildKeyspace(ctx context.Context, log logutil.Logger, ts *topo.Server, keyspace string, cells []string) (err error) {\n\tctx, unlock, lockErr := ts.LockKeyspace(ctx, keyspace, \"RebuildKeyspace\")\n\tif lockErr != nil {\n\t\treturn lockErr\n\t}\n\tdefer unlock(&err)\n\n\treturn RebuildKeyspaceLocked(ctx, log, ts, keyspace, cells)\n}\n\n\/\/ findCellsForRebuild will find all the cells in the given keyspace\n\/\/ and create an entry in the map for them\nfunc findCellsForRebuild(ki *topo.KeyspaceInfo, shardMap map[string]*topo.ShardInfo, cells []string, srvKeyspaceMap map[string]*topodatapb.SrvKeyspace) {\n\tfor _, si := range shardMap {\n\t\tfor _, cell := range si.Cells {\n\t\t\tif !topo.InCellList(cell, cells) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, ok := srvKeyspaceMap[cell]; !ok {\n\t\t\t\tsrvKeyspaceMap[cell] = &topodatapb.SrvKeyspace{\n\t\t\t\t\tShardingColumnName: ki.ShardingColumnName,\n\t\t\t\t\tShardingColumnType: ki.ShardingColumnType,\n\t\t\t\t\tServedFrom:         ki.ComputeCellServedFrom(cell),\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ RebuildKeyspaceLocked should only be used with an action lock on the keyspace\n\/\/ - otherwise the consistency of the serving graph data can't be\n\/\/ guaranteed.\n\/\/\n\/\/ Take data from the global keyspace and rebuild the local serving\n\/\/ copies in each 
cell.\nfunc RebuildKeyspaceLocked(ctx context.Context, log logutil.Logger, ts *topo.Server, keyspace string, cells []string) error {\n\tlog.Infof(\"rebuildKeyspace %v\", keyspace)\n\tif err := topo.CheckKeyspaceLocked(ctx, keyspace); err != nil {\n\t\treturn err\n\t}\n\n\tki, err := ts.GetKeyspace(ctx, keyspace)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tshards, err := ts.FindAllShardsInKeyspace(ctx, keyspace)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Build the list of cells to work on: we get the union\n\t\/\/ of all the Cells of all the Shards, limited to the provided cells.\n\t\/\/\n\t\/\/ srvKeyspaceMap is a map:\n\t\/\/ key: cell\n\t\/\/ value: topo.SrvKeyspace object being built\n\tsrvKeyspaceMap := make(map[string]*topodatapb.SrvKeyspace)\n\tfindCellsForRebuild(ki, shards, cells, srvKeyspaceMap)\n\n\t\/\/ Then we add the cells from the keyspaces we might be 'ServedFrom'.\n\tfor _, ksf := range ki.ServedFroms {\n\t\tservedFromShards, err := ts.FindAllShardsInKeyspace(ctx, ksf.Keyspace)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfindCellsForRebuild(ki, servedFromShards, cells, srvKeyspaceMap)\n\t}\n\n\t\/\/ for each entry in the srvKeyspaceMap map, we do the following:\n\t\/\/ - get the Shard structures for each shard \/ cell\n\t\/\/ - if not present, build an empty one from global Shard\n\t\/\/ - compute the union of the db types (replica, master, ...)\n\t\/\/ - sort the shards in the list by range\n\t\/\/ - check the ranges are compatible (no hole, covers everything)\n\tfor cell, srvKeyspace := range srvKeyspaceMap {\n\t\tfor _, si := range shards {\n\t\t\tservedTypes := si.GetServedTypesPerCell(cell)\n\n\t\t\t\/\/ for each type this shard is supposed to serve,\n\t\t\t\/\/ add it to srvKeyspace.Partitions\n\t\t\tfor _, tabletType := range servedTypes {\n\t\t\t\tpartition := topoproto.SrvKeyspaceGetPartition(srvKeyspace, tabletType)\n\t\t\t\tif partition == nil {\n\t\t\t\t\tpartition = &topodatapb.SrvKeyspace_KeyspacePartition{\n\t\t\t\t\t\tServedType: tabletType,\n\t\t\t\t\t}\n\t\t\t\t\tsrvKeyspace.Partitions = append(srvKeyspace.Partitions, partition)\n\t\t\t\t}\n\t\t\t\tpartition.ShardReferences = append(partition.ShardReferences, &topodatapb.ShardReference{\n\t\t\t\t\tName: si.ShardName(),\n\t\t\t\t\tKeyRange: si.KeyRange,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\tif err := orderAndCheckPartitions(cell, srvKeyspace); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ And then finally save the keyspace objects, in parallel.\n\trec := concurrency.AllErrorRecorder{}\n\twg := sync.WaitGroup{}\n\tfor cell, srvKeyspace := range srvKeyspaceMap {\n\t\twg.Add(1)\n\t\tgo func(cell string, srvKeyspace *topodatapb.SrvKeyspace) {\n\t\t\tdefer wg.Done()\n\t\t\tlog.Infof(\"updating keyspace serving graph in cell %v for %v\", cell, keyspace)\n\t\t\tif err := ts.UpdateSrvKeyspace(ctx, cell, keyspace, srvKeyspace); err != nil {\n\t\t\t\trec.RecordError(fmt.Errorf(\"writing serving data failed: %v\", err))\n\t\t\t}\n\t\t}(cell, srvKeyspace)\n\t}\n\twg.Wait()\n\treturn rec.Error()\n}\n\n\/\/ orderAndCheckPartitions will re-order the partition list, and check\n\/\/ it's correct.\nfunc orderAndCheckPartitions(cell string, srvKeyspace *topodatapb.SrvKeyspace) error {\n\t\/\/ now check them all\n\tfor _, partition := range srvKeyspace.Partitions {\n\t\ttabletType := partition.ServedType\n\t\ttopoproto.ShardReferenceArray(partition.ShardReferences).Sort()\n\n\t\t\/\/ check the first Start is MinKey, the last End is MaxKey,\n\t\t\/\/ and the values in between match: End[i] == 
Start[i+1]\n\t\tfirst := partition.ShardReferences[0]\n\t\tif first.KeyRange != nil && len(first.KeyRange.Start) != 0 {\n\t\t\treturn fmt.Errorf(\"keyspace partition for %v in cell %v does not start with min key\", tabletType, cell)\n\t\t}\n\t\tlast := partition.ShardReferences[len(partition.ShardReferences)-1]\n\t\tif last.KeyRange != nil && len(last.KeyRange.End) != 0 {\n\t\t\treturn fmt.Errorf(\"keyspace partition for %v in cell %v does not end with max key\", tabletType, cell)\n\t\t}\n\t\tfor i := range partition.ShardReferences[0 : len(partition.ShardReferences)-1] {\n\t\t\tcurrShard := partition.ShardReferences[i]\n\t\t\tnextShard := partition.ShardReferences[i+1]\n\t\t\tcurrHasKeyRange := currShard.KeyRange != nil\n\t\t\tnextHasKeyRange := nextShard.KeyRange != nil\n\t\t\tif currHasKeyRange != nextHasKeyRange {\n\t\t\t\treturn fmt.Errorf(\"shards with inconsistent KeyRanges for %v in cell %v. shards: %v, %v\", tabletType, cell, currShard, nextShard)\n\t\t\t}\n\t\t\tif !currHasKeyRange {\n\t\t\t\t\/\/ this is the custom sharding case, all KeyRanges must be nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !bytes.Equal(currShard.KeyRange.End, nextShard.KeyRange.Start) {\n\t\t\t\treturn fmt.Errorf(\"non-contiguous KeyRange values for %v in cell %v at shard %v to %v: %v != %v\", tabletType, cell, i, i+1, hex.EncodeToString(currShard.KeyRange.End), hex.EncodeToString(nextShard.KeyRange.Start))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/go:build linux\n\/\/ +build linux\n\n\/\/ Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"). You may\n\/\/ not use this file except in compliance with the License. A copy of the\n\/\/ License is located at\n\/\/\n\/\/\thttp:\/\/aws.amazon.com\/apache2.0\/\n\/\/\n\/\/ or in the \"license\" file accompanying this file. This file is distributed\n\/\/ on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n\/\/ express or implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\npackage appnet\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/api\/serviceconnect\"\n\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/logger\"\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/logger\/field\"\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/utils\/retry\"\n\tprometheus \"github.com\/prometheus\/client_model\/go\"\n)\n\nvar (\n\t\/\/ Injection point for UTs\n\toneSecondBackoffNoJitter = retry.NewExponentialBackoff(time.Second, time.Second, 0, 1)\n)\n\n\/\/ GetStats invokes Appnet Agent's stats API to retrieve ServiceConnect stats in prometheus format. 
This function expects\n\/\/ an Appnet-Agent-hosted HTTP server listening on the UDS path passed in config.\nfunc (cl *client) GetStats(config serviceconnect.RuntimeConfig) (map[string]*prometheus.MetricFamily, error) {\n\tresp, err := cl.performAppnetRequest(http.MethodGet, config.AdminSocketPath, config.StatsRequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\treturn parseServiceConnectStats(resp.Body)\n}\n\n\/\/ DrainInboundConnections invokes Appnet Agent's drain_listeners API which starts draining ServiceConnect inbound connections.\n\/\/ This function expects an Appnet-agent-hosted HTTP server listening on the UDS path passed in config.\nfunc (cl *client) DrainInboundConnections(config serviceconnect.RuntimeConfig) error {\n\treturn retry.RetryNWithBackoff(oneSecondBackoffNoJitter, 3, func() error {\n\t\tresp, err := cl.performAppnetRequest(http.MethodGet, config.AdminSocketPath, config.DrainRequest)\n\t\tif err != nil {\n\t\t\tlogger.Warn(\"Error invoking Appnet's DrainInboundConnections\", logger.Fields{\n\t\t\t\t\"adminSocketPath\": config.AdminSocketPath,\n\t\t\t\tfield.Error: err,\n\t\t\t})\n\t\t\treturn err\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\treturn nil\n\t})\n}\n\nfunc (cl *client) performAppnetRequest(method, udsPath, url string) (*http.Response, error) {\n\tctx := context.WithValue(context.Background(), udsAddressKey, udsPath)\n\treq, _ := http.NewRequestWithContext(ctx, method, url, nil)\n\thttpClient := cl.udsHttpClient\n\treturn httpClient.Do(req)\n}\n<commit_msg>Add a check for http response code<commit_after>\/\/go:build linux\n\/\/ +build linux\n\n\/\/ Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"). You may\n\/\/ not use this file except in compliance with the License. A copy of the\n\/\/ License is located at\n\/\/\n\/\/\thttp:\/\/aws.amazon.com\/apache2.0\/\n\/\/\n\/\/ or in the \"license\" file accompanying this file. This file is distributed\n\/\/ on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n\/\/ express or implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\npackage appnet\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/api\/serviceconnect\"\n\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/logger\"\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/logger\/field\"\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/utils\/retry\"\n\t\"github.com\/pkg\/errors\"\n\tprometheus \"github.com\/prometheus\/client_model\/go\"\n)\n\nvar (\n\t\/\/ Injection point for UTs\n\toneSecondBackoffNoJitter = retry.NewExponentialBackoff(time.Second, time.Second, 0, 1)\n)\n\n\/\/ GetStats invokes Appnet Agent's stats API to retrieve ServiceConnect stats in prometheus format. 
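A non-OK HTTP status from the stats endpoint is reported as an error. 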
This function expects\n\/\/ an Appnet-Agent-hosted HTTP server listening on the UDS path passed in config.\nfunc (cl *client) GetStats(config serviceconnect.RuntimeConfig) (map[string]*prometheus.MetricFamily, error) {\n\tresp, err := cl.performAppnetRequest(http.MethodGet, config.AdminSocketPath, config.StatsRequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, errors.Errorf(\"received non-OK HTTP status %v from Service Connect stats endpoint\", resp.StatusCode)\n\t}\n\treturn parseServiceConnectStats(resp.Body)\n}\n\n\/\/ DrainInboundConnections invokes Appnet Agent's drain_listeners API which starts draining ServiceConnect inbound connections.\n\/\/ This function expects an Appnet-agent-hosted HTTP server listening on the UDS path passed in config.\nfunc (cl *client) DrainInboundConnections(config serviceconnect.RuntimeConfig) error {\n\treturn retry.RetryNWithBackoff(oneSecondBackoffNoJitter, 3, func() error {\n\t\tresp, err := cl.performAppnetRequest(http.MethodGet, config.AdminSocketPath, config.DrainRequest)\n\t\tif err != nil {\n\t\t\tlogger.Warn(\"Error invoking Appnet's DrainInboundConnections\", logger.Fields{\n\t\t\t\t\"adminSocketPath\": config.AdminSocketPath,\n\t\t\t\tfield.Error: err,\n\t\t\t})\n\t\t\treturn err\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\treturn nil\n\t})\n}\n\nfunc (cl *client) performAppnetRequest(method, udsPath, url string) (*http.Response, error) {\n\tctx := context.WithValue(context.Background(), udsAddressKey, udsPath)\n\treq, _ := http.NewRequestWithContext(ctx, method, url, nil)\n\thttpClient := cl.udsHttpClient\n\treturn httpClient.Do(req)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage os\n\nimport (\n\t\"errors\"\n\t\"runtime\"\n\t\"syscall\"\n\t\"time\"\n\t\"unsafe\"\n)\n\nfunc (p *Process) wait() (ps *ProcessState, err error) {\n\ts, e := syscall.WaitForSingleObject(syscall.Handle(p.handle), syscall.INFINITE)\n\tswitch s {\n\tcase syscall.WAIT_OBJECT_0:\n\t\tbreak\n\tcase syscall.WAIT_FAILED:\n\t\treturn nil, NewSyscallError(\"WaitForSingleObject\", e)\n\tdefault:\n\t\treturn nil, errors.New(\"os: unexpected result from WaitForSingleObject\")\n\t}\n\tvar ec uint32\n\te = syscall.GetExitCodeProcess(syscall.Handle(p.handle), &ec)\n\tif e != nil {\n\t\treturn nil, NewSyscallError(\"GetExitCodeProcess\", e)\n\t}\n\tvar u syscall.Rusage\n\te = syscall.GetProcessTimes(syscall.Handle(p.handle), &u.CreationTime, &u.ExitTime, &u.KernelTime, &u.UserTime)\n\tif e != nil {\n\t\treturn nil, NewSyscallError(\"GetProcessTimes\", e)\n\t}\n\tp.done = true\n\tdefer p.Release()\n\treturn &ProcessState{p.Pid, syscall.WaitStatus{ExitCode: ec}, &u}, nil\n}\n\nfunc (p *Process) signal(sig Signal) error {\n\tif p.done {\n\t\treturn errors.New(\"os: process already finished\")\n\t}\n\tif sig == Kill {\n\t\te := syscall.TerminateProcess(syscall.Handle(p.handle), 1)\n\t\treturn NewSyscallError(\"TerminateProcess\", e)\n\t}\n\t\/\/ TODO(rsc): Handle Interrupt too?\n\treturn syscall.Errno(syscall.EWINDOWS)\n}\n\nfunc (p *Process) release() error {\n\tif p.handle == uintptr(syscall.InvalidHandle) {\n\t\treturn syscall.EINVAL\n\t}\n\te := syscall.CloseHandle(syscall.Handle(p.handle))\n\tif e != nil {\n\t\treturn NewSyscallError(\"CloseHandle\", e)\n\t}\n\tp.handle = uintptr(syscall.InvalidHandle)\n\t\/\/ no need for a 
finalizer anymore\n\truntime.SetFinalizer(p, nil)\n\treturn nil\n}\n\nfunc findProcess(pid int) (p *Process, err error) {\n\tconst da = syscall.STANDARD_RIGHTS_READ |\n\t\tsyscall.PROCESS_QUERY_INFORMATION | syscall.SYNCHRONIZE\n\th, e := syscall.OpenProcess(da, false, uint32(pid))\n\tif e != nil {\n\t\treturn nil, NewSyscallError(\"OpenProcess\", e)\n\t}\n\treturn newProcess(pid, uintptr(h)), nil\n}\n\nfunc init() {\n\tvar argc int32\n\tcmd := syscall.GetCommandLine()\n\targv, e := syscall.CommandLineToArgv(cmd, &argc)\n\tif e != nil {\n\t\treturn\n\t}\n\tdefer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv))))\n\tArgs = make([]string, argc)\n\tfor i, v := range (*argv)[:argc] {\n\t\tArgs[i] = string(syscall.UTF16ToString((*v)[:]))\n\t}\n}\n\nfunc ftToDuration(ft *syscall.Filetime) time.Duration {\n\tn := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime) \/\/ in 100-nanosecond intervals\n\treturn time.Duration(n*100) * time.Nanosecond\n}\n\nfunc (p *ProcessState) userTime() time.Duration {\n\treturn ftToDuration(&p.rusage.UserTime)\n}\n\nfunc (p *ProcessState) systemTime() time.Duration {\n\treturn ftToDuration(&p.rusage.KernelTime)\n}\n<commit_msg>os: sleep 5ms after process has exited on windows<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage os\n\nimport (\n\t\"errors\"\n\t\"runtime\"\n\t\"syscall\"\n\t\"time\"\n\t\"unsafe\"\n)\n\nfunc (p *Process) wait() (ps *ProcessState, err error) {\n\ts, e := syscall.WaitForSingleObject(syscall.Handle(p.handle), syscall.INFINITE)\n\tswitch s {\n\tcase syscall.WAIT_OBJECT_0:\n\t\tbreak\n\tcase syscall.WAIT_FAILED:\n\t\treturn nil, NewSyscallError(\"WaitForSingleObject\", e)\n\tdefault:\n\t\treturn nil, errors.New(\"os: unexpected result from WaitForSingleObject\")\n\t}\n\tvar ec uint32\n\te = syscall.GetExitCodeProcess(syscall.Handle(p.handle), &ec)\n\tif e != nil {\n\t\treturn nil, NewSyscallError(\"GetExitCodeProcess\", e)\n\t}\n\tvar u syscall.Rusage\n\te = syscall.GetProcessTimes(syscall.Handle(p.handle), &u.CreationTime, &u.ExitTime, &u.KernelTime, &u.UserTime)\n\tif e != nil {\n\t\treturn nil, NewSyscallError(\"GetProcessTimes\", e)\n\t}\n\tp.done = true\n\t\/\/ NOTE(brainman): It seems that sometimes the process is not dead\n\t\/\/ when WaitForSingleObject returns. But we do not know any\n\t\/\/ other way to wait for it. Sleeping for a while seems to do\n\t\/\/ the trick sometimes. 
So we will sleep and smell the roses.\n\tdefer time.Sleep(5 * time.Millisecond)\n\tdefer p.Release()\n\treturn &ProcessState{p.Pid, syscall.WaitStatus{ExitCode: ec}, &u}, nil\n}\n\nfunc (p *Process) signal(sig Signal) error {\n\tif p.done {\n\t\treturn errors.New(\"os: process already finished\")\n\t}\n\tif sig == Kill {\n\t\te := syscall.TerminateProcess(syscall.Handle(p.handle), 1)\n\t\treturn NewSyscallError(\"TerminateProcess\", e)\n\t}\n\t\/\/ TODO(rsc): Handle Interrupt too?\n\treturn syscall.Errno(syscall.EWINDOWS)\n}\n\nfunc (p *Process) release() error {\n\tif p.handle == uintptr(syscall.InvalidHandle) {\n\t\treturn syscall.EINVAL\n\t}\n\te := syscall.CloseHandle(syscall.Handle(p.handle))\n\tif e != nil {\n\t\treturn NewSyscallError(\"CloseHandle\", e)\n\t}\n\tp.handle = uintptr(syscall.InvalidHandle)\n\t\/\/ no need for a finalizer anymore\n\truntime.SetFinalizer(p, nil)\n\treturn nil\n}\n\nfunc findProcess(pid int) (p *Process, err error) {\n\tconst da = syscall.STANDARD_RIGHTS_READ |\n\t\tsyscall.PROCESS_QUERY_INFORMATION | syscall.SYNCHRONIZE\n\th, e := syscall.OpenProcess(da, false, uint32(pid))\n\tif e != nil {\n\t\treturn nil, NewSyscallError(\"OpenProcess\", e)\n\t}\n\treturn newProcess(pid, uintptr(h)), nil\n}\n\nfunc init() {\n\tvar argc int32\n\tcmd := syscall.GetCommandLine()\n\targv, e := syscall.CommandLineToArgv(cmd, &argc)\n\tif e != nil {\n\t\treturn\n\t}\n\tdefer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv))))\n\tArgs = make([]string, argc)\n\tfor i, v := range (*argv)[:argc] {\n\t\tArgs[i] = string(syscall.UTF16ToString((*v)[:]))\n\t}\n}\n\nfunc ftToDuration(ft *syscall.Filetime) time.Duration {\n\tn := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime) \/\/ in 100-nanosecond intervals\n\treturn time.Duration(n*100) * time.Nanosecond\n}\n\nfunc (p *ProcessState) userTime() time.Duration {\n\treturn ftToDuration(&p.rusage.UserTime)\n}\n\nfunc (p *ProcessState) systemTime() time.Duration {\n\treturn ftToDuration(&p.rusage.KernelTime)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage os\n\nimport \"syscall\"\n\nfunc isSymlink(stat *syscall.Stat_t) bool {\n\treturn stat.Mode&syscall.S_IFMT == syscall.S_IFLNK\n}\n\nfunc dirFromStat(name string, dir *Dir, lstat, stat *syscall.Stat_t) *Dir {\n\tdir.Dev = uint64(stat.Dev)\n\tdir.Ino = uint64(stat.Ino)\n\tdir.Nlink = uint64(stat.Nlink)\n\tdir.Mode = uint32(stat.Mode)\n\tdir.Uid = stat.Uid\n\tdir.Gid = stat.Gid\n\tdir.Rdev = uint64(stat.Rdev)\n\tdir.Size = uint64(stat.Size)\n\tdir.Blksize = uint64(stat.Blksize)\n\tdir.Blocks = uint64(stat.Blocks)\n\tdir.Atime_ns = uint64(syscall.TimespecToNsec(stat.Atimespec))\n\tdir.Mtime_ns = uint64(syscall.TimespecToNsec(stat.Mtimespec))\n\tdir.Ctime_ns = uint64(syscall.TimespecToNsec(stat.Ctimespec))\n\tfor i := len(name) - 1; i >= 0; i-- {\n\t\tif name[i] == '\/' {\n\t\t\tname = name[i+1:]\n\t\t\tbreak\n\t\t}\n\t}\n\tdir.Name = name\n\tif isSymlink(lstat) && !isSymlink(stat) {\n\t\tdir.FollowedSymlink = true\n\t}\n\treturn dir\n}\n<commit_msg>freebsd: fix build, maybe<commit_after>\/\/ Copyright 2009 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage os\n\nimport \"syscall\"\n\nfunc isSymlink(stat *syscall.Stat_t) bool {\n\treturn stat.Mode&syscall.S_IFMT == syscall.S_IFLNK\n}\n\nfunc fileInfoFromStat(name string, fi *FileInfo, lstat, stat *syscall.Stat_t) *FileInfo {\n\tfi.Dev = uint64(stat.Dev)\n\tfi.Ino = uint64(stat.Ino)\n\tfi.Nlink = uint64(stat.Nlink)\n\tfi.Mode = uint32(stat.Mode)\n\tfi.Uid = stat.Uid\n\tfi.Gid = stat.Gid\n\tfi.Rdev = uint64(stat.Rdev)\n\tfi.Size = uint64(stat.Size)\n\tfi.Blksize = uint64(stat.Blksize)\n\tfi.Blocks = uint64(stat.Blocks)\n\tfi.Atime_ns = uint64(syscall.TimespecToNsec(stat.Atimespec))\n\tfi.Mtime_ns = uint64(syscall.TimespecToNsec(stat.Mtimespec))\n\tfi.Ctime_ns = uint64(syscall.TimespecToNsec(stat.Ctimespec))\n\tfor i := len(name) - 1; i >= 0; i-- {\n\t\tif name[i] == '\/' {\n\t\t\tname = name[i+1:]\n\t\t\tbreak\n\t\t}\n\t}\n\tfi.Name = name\n\tif isSymlink(lstat) && !isSymlink(stat) {\n\t\tfi.FollowedSymlink = true\n\t}\n\treturn fi\n}\n<|endoftext|>"} {"text":"<commit_before>package hlt\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"strconv\"\n)\n\n\/\/ DockingStatus represents possible ship.DockingStatus values\ntype DockingStatus int\n\nconst (\n\t\/\/ UNDOCKED ship.DockingStatus value\n\tUNDOCKED DockingStatus = iota\n\t\/\/ DOCKING ship.DockingStatus value\n\tDOCKING\n\t\/\/ DOCKED ship.DockingStatus value\n\tDOCKED\n\t\/\/ UNDOCKING ship.DockingStatus value\n\tUNDOCKING\n)\n\n\/\/ Entity captures spatial and ownership state for Planets and Ships\ntype Entity struct {\n\tX float64\n\tY float64\n\tRadius float64\n\tHealth float64\n\tOwner int\n\tID int\n}\n\n\/\/ Position in 2D space\ntype Position struct {\n\tX, Y float64\n}\n\n\/\/ Planet object from which Halite is mined\ntype Planet struct {\n\tEntity\n\tNumDockingSpots float64\n\tNumDockedShips float64\n\tCurrentProduction float64\n\tRemainingResources float64\n\tDockedShipIDs []int\n\tDockedShips []Ship\n\tOwned float64\n\tDistance float64\n}\n\n\/\/ Ship is a player controlled Entity made for the purpose of doing combat and mining Halite\ntype Ship struct {\n\tEntity\n\tVelX float64\n\tVelY float64\n\n\tPlanetID int\n\tPlanet Planet\n\tDockingStatus DockingStatus\n\tDockingProgress float64\n\tWeaponCooldown float64\n}\n\n\/\/ CalculateDistanceTo returns a Euclidean distance to the target\nfunc (entity Entity) CalculateDistanceTo(target Entity) float64 {\n\tdx := target.X - entity.X\n\tdy := target.Y - entity.Y\n\n\treturn math.Sqrt(dx*dx + dy*dy)\n}\n\n\/\/ CalculateAngleTo returns an angle in degrees to the target\nfunc (entity Entity) CalculateAngleTo(target Entity) float64 {\n\treturn RadToDeg(entity.CalculateRadAngleTo(target))\n}\n\n\/\/ CalculateRadAngleTo returns an angle in radians to the target\nfunc (entity Entity) CalculateRadAngleTo(target Entity) float64 {\n\tdx := target.X - entity.X\n\tdy := target.Y - entity.Y\n\n\treturn math.Atan2(dy, dx)\n}\n\n\/\/ ClosestPointTo returns the closest point that is at least minDistance from the target\nfunc (entity Entity) ClosestPointTo(target Entity, minDistance float64) Entity {\n\tdist := entity.CalculateDistanceTo(target) - target.Radius - minDistance\n\tangle := target.CalculateRadAngleTo(entity)\n\tx := target.X + dist*math.Cos(angle)\n\ty := target.Y + dist*math.Sin(angle)\n\treturn Entity{\n\t\tX: x,\n\t\tY: y,\n\t\tRadius: 0,\n\t\tHealth: 0,\n\t\tOwner: -1,\n\t\tID: -1,\n\t}\n}\n\n\/\/ ParseShip from a slice of game state tokens\nfunc 
ParseShip(playerID int, tokens []string) (Ship, []string) {\n\tshipID, _ := strconv.Atoi(tokens[0])\n\tshipX, _ := strconv.ParseFloat(tokens[1], 64)\n\tshipY, _ := strconv.ParseFloat(tokens[2], 64)\n\tshipHealth, _ := strconv.ParseFloat(tokens[3], 64)\n\tshipVelX, _ := strconv.ParseFloat(tokens[4], 64)\n\tshipVelY, _ := strconv.ParseFloat(tokens[5], 64)\n\tshipDockingStatus, _ := strconv.Atoi(tokens[6])\n\tshipPlanetID, _ := strconv.Atoi(tokens[7])\n\tshipDockingProgress, _ := strconv.ParseFloat(tokens[8], 64)\n\tshipWeaponCooldown, _ := strconv.ParseFloat(tokens[9], 64)\n\n\tshipEntity := Entity{\n\t\tX: shipX,\n\t\tY: shipY,\n\t\tRadius: .5,\n\t\tHealth: shipHealth,\n\t\tOwner: playerID,\n\t\tID: shipID,\n\t}\n\n\tship := Ship{\n\t\tPlanetID: shipPlanetID,\n\t\tDockingStatus: IntToDockingStatus(shipDockingStatus),\n\t\tDockingProgress: shipDockingProgress,\n\t\tWeaponCooldown: shipWeaponCooldown,\n\t\tVelX: shipVelX,\n\t\tVelY: shipVelY,\n\t\tEntity: shipEntity,\n\t}\n\n\treturn ship, tokens[10:]\n}\n\n\/\/ ParsePlanet from a slice of game state tokens\nfunc ParsePlanet(tokens []string) (Planet, []string) {\n\tplanetID, _ := strconv.Atoi(tokens[0])\n\tplanetX, _ := strconv.ParseFloat(tokens[1], 64)\n\tplanetY, _ := strconv.ParseFloat(tokens[2], 64)\n\tplanetHealth, _ := strconv.ParseFloat(tokens[3], 64)\n\tplanetRadius, _ := strconv.ParseFloat(tokens[4], 64)\n\tplanetNumDockingSpots, _ := strconv.ParseFloat(tokens[5], 64)\n\tplanetCurrentProduction, _ := strconv.ParseFloat(tokens[6], 64)\n\tplanetRemainingResources, _ := strconv.ParseFloat(tokens[7], 64)\n\tplanetOwned, _ := strconv.ParseFloat(tokens[8], 64)\n\tplanetOwner, _ := strconv.Atoi(tokens[9])\n\tplanetNumDockedShips, _ := strconv.ParseFloat(tokens[10], 64)\n\n\tplanetEntity := Entity{\n\t\tX: planetX,\n\t\tY: planetY,\n\t\tRadius: planetRadius,\n\t\tHealth: planetHealth,\n\t\tOwner: planetOwner,\n\t\tID: planetID,\n\t}\n\n\tplanet := Planet{\n\t\tNumDockingSpots: planetNumDockingSpots,\n\t\tNumDockedShips: planetNumDockedShips,\n\t\tCurrentProduction: planetCurrentProduction,\n\t\tRemainingResources: planetRemainingResources,\n\t\tDockedShipIDs: nil,\n\t\tDockedShips: nil,\n\t\tOwned: planetOwned,\n\t\tEntity: planetEntity,\n\t}\n\n\tfor i := 0; i < int(planetNumDockedShips); i++ {\n\t\tdockedShipID, _ := strconv.Atoi(tokens[11+i])\n\t\tplanet.DockedShipIDs = append(planet.DockedShipIDs, dockedShipID)\n\t}\n\treturn planet, tokens[11+int(planetNumDockedShips):]\n}\n\n\/\/ IntToDockingStatus converts an int to a DockingStatus\nfunc IntToDockingStatus(i int) DockingStatus {\n\tstatuses := [4]DockingStatus{UNDOCKED, DOCKING, DOCKED, UNDOCKING}\n\treturn statuses[i]\n}\n\n\/\/ Thrust generates a string describing the ship's intention to move during the current turn\nfunc (ship Ship) Thrust(magnitude float64, angle float64) string {\n\tvar boundedAngle int\n\tif (angle > 0.0) {\n\t\tboundedAngle = int(math.Floor(angle + .5))\n\t} else {\n\t\tboundedAngle = int(math.Ceil(angle - .5))\n\t}\n\tboundedAngle = ((boundedAngle % 360) + 360) % 360\n\treturn fmt.Sprintf(\"t %s %s %s\", strconv.Itoa(ship.ID), strconv.Itoa(int(magnitude)), strconv.Itoa(boundedAngle))\n}\n\n\/\/ Dock generates a string describing the ship's intention to dock during the current turn\nfunc (ship Ship) Dock(planet Planet) string {\n\treturn fmt.Sprintf(\"d %s %s\", strconv.Itoa(ship.ID), strconv.Itoa(planet.ID))\n}\n\n\/\/ Undock generates a string describing the ship's intention to undock during the current turn\nfunc (ship Ship) Undock() string {\n\treturn 
fmt.Sprintf(\"u %s\", strconv.Itoa(ship.ID))\n}\n\n\/\/ NavigateBasic demonstrates how the player might move ships through space\nfunc (ship Ship) NavigateBasic(target Entity, gameMap Map) string {\n\tdistance := ship.CalculateDistanceTo(target)\n\tsafeDistance := distance - ship.Entity.Radius - target.Radius - .1\n\n\tangle := ship.CalculateAngleTo(target)\n\tspeed := 7.0\n\tif distance < 10 {\n\t\tspeed = 3.0\n\t}\n\n\tspeed = math.Min(speed, safeDistance)\n\treturn ship.Thrust(speed, angle)\n}\n\n\/\/ CanDock indicates that a ship is close enough to a given planet to dock\nfunc (ship Ship) CanDock(planet Planet) bool {\n\tdist := ship.CalculateDistanceTo(planet.Entity)\n\n\treturn dist <= (planet.Radius + 4)\n}\n\n\/\/ Navigate demonstrates how the player might negotiate obsticles between\n\/\/ a ship and its target\nfunc (ship Ship) Navigate(target Entity, gameMap Map) string {\n\tob := gameMap.ObstaclesBetween(ship.Entity, target)\n\n\tif !ob {\n\t\treturn ship.NavigateBasic(target, gameMap)\n\t}\n\n\tx0 := math.Min(ship.X, target.X)\n\tx2 := math.Max(ship.X, target.X)\n\ty0 := math.Min(ship.Y, target.Y)\n\ty2 := math.Max(ship.Y, target.Y)\n\n\tdx := (x2 - x0) \/ 5\n\tdy := (y2 - y0) \/ 5\n\tbestdist := 1000.0\n\tbestTarget := target\n\n\tfor x1 := x0; x1 <= x2; x1 += dx {\n\t\tfor y1 := y0; y1 <= y2; y1 += dy {\n\t\t\tintermediateTarget := Entity{\n\t\t\t\tX: x1,\n\t\t\t\tY: y1,\n\t\t\t\tRadius: 0,\n\t\t\t\tHealth: 0,\n\t\t\t\tOwner: 0,\n\t\t\t\tID: -1,\n\t\t\t}\n\t\t\tob1 := gameMap.ObstaclesBetween(ship.Entity, intermediateTarget)\n\t\t\tif !ob1 {\n\t\t\t\tob2 := gameMap.ObstaclesBetween(intermediateTarget, target)\n\t\t\t\tif !ob2 {\n\t\t\t\t\ttotdist := math.Sqrt(math.Pow(x1-x0, 2)+math.Pow(y1-y0, 2)) + math.Sqrt(math.Pow(x1-x2, 2)+math.Pow(y1-y2, 2))\n\t\t\t\t\tif totdist < bestdist {\n\t\t\t\t\t\tbestdist = totdist\n\t\t\t\t\t\tbestTarget = intermediateTarget\n\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ship.NavigateBasic(bestTarget, gameMap)\n}\n<commit_msg>Remove redundant parentheses in the Go starter kit<commit_after>package hlt\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"strconv\"\n)\n\n\/\/ DockingStatus represents possible ship.DockingStatus values\ntype DockingStatus int\n\nconst (\n\t\/\/ UNDOCKED ship.DockingStatus value\n\tUNDOCKED DockingStatus = iota\n\t\/\/ DOCKING ship.DockingStatus value\n\tDOCKING\n\t\/\/ DOCKED ship.DockingStatus value\n\tDOCKED\n\t\/\/ UNDOCKING ship.DockingStatus value\n\tUNDOCKING\n)\n\n\/\/ Entity captures spacial and ownership state for Planets and Ships\ntype Entity struct {\n\tX float64\n\tY float64\n\tRadius float64\n\tHealth float64\n\tOwner int\n\tID int\n}\n\n\/\/ Position in 2D space\ntype Position struct {\n\tX, Y float64\n}\n\n\/\/ Planet object from which Halite is mined\ntype Planet struct {\n\tEntity\n\tNumDockingSpots float64\n\tNumDockedShips float64\n\tCurrentProduction float64\n\tRemainingResources float64\n\tDockedShipIDs []int\n\tDockedShips []Ship\n\tOwned float64\n\tDistance float64\n}\n\n\/\/ Ship is a player controlled Entity made for the purpose of doing combat and mining Halite\ntype Ship struct {\n\tEntity\n\tVelX float64\n\tVelY float64\n\n\tPlanetID int\n\tPlanet Planet\n\tDockingStatus DockingStatus\n\tDockingProgress float64\n\tWeaponCooldown float64\n}\n\n\/\/ CalculateDistanceTo returns a euclidean distance to the target\nfunc (entity Entity) CalculateDistanceTo(target Entity) float64 {\n\tdx := target.X - entity.X\n\tdy := target.Y - entity.Y\n\n\treturn math.Sqrt(dx*dx + dy*dy)\n}\n\n\/\/ 
CalculateAngleTo returns an angle in degrees to the target\nfunc (entity Entity) CalculateAngleTo(target Entity) float64 {\n\treturn RadToDeg(entity.CalculateRadAngleTo(target))\n}\n\n\/\/ CalculateRadAngleTo returns an angle in radians to the target\nfunc (entity Entity) CalculateRadAngleTo(target Entity) float64 {\n\tdx := target.X - entity.X\n\tdy := target.Y - entity.Y\n\n\treturn math.Atan2(dy, dx)\n}\n\n\/\/ ClosestPointTo returns the closest point that is at least minDistance from the target\nfunc (entity Entity) ClosestPointTo(target Entity, minDistance float64) Entity {\n\tdist := entity.CalculateDistanceTo(target) - target.Radius - minDistance\n\tangle := target.CalculateRadAngleTo(entity)\n\tx := target.X + dist*math.Cos(angle)\n\ty := target.Y + dist*math.Sin(angle)\n\treturn Entity{\n\t\tX: x,\n\t\tY: y,\n\t\tRadius: 0,\n\t\tHealth: 0,\n\t\tOwner: -1,\n\t\tID: -1,\n\t}\n}\n\n\/\/ ParseShip from a slice of game state tokens\nfunc ParseShip(playerID int, tokens []string) (Ship, []string) {\n\tshipID, _ := strconv.Atoi(tokens[0])\n\tshipX, _ := strconv.ParseFloat(tokens[1], 64)\n\tshipY, _ := strconv.ParseFloat(tokens[2], 64)\n\tshipHealth, _ := strconv.ParseFloat(tokens[3], 64)\n\tshipVelX, _ := strconv.ParseFloat(tokens[4], 64)\n\tshipVelY, _ := strconv.ParseFloat(tokens[5], 64)\n\tshipDockingStatus, _ := strconv.Atoi(tokens[6])\n\tshipPlanetID, _ := strconv.Atoi(tokens[7])\n\tshipDockingProgress, _ := strconv.ParseFloat(tokens[8], 64)\n\tshipWeaponCooldown, _ := strconv.ParseFloat(tokens[9], 64)\n\n\tshipEntity := Entity{\n\t\tX: shipX,\n\t\tY: shipY,\n\t\tRadius: .5,\n\t\tHealth: shipHealth,\n\t\tOwner: playerID,\n\t\tID: shipID,\n\t}\n\n\tship := Ship{\n\t\tPlanetID: shipPlanetID,\n\t\tDockingStatus: IntToDockingStatus(shipDockingStatus),\n\t\tDockingProgress: shipDockingProgress,\n\t\tWeaponCooldown: shipWeaponCooldown,\n\t\tVelX: shipVelX,\n\t\tVelY: shipVelY,\n\t\tEntity: shipEntity,\n\t}\n\n\treturn ship, tokens[10:]\n}\n\n\/\/ ParsePlanet from a slice of game state tokens\nfunc ParsePlanet(tokens []string) (Planet, []string) {\n\tplanetID, _ := strconv.Atoi(tokens[0])\n\tplanetX, _ := strconv.ParseFloat(tokens[1], 64)\n\tplanetY, _ := strconv.ParseFloat(tokens[2], 64)\n\tplanetHealth, _ := strconv.ParseFloat(tokens[3], 64)\n\tplanetRadius, _ := strconv.ParseFloat(tokens[4], 64)\n\tplanetNumDockingSpots, _ := strconv.ParseFloat(tokens[5], 64)\n\tplanetCurrentProduction, _ := strconv.ParseFloat(tokens[6], 64)\n\tplanetRemainingResources, _ := strconv.ParseFloat(tokens[7], 64)\n\tplanetOwned, _ := strconv.ParseFloat(tokens[8], 64)\n\tplanetOwner, _ := strconv.Atoi(tokens[9])\n\tplanetNumDockedShips, _ := strconv.ParseFloat(tokens[10], 64)\n\n\tplanetEntity := Entity{\n\t\tX: planetX,\n\t\tY: planetY,\n\t\tRadius: planetRadius,\n\t\tHealth: planetHealth,\n\t\tOwner: planetOwner,\n\t\tID: planetID,\n\t}\n\n\tplanet := Planet{\n\t\tNumDockingSpots: planetNumDockingSpots,\n\t\tNumDockedShips: planetNumDockedShips,\n\t\tCurrentProduction: planetCurrentProduction,\n\t\tRemainingResources: planetRemainingResources,\n\t\tDockedShipIDs: nil,\n\t\tDockedShips: nil,\n\t\tOwned: planetOwned,\n\t\tEntity: planetEntity,\n\t}\n\n\tfor i := 0; i < int(planetNumDockedShips); i++ {\n\t\tdockedShipID, _ := strconv.Atoi(tokens[11+i])\n\t\tplanet.DockedShipIDs = append(planet.DockedShipIDs, dockedShipID)\n\t}\n\treturn planet, tokens[11+int(planetNumDockedShips):]\n}\n\n\/\/ IntToDockingStatus converts an int to a DockingStatus\nfunc IntToDockingStatus(i int) DockingStatus {\n\tstatuses := 
[4]DockingStatus{UNDOCKED, DOCKING, DOCKED, UNDOCKING}\n\treturn statuses[i]\n}\n\n\/\/ Thrust generates a string describing the ship's intention to move during the current turn\nfunc (ship Ship) Thrust(magnitude float64, angle float64) string {\n\tvar boundedAngle int\n\tif angle > 0.0 {\n\t\tboundedAngle = int(math.Floor(angle + .5))\n\t} else {\n\t\tboundedAngle = int(math.Ceil(angle - .5))\n\t}\n\tboundedAngle = ((boundedAngle % 360) + 360) % 360\n\treturn fmt.Sprintf(\"t %s %s %s\", strconv.Itoa(ship.ID), strconv.Itoa(int(magnitude)), strconv.Itoa(boundedAngle))\n}\n\n\/\/ Dock generates a string describing the ship's intention to dock during the current turn\nfunc (ship Ship) Dock(planet Planet) string {\n\treturn fmt.Sprintf(\"d %s %s\", strconv.Itoa(ship.ID), strconv.Itoa(planet.ID))\n}\n\n\/\/ Undock generates a string describing the ship's intention to undock during the current turn\nfunc (ship Ship) Undock() string {\n\treturn fmt.Sprintf(\"u %s\", strconv.Itoa(ship.ID))\n}\n\n\/\/ NavigateBasic demonstrates how the player might move ships through space\nfunc (ship Ship) NavigateBasic(target Entity, gameMap Map) string {\n\tdistance := ship.CalculateDistanceTo(target)\n\tsafeDistance := distance - ship.Entity.Radius - target.Radius - .1\n\n\tangle := ship.CalculateAngleTo(target)\n\tspeed := 7.0\n\tif distance < 10 {\n\t\tspeed = 3.0\n\t}\n\n\tspeed = math.Min(speed, safeDistance)\n\treturn ship.Thrust(speed, angle)\n}\n\n\/\/ CanDock indicates that a ship is close enough to a given planet to dock\nfunc (ship Ship) CanDock(planet Planet) bool {\n\tdist := ship.CalculateDistanceTo(planet.Entity)\n\n\treturn dist <= (planet.Radius + 4)\n}\n\n\/\/ Navigate demonstrates how the player might negotiate obstacles between\n\/\/ a ship and its target\nfunc (ship Ship) Navigate(target Entity, gameMap Map) string {\n\tob := gameMap.ObstaclesBetween(ship.Entity, target)\n\n\tif !ob {\n\t\treturn ship.NavigateBasic(target, gameMap)\n\t}\n\n\tx0 := math.Min(ship.X, target.X)\n\tx2 := math.Max(ship.X, target.X)\n\ty0 := math.Min(ship.Y, target.Y)\n\ty2 := math.Max(ship.Y, target.Y)\n\n\tdx := (x2 - x0) \/ 5\n\tdy := (y2 - y0) \/ 5\n\tbestdist := 1000.0\n\tbestTarget := target\n\n\tfor x1 := x0; x1 <= x2; x1 += dx {\n\t\tfor y1 := y0; y1 <= y2; y1 += dy {\n\t\t\tintermediateTarget := Entity{\n\t\t\t\tX: x1,\n\t\t\t\tY: y1,\n\t\t\t\tRadius: 0,\n\t\t\t\tHealth: 0,\n\t\t\t\tOwner: 0,\n\t\t\t\tID: -1,\n\t\t\t}\n\t\t\tob1 := gameMap.ObstaclesBetween(ship.Entity, intermediateTarget)\n\t\t\tif !ob1 {\n\t\t\t\tob2 := gameMap.ObstaclesBetween(intermediateTarget, target)\n\t\t\t\tif !ob2 {\n\t\t\t\t\ttotdist := math.Sqrt(math.Pow(x1-x0, 2)+math.Pow(y1-y0, 2)) + math.Sqrt(math.Pow(x1-x2, 2)+math.Pow(y1-y2, 2))\n\t\t\t\t\tif totdist < bestdist {\n\t\t\t\t\t\tbestdist = totdist\n\t\t\t\t\t\tbestTarget = intermediateTarget\n\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ship.NavigateBasic(bestTarget, gameMap)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ The pubsubtool executable is a convenient way to create PubSub topics and subscriptions.\n\/\/ It also allows for manual injection of messages to test systems end-to-end.\npackage main\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\t\"time\"\n\n\t\"cloud.google.com\/go\/pubsub\"\n\t\"cloud.google.com\/go\/storage\"\n\n\t\"go.skia.org\/infra\/go\/skerr\"\n\t\"go.skia.org\/infra\/go\/sklog\"\n)\n\nfunc main() {\n\tbucketName := flag.String(\"bucket_name\", \"\", \"The GCS bucket to listen to (see 
bucket_notifications)\")\n\tprojectID := flag.String(\"project_id\", \"skia-public\", \"The project for PubSub events\")\n\ttopicName := flag.String(\"topic_name\", \"\", \"The topic to create if it does not exist\")\n\tsubscriptionName := flag.String(\"subscription_name\", \"\", \"The subscription to create if it does not exist\")\n\tjsonMessageFile := flag.String(\"json_message_file\", \"\", \"A file that contains the JSON contents to send as the body of a pubsub message.\")\n\n\tflag.Parse()\n\ttask := strings.ToLower(flag.Arg(0))\n\n\tctx := context.Background()\n\tpsc, err := pubsub.NewClient(ctx, *projectID)\n\tif err != nil {\n\t\tsklog.Fatalf(\"Initializing pubsub client for project %s: %s\", *projectID, err)\n\t}\n\n\tgsc, err := storage.NewClient(ctx)\n\tif err != nil {\n\t\tsklog.Fatalf(\"Initializing GCS Client: %s\", err)\n\t}\n\n\tif task == \"create\" {\n\t\tif err := createTopicAndSubscription(ctx, psc, *topicName, *subscriptionName); err != nil {\n\t\t\tsklog.Fatalf(\"Making topic %s and subscription %s: %s\", *topicName, *subscriptionName, err)\n\t\t}\n\t} else if task == \"publish\" {\n\t\tif err := publishMessage(ctx, psc, *topicName, *jsonMessageFile); err != nil {\n\t\t\tsklog.Fatalf(\"Sending contents of %s to topic %s: %S\", *jsonMessageFile)\n\t\t}\n\t} else if task == \"bucket_notifications\" {\n\t\tif err := listBucketNotifications(ctx, gsc, *bucketName); err != nil {\n\t\t\tsklog.Fatalf(\"Listing bucket notifications on GCS bucket %s: %s\", *bucketName, err)\n\t\t}\n\t} else {\n\t\tsklog.Fatalf(`Invalid command: %q. Try \"create\".`, task)\n\t}\n}\n\nfunc publishMessage(ctx context.Context, psc *pubsub.Client, topic, jsonMessageFile string) error {\n\tif topic == \"\" || jsonMessageFile == \"\" {\n\t\treturn skerr.Fmt(\"Can't have empty topic or message file\")\n\t}\n\tbody, err := ioutil.ReadFile(jsonMessageFile)\n\tif err != nil {\n\t\treturn skerr.Wrapf(err, \"reading %s\", jsonMessageFile)\n\t}\n\tpr := psc.Topic(topic).Publish(ctx, &pubsub.Message{\n\t\tData: body,\n\t})\n\t\/\/ Blocks until message actual sent\n\t_, err = pr.Get(ctx)\n\tif err != nil {\n\t\treturn skerr.Wrap(err)\n\t}\n\tsklog.Infof(\"Sent\")\n\treturn nil\n}\n\nfunc createTopicAndSubscription(ctx context.Context, psc *pubsub.Client, topic, sub string) error {\n\tif topic == \"\" || sub == \"\" {\n\t\treturn skerr.Fmt(\"Can't have empty topic or subscription\")\n\t}\n\t\/\/ Create the topic if it doesn't exist yet.\n\tt := psc.Topic(topic)\n\tif exists, err := t.Exists(ctx); err != nil {\n\t\treturn skerr.Fmt(\"Error checking whether topic exits: %s\", err)\n\t} else if !exists {\n\t\tif t, err = psc.CreateTopic(ctx, topic); err != nil {\n\t\t\treturn skerr.Fmt(\"Error creating pubsub topic '%s': %s\", topic, err)\n\t\t}\n\t}\n\n\t\/\/ Create the subscription if it doesn't exist.\n\ts := psc.Subscription(sub)\n\tif exists, err := s.Exists(ctx); err != nil {\n\t\treturn skerr.Fmt(\"Error checking existence of pubsub subscription '%s': %s\", sub, err)\n\t} else if !exists {\n\t\t_, err = psc.CreateSubscription(ctx, sub, pubsub.SubscriptionConfig{\n\t\t\tTopic: t,\n\t\t\tAckDeadline: 2 * time.Minute,\n\t\t\tRetentionDuration: 4 * time.Hour,\n\t\t\tRetryPolicy: &pubsub.RetryPolicy{\n\t\t\t\tMinimumBackoff: time.Minute,\n\t\t\t\tMaximumBackoff: 5 * time.Minute,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn skerr.Fmt(\"Error creating pubsub subscription '%s': %s\", sub, err)\n\t\t}\n\t}\n\tsklog.Infof(\"Topic %s and Subscription %s exist if they didn't before\", topic, sub)\n\treturn 
nil\n}\n\nfunc listBucketNotifications(ctx context.Context, gsc *storage.Client, bucketName string) error {\n\tbucket := gsc.Bucket(bucketName)\n\tnotifications, err := bucket.Notifications(ctx)\n\tif err != nil {\n\t\treturn skerr.Wrap(err)\n\t}\n\tsklog.Infof(\"Retrieved: %d notifications\", len(notifications))\n\tfor _, n := range notifications {\n\t\tsklog.Infof(\"%s events under \/\/%s are published to topic %s in project %s\", n.EventTypes, n.ObjectNamePrefix, n.TopicID, n.TopicProjectID)\n\t}\n\treturn nil\n}\n<commit_msg>[gold] Add GCS bucket subscriptions to pubsubtool<commit_after>\/\/ The pubsubtool executable is a convenient way to create PubSub topics and subscriptions.\n\/\/ It also allows for manual injection of messages to test systems end-to-end.\npackage main\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\t\"time\"\n\n\t\"cloud.google.com\/go\/pubsub\"\n\t\"cloud.google.com\/go\/storage\"\n\n\t\"go.skia.org\/infra\/go\/skerr\"\n\t\"go.skia.org\/infra\/go\/sklog\"\n)\n\nfunc main() {\n\tbucketName := flag.String(\"bucket_name\", \"\", \"The GCS bucket to listen to (see bucket_notifications)\")\n\tprefix := flag.String(\"prefix\", \"\", \"The GCS prefix to listen to.\")\n\tprojectID := flag.String(\"project_id\", \"skia-public\", \"The project for PubSub events\")\n\ttopicName := flag.String(\"topic_name\", \"\", \"The topic to create if it does not exist\")\n\tsubscriptionName := flag.String(\"subscription_name\", \"\", \"The subscription to create if it does not exist\")\n\tjsonMessageFile := flag.String(\"json_message_file\", \"\", \"A file that contains the JSON contents to send as the body of a pubsub message.\")\n\n\tflag.Parse()\n\ttask := strings.ToLower(flag.Arg(0))\n\n\tctx := context.Background()\n\tpsc, err := pubsub.NewClient(ctx, *projectID)\n\tif err != nil {\n\t\tsklog.Fatalf(\"Initializing pubsub client for project %s: %s\", *projectID, err)\n\t}\n\n\tgsc, err := storage.NewClient(ctx)\n\tif err != nil {\n\t\tsklog.Fatalf(\"Initializing GCS Client: %s\", err)\n\t}\n\n\tif task == \"create\" {\n\t\tif err := createTopicAndSubscription(ctx, psc, *topicName, *subscriptionName); err != nil {\n\t\t\tsklog.Fatalf(\"Making topic %s and subscription %s: %s\", *topicName, *subscriptionName, err)\n\t\t}\n\t} else if task == \"publish\" {\n\t\tif err := publishMessage(ctx, psc, *topicName, *jsonMessageFile); err != nil {\n\t\t\tsklog.Fatalf(\"Sending contents of %s to topic %s: %s\", *jsonMessageFile, *topicName, err)\n\t\t}\n\t} else if task == \"bucket_notifications\" {\n\t\tif err := listBucketNotifications(ctx, gsc, *bucketName); err != nil {\n\t\t\tsklog.Fatalf(\"Listing bucket notifications on GCS bucket %s: %s\", *bucketName, err)\n\t\t}\n\t} else if task == \"subscribe_to_bucket\" {\n\t\tif err := subscribeToBucket(ctx, psc, *projectID, *topicName, *subscriptionName, gsc, *bucketName, *prefix); err != nil {\n\t\t\tsklog.Fatalf(\"Creating new bucket notification: %s\", err)\n\t\t}\n\t} else {\n\t\tsklog.Fatalf(`Invalid command: %q. 
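Valid commands: create, publish, bucket_notifications, subscribe_to_bucket. 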
Try \"create\".`, task)\n\t}\n}\n\nfunc publishMessage(ctx context.Context, psc *pubsub.Client, topic, jsonMessageFile string) error {\n\tif topic == \"\" || jsonMessageFile == \"\" {\n\t\treturn skerr.Fmt(\"Can't have empty topic or message file\")\n\t}\n\tbody, err := ioutil.ReadFile(jsonMessageFile)\n\tif err != nil {\n\t\treturn skerr.Wrapf(err, \"reading %s\", jsonMessageFile)\n\t}\n\tpr := psc.Topic(topic).Publish(ctx, &pubsub.Message{\n\t\tData: body,\n\t})\n\t\/\/ Blocks until message actual sent\n\t_, err = pr.Get(ctx)\n\tif err != nil {\n\t\treturn skerr.Wrap(err)\n\t}\n\tsklog.Infof(\"Sent\")\n\treturn nil\n}\n\nfunc createTopicAndSubscription(ctx context.Context, psc *pubsub.Client, topic, sub string) error {\n\tif topic == \"\" || sub == \"\" {\n\t\treturn skerr.Fmt(\"Can't have empty topic or subscription\")\n\t}\n\t\/\/ Create the topic if it doesn't exist yet.\n\tt, err := createTopicIfNotExists(ctx, psc, topic)\n\tif err != nil {\n\t\treturn skerr.Wrap(err)\n\t}\n\n\t\/\/ Create the subscription if it doesn't exist.\n\ts := psc.Subscription(sub)\n\tif exists, err := s.Exists(ctx); err != nil {\n\t\treturn skerr.Wrapf(err, \"checking existence of pubsub subscription %q\", sub)\n\t} else if !exists {\n\t\t_, err = psc.CreateSubscription(ctx, sub, pubsub.SubscriptionConfig{\n\t\t\tTopic: t,\n\t\t\t\/\/ These are the default values for the diff-metrics subscription\n\t\t\tAckDeadline: 2 * time.Minute,\n\t\t\tRetentionDuration: 4 * time.Hour,\n\t\t\tRetryPolicy: &pubsub.RetryPolicy{\n\t\t\t\tMinimumBackoff: time.Minute,\n\t\t\t\tMaximumBackoff: 5 * time.Minute,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn skerr.Wrapf(err, \"creating pubsub subscription %q\", sub)\n\t\t}\n\t}\n\tsklog.Infof(\"Topic %s and Subscription %s exist if they didn't before\", topic, sub)\n\treturn nil\n}\n\nfunc createTopicIfNotExists(ctx context.Context, psc *pubsub.Client, topic string) (*pubsub.Topic, error) {\n\tt := psc.Topic(topic)\n\tif exists, err := t.Exists(ctx); err != nil {\n\t\treturn nil, skerr.Wrapf(err, \"checking whether topic %q exists\", topic)\n\t} else if !exists {\n\t\tif t, err = psc.CreateTopic(ctx, topic); err != nil {\n\t\t\treturn nil, skerr.Wrapf(err, \"creating pubsub topic %q\", topic)\n\t\t}\n\t}\n\treturn t, nil\n}\n\nfunc listBucketNotifications(ctx context.Context, gsc *storage.Client, bucketName string) error {\n\tbucket := gsc.Bucket(bucketName)\n\tnotifications, err := bucket.Notifications(ctx)\n\tif err != nil {\n\t\treturn skerr.Wrap(err)\n\t}\n\tsklog.Infof(\"Retrieved: %d notifications\", len(notifications))\n\tfor _, n := range notifications {\n\t\tsklog.Infof(\"%s events under \/\/%s are published to topic %s in project %s\", n.EventTypes, n.ObjectNamePrefix, n.TopicID, n.TopicProjectID)\n\t}\n\treturn nil\n}\n\nfunc subscribeToBucket(ctx context.Context, psc *pubsub.Client, project, topic, subscription string, gsc *storage.Client, bucket, prefix string) error {\n\tif prefix == \"\" {\n\t\treturn skerr.Fmt(\"Must specify prefix\")\n\t}\n\tt, err := createTopicIfNotExists(ctx, psc, topic)\n\tif err != nil {\n\t\treturn skerr.Wrap(err)\n\t}\n\n\t_, err = gsc.Bucket(bucket).AddNotification(ctx, &storage.Notification{\n\t\tTopicID: topic,\n\t\tTopicProjectID: project,\n\t\tEventTypes: []string{storage.ObjectFinalizeEvent},\n\t\tObjectNamePrefix: prefix,\n\t\tPayloadFormat: storage.NoPayload, \/\/ We only care about properties\n\t})\n\tif err != nil {\n\t\treturn skerr.Wrapf(err, \"creating topic %s in project %s for files from gcs:\/\/%s\/%s\", topic, 
project, bucket, prefix)\n\t}\n\n\t\/\/ Create the subscription if it doesn't exist.\n\ts := psc.Subscription(subscription)\n\tif exists, err := s.Exists(ctx); err != nil {\n\t\treturn skerr.Wrapf(err, \"checking existence of pubsub subscription %q\", subscription)\n\t} else if !exists {\n\t\t_, err = psc.CreateSubscription(ctx, subscription, pubsub.SubscriptionConfig{\n\t\t\tTopic: t,\n\t\t\t\/\/ These are the default values for the data files subscriptions.\n\t\t\tAckDeadline: 2 * time.Minute,\n\t\t\tRetentionDuration: 2 * 24 * time.Hour,\n\t\t\tRetryPolicy: &pubsub.RetryPolicy{\n\t\t\t\tMinimumBackoff: 10 * time.Second,\n\t\t\t\tMaximumBackoff: 5 * time.Minute,\n\t\t\t},\n\t\t\t\/\/ A deadletter policy should be set up and verified via the\n\t\t\t\/\/ cloud console UI (there's usually one additional permission to grant).\n\t\t\t\/\/ Setting retry attempts to 5 is usually fine (to prevent bad files from filling up\n\t\t\t\/\/ our PubSub queue).\n\t\t})\n\t\tif err != nil {\n\t\t\treturn skerr.Wrapf(err, \"creating pubsub subscription %q\", subscription)\n\t\t}\n\t} else {\n\t\tsklog.Infof(\"Subscription %q already existed\", subscription)\n\t}\n\tsklog.Infof(\"Subscription %s ready to listen to topic %s which gets events from files created in gs:\/\/%s\/%s\", subscription, topic, bucket, prefix)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package testing provides support for automated testing of Go packages.\n\/\/ It is intended to be used in concert with the ``gotest'' utility, which automates\n\/\/ execution of any function of the form\n\/\/ func TestXxx(*testing.T)\n\/\/ where Xxx can be any alphanumeric string (but the first letter must not be in\n\/\/ [a-z]) and serves to identify the test routine.\n\/\/ These TestXxx routines should be declared within the package they are testing.\n\/\/\n\/\/ Functions of the form\n\/\/ func BenchmarkXxx(*testing.B)\n\/\/ are considered benchmarks, and are executed by gotest when the -test.bench\n\/\/ flag is provided.\n\/\/\n\/\/ A sample benchmark function looks like this:\n\/\/ func BenchmarkHello(b *testing.B) {\n\/\/ for i := 0; i < b.N; i++ {\n\/\/ fmt.Sprintf(\"hello\")\n\/\/ }\n\/\/ }\n\/\/ The benchmark package will vary b.N until the benchmark function lasts\n\/\/ long enough to be timed reliably. The output\n\/\/ testing.BenchmarkHello\t500000\t 4076 ns\/op\n\/\/ means that the loop ran 500000 times at a speed of 4076 ns per loop.\n\/\/\n\/\/ If a benchmark needs some expensive setup before running, the timer\n\/\/ may be stopped:\n\/\/ func BenchmarkBigLen(b *testing.B) {\n\/\/ b.StopTimer()\n\/\/ big := NewBig()\n\/\/ b.StartTimer()\n\/\/ for i := 0; i < b.N; i++ {\n\/\/ big.Len()\n\/\/ }\n\/\/ }\npackage testing\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\t\/\/ The short flag requests that tests run more quickly, but its functionality\n\t\/\/ is provided by test writers themselves. The testing package is just its\n\t\/\/ home. 
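Tests can call the Short function to decide whether to skip expensive cases. 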
The all.bash installation script sets it to make installation more\n\t\/\/ efficient, but by default the flag is off so a plain \"gotest\" will do a\n\t\/\/ full test of the package.\n\tshort = flag.Bool(\"test.short\", false, \"run smaller test suite to save time\")\n\n\t\/\/ Report as tests are run; default is silent for success.\n\tchatty = flag.Bool(\"test.v\", false, \"verbose: print additional output\")\n\tmatch = flag.String(\"test.run\", \"\", \"regular expression to select tests to run\")\n\tmemProfile = flag.String(\"test.memprofile\", \"\", \"write a memory profile to the named file after execution\")\n\tmemProfileRate = flag.Int(\"test.memprofilerate\", 0, \"if >=0, sets runtime.MemProfileRate\")\n\tcpuProfile = flag.String(\"test.cpuprofile\", \"\", \"write a cpu profile to the named file during execution\")\n\ttimeout = flag.Int64(\"test.timeout\", 0, \"if > 0, sets time limit for tests in seconds\")\n\tcpuListStr = flag.String(\"test.cpu\", \"\", \"comma-separated list of number of CPUs to use for each test\")\n\tparallel = flag.Int(\"test.parallel\", runtime.GOMAXPROCS(0), \"maximum test parallelism\")\n\n\tcpuList []int\n)\n\n\/\/ Short reports whether the -test.short flag is set.\nfunc Short() bool {\n\treturn *short\n}\n\n\/\/ Insert final newline if needed and tabs after internal newlines.\nfunc tabify(s string) string {\n\tn := len(s)\n\tif n > 0 && s[n-1] != '\\n' {\n\t\ts += \"\\n\"\n\t\tn++\n\t}\n\tfor i := 0; i < n-1; i++ { \/\/ -1 to avoid final newline\n\t\tif s[i] == '\\n' {\n\t\t\treturn s[0:i+1] + \"\\t\" + tabify(s[i+1:n])\n\t\t}\n\t}\n\treturn s\n}\n\n\/\/ T is a type passed to Test functions to manage test state and support formatted test logs.\n\/\/ Logs are accumulated during execution and dumped to standard error when done.\ntype T struct {\n\tname string \/\/ Name of test.\n\terrors string \/\/ Error string from test.\n\tfailed bool \/\/ Test has failed.\n\tch chan *T \/\/ Output for serial tests.\n\tstartParallel chan bool \/\/ Parallel tests will wait on this.\n\tns int64 \/\/ Duration of test in nanoseconds.\n}\n\n\/\/ Fail marks the Test function as having failed but continues execution.\nfunc (t *T) Fail() { t.failed = true }\n\n\/\/ Failed returns whether the Test function has failed.\nfunc (t *T) Failed() bool { return t.failed }\n\n\/\/ FailNow marks the Test function as having failed and stops its execution.\n\/\/ Execution will continue at the next Test.\nfunc (t *T) FailNow() {\n\tt.ns = time.Nanoseconds() - t.ns\n\tt.Fail()\n\tt.ch <- t\n\truntime.Goexit()\n}\n\n\/\/ Log formats its arguments using default formatting, analogous to Print(),\n\/\/ and records the text in the error log.\nfunc (t *T) Log(args ...interface{}) { t.errors += \"\\t\" + tabify(fmt.Sprintln(args...)) }\n\n\/\/ Logf formats its arguments according to the format, analogous to Printf(),\n\/\/ and records the text in the error log.\nfunc (t *T) Logf(format string, args ...interface{}) {\n\tt.errors += \"\\t\" + tabify(fmt.Sprintf(format, args...))\n}\n\n\/\/ Error is equivalent to Log() followed by Fail().\nfunc (t *T) Error(args ...interface{}) {\n\tt.Log(args...)\n\tt.Fail()\n}\n\n\/\/ Errorf is equivalent to Logf() followed by Fail().\nfunc (t *T) Errorf(format string, args ...interface{}) {\n\tt.Logf(format, args...)\n\tt.Fail()\n}\n\n\/\/ Fatal is equivalent to Log() followed by FailNow().\nfunc (t *T) Fatal(args ...interface{}) {\n\tt.Log(args...)\n\tt.FailNow()\n}\n\n\/\/ Fatalf is equivalent to Logf() followed by FailNow().\nfunc (t *T) Fatalf(format string, args 
...interface{}) {\n\tt.Logf(format, args...)\n\tt.FailNow()\n}\n\n\/\/ Parallel signals that this test is to be run in parallel with (and only with) \n\/\/ other parallel tests in this CPU group.\nfunc (t *T) Parallel() {\n\tt.ch <- nil \/\/ Release main testing loop\n\t<-t.startParallel \/\/ Wait for serial tests to finish\n}\n\n\/\/ An internal type but exported because it is cross-package; part of the implementation\n\/\/ of gotest.\ntype InternalTest struct {\n\tName string\n\tF func(*T)\n}\n\nfunc tRunner(t *T, test *InternalTest) {\n\tt.ns = time.Nanoseconds()\n\ttest.F(t)\n\tt.ns = time.Nanoseconds() - t.ns\n\tt.ch <- t\n}\n\n\/\/ An internal function but exported because it is cross-package; part of the implementation\n\/\/ of gotest.\nfunc Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) {\n\tflag.Parse()\n\tparseCpuList()\n\n\tbefore()\n\tstartAlarm()\n\ttestOk := RunTests(matchString, tests)\n\texampleOk := RunExamples(examples)\n\tif !testOk || !exampleOk {\n\t\tfmt.Fprintln(os.Stderr, \"FAIL\")\n\t\tos.Exit(1)\n\t}\n\tfmt.Fprintln(os.Stderr, \"PASS\")\n\tstopAlarm()\n\tRunBenchmarks(matchString, benchmarks)\n\tafter()\n}\n\nfunc report(t *T) {\n\ttstr := fmt.Sprintf(\"(%.2f seconds)\", float64(t.ns)\/1e9)\n\tformat := \"--- %s: %s %s\\n%s\"\n\tif t.failed {\n\t\tfmt.Fprintf(os.Stderr, format, \"FAIL\", t.name, tstr, t.errors)\n\t} else if *chatty {\n\t\tfmt.Fprintf(os.Stderr, format, \"PASS\", t.name, tstr, t.errors)\n\t}\n}\n\nfunc RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) {\n\tok = true\n\tif len(tests) == 0 {\n\t\tfmt.Fprintln(os.Stderr, \"testing: warning: no tests to run\")\n\t\treturn\n\t}\n\tch := make(chan *T)\n\tfor _, procs := range cpuList {\n\t\truntime.GOMAXPROCS(procs)\n\n\t\tnumParallel := 0\n\t\tstartParallel := make(chan bool)\n\n\t\tfor i := 0; i < len(tests); i++ {\n\t\t\tmatched, err := matchString(*match, tests[i].Name)\n\t\t\tif err != nil {\n\t\t\t\tprintln(\"invalid regexp for -test.run:\", err.Error())\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tif !matched {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttestName := tests[i].Name\n\t\t\tif procs != 1 {\n\t\t\t\ttestName = fmt.Sprintf(\"%s-%d\", tests[i].Name, procs)\n\t\t\t}\n\t\t\tt := &T{ch: ch, name: testName, startParallel: startParallel}\n\t\t\tif *chatty {\n\t\t\t\tprintln(\"=== RUN\", t.name)\n\t\t\t}\n\t\t\tgo tRunner(t, &tests[i])\n\t\t\tout := <-t.ch\n\t\t\tif out == nil { \/\/ Parallel run.\n\t\t\t\tnumParallel++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treport(t)\n\t\t\tok = ok && !out.failed\n\t\t}\n\n\t\trunning := 0\n\t\tfor numParallel+running > 0 {\n\t\t\tif running < *parallel && numParallel > 0 {\n\t\t\t\tstartParallel <- true\n\t\t\t\trunning++\n\t\t\t\tnumParallel--\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt := <-ch\n\t\t\treport(t)\n\t\t\tok = ok && !t.failed\n\t\t\trunning--\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ before runs before all testing.\nfunc before() {\n\tif *memProfileRate > 0 {\n\t\truntime.MemProfileRate = *memProfileRate\n\t}\n\tif *cpuProfile != \"\" {\n\t\tf, err := os.Create(*cpuProfile)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"testing: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tif err := pprof.StartCPUProfile(f); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"testing: can't start cpu profile: %s\", err)\n\t\t\tf.Close()\n\t\t\treturn\n\t\t}\n\t\t\/\/ Could save f so after can call f.Close; not worth the effort.\n\t}\n\n}\n\n\/\/ after runs after all testing.\nfunc 
after() {\n\tif *cpuProfile != \"\" {\n\t\tpprof.StopCPUProfile() \/\/ flushes profile to disk\n\t}\n\tif *memProfile != \"\" {\n\t\tf, err := os.Create(*memProfile)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"testing: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tif err = pprof.WriteHeapProfile(f); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"testing: can't write %s: %s\", *memProfile, err)\n\t\t}\n\t\tf.Close()\n\t}\n}\n\nvar timer *time.Timer\n\n\/\/ startAlarm starts an alarm if requested.\nfunc startAlarm() {\n\tif *timeout > 0 {\n\t\ttimer = time.AfterFunc(*timeout*1e9, alarm)\n\t}\n}\n\n\/\/ stopAlarm turns off the alarm.\nfunc stopAlarm() {\n\tif *timeout > 0 {\n\t\ttimer.Stop()\n\t}\n}\n\n\/\/ alarm is called if the timeout expires.\nfunc alarm() {\n\tpanic(\"test timed out\")\n}\n\nfunc parseCpuList() {\n\tif len(*cpuListStr) == 0 {\n\t\tcpuList = append(cpuList, runtime.GOMAXPROCS(-1))\n\t} else {\n\t\tfor _, val := range strings.Split(*cpuListStr, \",\") {\n\t\t\tcpu, err := strconv.Atoi(val)\n\t\t\tif err != nil || cpu <= 0 {\n\t\t\t\tprintln(\"invalid value for -test.cpu\")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tcpuList = append(cpuList, cpu)\n\t\t}\n\t}\n}\n<commit_msg>testing: add file:line stamps to messages.<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package testing provides support for automated testing of Go packages.\n\/\/ It is intended to be used in concert with the ``gotest'' utility, which automates\n\/\/ execution of any function of the form\n\/\/ func TestXxx(*testing.T)\n\/\/ where Xxx can be any alphanumeric string (but the first letter must not be in\n\/\/ [a-z]) and serves to identify the test routine.\n\/\/ These TestXxx routines should be declared within the package they are testing.\n\/\/\n\/\/ Functions of the form\n\/\/ func BenchmarkXxx(*testing.B)\n\/\/ are considered benchmarks, and are executed by gotest when the -test.bench\n\/\/ flag is provided.\n\/\/\n\/\/ A sample benchmark function looks like this:\n\/\/ func BenchmarkHello(b *testing.B) {\n\/\/ for i := 0; i < b.N; i++ {\n\/\/ fmt.Sprintf(\"hello\")\n\/\/ }\n\/\/ }\n\/\/ The benchmark package will vary b.N until the benchmark function lasts\n\/\/ long enough to be timed reliably. The output\n\/\/ testing.BenchmarkHello\t500000\t 4076 ns\/op\n\/\/ means that the loop ran 500000 times at a speed of 4076 ns per loop.\n\/\/\n\/\/ If a benchmark needs some expensive setup before running, the timer\n\/\/ may be stopped:\n\/\/ func BenchmarkBigLen(b *testing.B) {\n\/\/ b.StopTimer()\n\/\/ big := NewBig()\n\/\/ b.StartTimer()\n\/\/ for i := 0; i < b.N; i++ {\n\/\/ big.Len()\n\/\/ }\n\/\/ }\npackage testing\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\t\/\/ The short flag requests that tests run more quickly, but its functionality\n\t\/\/ is provided by test writers themselves. The testing package is just its\n\t\/\/ home. 
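Tests can call the Short function to decide whether to skip expensive cases. 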
The all.bash installation script sets it to make installation more\n\t\/\/ efficient, but by default the flag is off so a plain \"gotest\" will do a\n\t\/\/ full test of the package.\n\tshort = flag.Bool(\"test.short\", false, \"run smaller test suite to save time\")\n\n\t\/\/ Report as tests are run; default is silent for success.\n\tchatty         = flag.Bool(\"test.v\", false, \"verbose: print additional output\")\n\tmatch          = flag.String(\"test.run\", \"\", \"regular expression to select tests to run\")\n\tmemProfile     = flag.String(\"test.memprofile\", \"\", \"write a memory profile to the named file after execution\")\n\tmemProfileRate = flag.Int(\"test.memprofilerate\", 0, \"if >=0, sets runtime.MemProfileRate\")\n\tcpuProfile     = flag.String(\"test.cpuprofile\", \"\", \"write a cpu profile to the named file during execution\")\n\ttimeout        = flag.Int64(\"test.timeout\", 0, \"if > 0, sets time limit for tests in seconds\")\n\tcpuListStr     = flag.String(\"test.cpu\", \"\", \"comma-separated list of number of CPUs to use for each test\")\n\tparallel       = flag.Int(\"test.parallel\", runtime.GOMAXPROCS(0), \"maximum test parallelism\")\n\n\tcpuList []int\n)\n\n\/\/ Short reports whether the -test.short flag is set.\nfunc Short() bool {\n\treturn *short\n}\n\n\/\/ decorate inserts the final newline if needed and indentation tabs for formatting.\n\/\/ If addFileLine is true, it also prefixes the string with the file and line of the call site.\nfunc decorate(s string, addFileLine bool) string {\n\tif addFileLine {\n\t\t_, file, line, ok := runtime.Caller(3) \/\/ decorate + log + public function.\n\t\tif ok {\n\t\t\t\/\/ Truncate file name at last file name separator.\n\t\t\tif index := strings.LastIndex(file, \"\/\"); index >= 0 {\n\t\t\t\tfile = file[index+1:]\n\t\t\t} else if index = strings.LastIndex(file, \"\\\\\"); index >= 0 {\n\t\t\t\tfile = file[index+1:]\n\t\t\t}\n\t\t} else {\n\t\t\tfile = \"???\"\n\t\t\tline = 1\n\t\t}\n\t\ts = fmt.Sprintf(\"%s:%d: %s\", file, line, s)\n\t}\n\ts = \"\\t\" + s \/\/ Every line is indented at least one tab.\n\tn := len(s)\n\tif n > 0 && s[n-1] != '\\n' {\n\t\ts += \"\\n\"\n\t\tn++\n\t}\n\tfor i := 0; i < n-1; i++ { \/\/ -1 to avoid final newline\n\t\tif s[i] == '\\n' {\n\t\t\t\/\/ Second and subsequent lines are indented an extra tab.\n\t\t\treturn s[0:i+1] + \"\\t\" + decorate(s[i+1:n], false)\n\t\t}\n\t}\n\treturn s\n}\n\n\/\/ T is a type passed to Test functions to manage test state and support formatted test logs.\n\/\/ Logs are accumulated during execution and dumped to standard error when done.\ntype T struct {\n\tname          string    \/\/ Name of test.\n\terrors        string    \/\/ Error string from test.\n\tfailed        bool      \/\/ Test has failed.\n\tch            chan *T   \/\/ Output for serial tests.\n\tstartParallel chan bool \/\/ Parallel tests will wait on this.\n\tns            int64     \/\/ Duration of test in nanoseconds.\n}\n\n\/\/ Fail marks the Test function as having failed but continues execution.\nfunc (t *T) Fail() { t.failed = true }\n\n\/\/ Failed returns whether the Test function has failed.\nfunc (t *T) Failed() bool { return t.failed }\n\n\/\/ FailNow marks the Test function as having failed and stops its execution.\n\/\/ Execution will continue at the next Test.\nfunc (t *T) FailNow() {\n\tt.ns = time.Nanoseconds() - t.ns\n\tt.Fail()\n\tt.ch <- t\n\truntime.Goexit()\n}\n\n\/\/ log generates the output. 
It's always at the same stack depth.\nfunc (t *T) log(s string) { t.errors += decorate(s, true) }\n\n\/\/ Log formats its arguments using default formatting, analogous to Print(),\n\/\/ and records the text in the error log.\nfunc (t *T) Log(args ...interface{}) { t.log(fmt.Sprintln(args...)) }\n\n\/\/ Logf formats its arguments according to the format, analogous to Printf(),\n\/\/ and records the text in the error log.\nfunc (t *T) Logf(format string, args ...interface{}) { t.log(fmt.Sprintf(format, args...)) }\n\n\/\/ Error is equivalent to Log() followed by Fail().\nfunc (t *T) Error(args ...interface{}) {\n\tt.log(fmt.Sprintln(args...))\n\tt.Fail()\n}\n\n\/\/ Errorf is equivalent to Logf() followed by Fail().\nfunc (t *T) Errorf(format string, args ...interface{}) {\n\tt.log(fmt.Sprintf(format, args...))\n\tt.Fail()\n}\n\n\/\/ Fatal is equivalent to Log() followed by FailNow().\nfunc (t *T) Fatal(args ...interface{}) {\n\tt.log(fmt.Sprintln(args...))\n\tt.FailNow()\n}\n\n\/\/ Fatalf is equivalent to Logf() followed by FailNow().\nfunc (t *T) Fatalf(format string, args ...interface{}) {\n\tt.log(fmt.Sprintf(format, args...))\n\tt.FailNow()\n}\n\n\/\/ Parallel signals that this test is to be run in parallel with (and only with) \n\/\/ other parallel tests in this CPU group.\nfunc (t *T) Parallel() {\n\tt.ch <- nil \/\/ Release main testing loop\n\t<-t.startParallel \/\/ Wait for serial tests to finish\n}\n\n\/\/ An internal type but exported because it is cross-package; part of the implementation\n\/\/ of gotest.\ntype InternalTest struct {\n\tName string\n\tF func(*T)\n}\n\nfunc tRunner(t *T, test *InternalTest) {\n\tt.ns = time.Nanoseconds()\n\ttest.F(t)\n\tt.ns = time.Nanoseconds() - t.ns\n\tt.ch <- t\n}\n\n\/\/ An internal function but exported because it is cross-package; part of the implementation\n\/\/ of gotest.\nfunc Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) {\n\tflag.Parse()\n\tparseCpuList()\n\n\tbefore()\n\tstartAlarm()\n\ttestOk := RunTests(matchString, tests)\n\texampleOk := RunExamples(examples)\n\tif !testOk || !exampleOk {\n\t\tfmt.Fprintln(os.Stderr, \"FAIL\")\n\t\tos.Exit(1)\n\t}\n\tfmt.Fprintln(os.Stderr, \"PASS\")\n\tstopAlarm()\n\tRunBenchmarks(matchString, benchmarks)\n\tafter()\n}\n\nfunc report(t *T) {\n\ttstr := fmt.Sprintf(\"(%.2f seconds)\", float64(t.ns)\/1e9)\n\tformat := \"--- %s: %s %s\\n%s\"\n\tif t.failed {\n\t\tfmt.Fprintf(os.Stderr, format, \"FAIL\", t.name, tstr, t.errors)\n\t} else if *chatty {\n\t\tfmt.Fprintf(os.Stderr, format, \"PASS\", t.name, tstr, t.errors)\n\t}\n}\n\nfunc RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) {\n\tok = true\n\tif len(tests) == 0 {\n\t\tfmt.Fprintln(os.Stderr, \"testing: warning: no tests to run\")\n\t\treturn\n\t}\n\tch := make(chan *T)\n\tfor _, procs := range cpuList {\n\t\truntime.GOMAXPROCS(procs)\n\n\t\tnumParallel := 0\n\t\tstartParallel := make(chan bool)\n\n\t\tfor i := 0; i < len(tests); i++ {\n\t\t\tmatched, err := matchString(*match, tests[i].Name)\n\t\t\tif err != nil {\n\t\t\t\tprintln(\"invalid regexp for -test.run:\", err.Error())\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tif !matched {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttestName := tests[i].Name\n\t\t\tif procs != 1 {\n\t\t\t\ttestName = fmt.Sprintf(\"%s-%d\", tests[i].Name, procs)\n\t\t\t}\n\t\t\tt := &T{ch: ch, name: testName, startParallel: startParallel}\n\t\t\tif *chatty {\n\t\t\t\tprintln(\"=== RUN\", 
t.name)\n\t\t\t}\n\t\t\tgo tRunner(t, &tests[i])\n\t\t\tout := <-t.ch\n\t\t\tif out == nil { \/\/ Parallel run.\n\t\t\t\tnumParallel++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treport(t)\n\t\t\tok = ok && !out.failed\n\t\t}\n\n\t\trunning := 0\n\t\tfor numParallel+running > 0 {\n\t\t\tif running < *parallel && numParallel > 0 {\n\t\t\t\tstartParallel <- true\n\t\t\t\trunning++\n\t\t\t\tnumParallel--\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt := <-ch\n\t\t\treport(t)\n\t\t\tok = ok && !t.failed\n\t\t\trunning--\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ before runs before all testing.\nfunc before() {\n\tif *memProfileRate > 0 {\n\t\truntime.MemProfileRate = *memProfileRate\n\t}\n\tif *cpuProfile != \"\" {\n\t\tf, err := os.Create(*cpuProfile)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"testing: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tif err := pprof.StartCPUProfile(f); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"testing: can't start cpu profile: %s\", err)\n\t\t\tf.Close()\n\t\t\treturn\n\t\t}\n\t\t\/\/ Could save f so after can call f.Close; not worth the effort.\n\t}\n\n}\n\n\/\/ after runs after all testing.\nfunc after() {\n\tif *cpuProfile != \"\" {\n\t\tpprof.StopCPUProfile() \/\/ flushes profile to disk\n\t}\n\tif *memProfile != \"\" {\n\t\tf, err := os.Create(*memProfile)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"testing: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tif err = pprof.WriteHeapProfile(f); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"testing: can't write %s: %s\", *memProfile, err)\n\t\t}\n\t\tf.Close()\n\t}\n}\n\nvar timer *time.Timer\n\n\/\/ startAlarm starts an alarm if requested.\nfunc startAlarm() {\n\tif *timeout > 0 {\n\t\ttimer = time.AfterFunc(*timeout*1e9, alarm)\n\t}\n}\n\n\/\/ stopAlarm turns off the alarm.\nfunc stopAlarm() {\n\tif *timeout > 0 {\n\t\ttimer.Stop()\n\t}\n}\n\n\/\/ alarm is called if the timeout expires.\nfunc alarm() {\n\tpanic(\"test timed out\")\n}\n\nfunc parseCpuList() {\n\tif len(*cpuListStr) == 0 {\n\t\tcpuList = append(cpuList, runtime.GOMAXPROCS(-1))\n\t} else {\n\t\tfor _, val := range strings.Split(*cpuListStr, \",\") {\n\t\t\tcpu, err := strconv.Atoi(val)\n\t\t\tif err != nil || cpu <= 0 {\n\t\t\t\tprintln(\"invalid value for -test.cpu\")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tcpuList = append(cpuList, cpu)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package client_fakes\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tservices \"github.com\/maximilien\/softlayer-go\/services\"\n\tsoftlayer \"github.com\/maximilien\/softlayer-go\/softlayer\"\n)\n\nconst (\n\tSOFTLAYER_API_URL = \"api.softlayer.com\/rest\/v3\"\n\tTEMPLATE_ROOT_PATH = \"templates\"\n)\n\ntype FakeSoftLayerClient struct {\n\tUsername string\n\tApiKey string\n\n\tTemplatePath string\n\n\tSoftLayerServices map[string]softlayer.Service\n\n\tDoRawHttpRequestResponseCount int\n\n\tDoRawHttpRequestResponse []byte\n\tDoRawHttpRequestResponses [][]byte\n\tDoRawHttpRequestResponsesIndex int\n\tDoRawHttpRequestError error\n\tDoRawHttpRequestPath string\n\tDoRawHttpRequestRequestType string\n\n\tGenerateRequestBodyBuffer *bytes.Buffer\n\tGenerateRequestBodyError error\n\n\tHasErrorsError, CheckForHttpResponseError error\n}\n\nfunc NewFakeSoftLayerClient(username, apiKey string) *FakeSoftLayerClient {\n\tpwd, _ := os.Getwd()\n\tfslc := &FakeSoftLayerClient{\n\t\tUsername: username,\n\t\tApiKey: apiKey,\n\n\t\tTemplatePath: filepath.Join(pwd, TEMPLATE_ROOT_PATH),\n\n\t\tSoftLayerServices: 
map[string]softlayer.Service{},\n\n\t\tDoRawHttpRequestResponseCount: 0,\n\n\t\tDoRawHttpRequestResponse: nil,\n\t\tDoRawHttpRequestResponses: [][]byte{},\n\t\tDoRawHttpRequestResponsesIndex: 0,\n\t\tDoRawHttpRequestError: nil,\n\t\tDoRawHttpRequestPath: \"\",\n\t\tDoRawHttpRequestRequestType: \"\",\n\n\t\tGenerateRequestBodyBuffer: new(bytes.Buffer),\n\t\tGenerateRequestBodyError: nil,\n\n\t\tHasErrorsError: nil,\n\t\tCheckForHttpResponseError: nil,\n\t}\n\n\tfslc.initSoftLayerServices()\n\n\treturn fslc\n}\n\n\/\/softlayer.Client interface methods\n\nfunc (fslc *FakeSoftLayerClient) GetService(serviceName string) (softlayer.Service, error) {\n\tslService, ok := fslc.SoftLayerServices[serviceName]\n\tif !ok {\n\t\treturn nil, errors.New(fmt.Sprintf(\"softlayer-go does not support service '%s'\", serviceName))\n\t}\n\n\treturn slService, nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Account_Service() (softlayer.SoftLayer_Account_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Account\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Account_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Virtual_Guest_Service() (softlayer.SoftLayer_Virtual_Guest_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Virtual_Guest\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Virtual_Guest_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Dns_Domain_Service() (softlayer.SoftLayer_Dns_Domain_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Dns_Domain\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Dns_Domain_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Virtual_Disk_Image_Service() (softlayer.SoftLayer_Virtual_Disk_Image_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Virtual_Disk_Image\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Virtual_Disk_Image_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Security_Ssh_Key_Service() (softlayer.SoftLayer_Security_Ssh_Key_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Security_Ssh_Key\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Security_Ssh_Key_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Network_Storage_Service() (softlayer.SoftLayer_Network_Storage_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Network_Storage\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Network_Storage_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Network_Storage_Allowed_Host_Service() (softlayer.SoftLayer_Network_Storage_Allowed_Host_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Network_Storage_Allowed_Host\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Network_Storage_Allowed_Host_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Product_Order_Service() (softlayer.SoftLayer_Product_Order_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Product_Order\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Product_Order_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Product_Package_Service() (softlayer.SoftLayer_Product_Package_Service, error) {\n\tslService, 
err := fslc.GetService(\"SoftLayer_Product_Package\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Product_Package_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Billing_Item_Cancellation_Request_Service() (softlayer.SoftLayer_Billing_Item_Cancellation_Request_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Billing_Item_Cancellation_Request\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Billing_Item_Cancellation_Request_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Virtual_Guest_Block_Device_Template_Group_Service() (softlayer.SoftLayer_Virtual_Guest_Block_Device_Template_Group_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Virtual_Guest_Block_Device_Template_Group\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Virtual_Guest_Block_Device_Template_Group_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Hardware_Service() (softlayer.SoftLayer_Hardware_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Hardware\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Hardware_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Dns_Domain_Record_Service() (softlayer.SoftLayer_Dns_Domain_Record_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Dns_Domain_ResourceRecord\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Dns_Domain_Record_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_SoftLayer_Dns_Domain_Record_SRV_Service() (softlayer.SoftLayer_Dns_Domain_Record_SRV_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Dns_Domain_ResourceRecord_SrvType\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Dns_Domain_Record_SRV_Service), nil\n}\n\n\/\/Public methods\nfunc (fslc *FakeSoftLayerClient) DoRawHttpRequestWithObjectMask(path string, masks []string, requestType string, requestBody *bytes.Buffer) ([]byte, error) {\n\tfslc.DoRawHttpRequestPath = path\n\tfslc.DoRawHttpRequestRequestType = requestType\n\n\tfslc.DoRawHttpRequestResponseCount += 1\n\n\tif fslc.DoRawHttpRequestError != nil {\n\t\treturn []byte{}, fslc.DoRawHttpRequestError\n\t}\n\n\tif fslc.DoRawHttpRequestResponse != nil {\n\t\treturn fslc.DoRawHttpRequestResponse, fslc.DoRawHttpRequestError\n\t} else {\n\t\tfslc.DoRawHttpRequestResponsesIndex = fslc.DoRawHttpRequestResponsesIndex + 1\n\t\treturn fslc.DoRawHttpRequestResponses[fslc.DoRawHttpRequestResponsesIndex-1], fslc.DoRawHttpRequestError\n\t}\n}\n\nfunc (fslc *FakeSoftLayerClient) DoRawHttpRequestWithObjectFilter(path string, filters string, requestType string, requestBody *bytes.Buffer) ([]byte, error) {\n\tfslc.DoRawHttpRequestPath = path\n\tfslc.DoRawHttpRequestRequestType = requestType\n\n\tfslc.DoRawHttpRequestResponseCount += 1\n\n\tif fslc.DoRawHttpRequestError != nil {\n\t\treturn []byte{}, fslc.DoRawHttpRequestError\n\t}\n\n\tif fslc.DoRawHttpRequestResponse != nil {\n\t\treturn fslc.DoRawHttpRequestResponse, fslc.DoRawHttpRequestError\n\t} else {\n\t\tfslc.DoRawHttpRequestResponsesIndex = fslc.DoRawHttpRequestResponsesIndex + 1\n\t\treturn fslc.DoRawHttpRequestResponses[fslc.DoRawHttpRequestResponsesIndex-1], fslc.DoRawHttpRequestError\n\t}\n}\n\nfunc (fslc *FakeSoftLayerClient) DoRawHttpRequestWithObjectFilterAndObjectMask(path string, masks []string, 
filters string, requestType string, requestBody *bytes.Buffer) ([]byte, error) {\n\tfslc.DoRawHttpRequestPath = path\n\tfslc.DoRawHttpRequestRequestType = requestType\n\n\tfslc.DoRawHttpRequestResponseCount += 1\n\n\tif fslc.DoRawHttpRequestError != nil {\n\t\treturn []byte{}, fslc.DoRawHttpRequestError\n\t}\n\n\tif fslc.DoRawHttpRequestResponse != nil {\n\t\treturn fslc.DoRawHttpRequestResponse, fslc.DoRawHttpRequestError\n\t} else {\n\t\tfslc.DoRawHttpRequestResponsesIndex = fslc.DoRawHttpRequestResponsesIndex + 1\n\t\treturn fslc.DoRawHttpRequestResponses[fslc.DoRawHttpRequestResponsesIndex-1], fslc.DoRawHttpRequestError\n\t}\n}\n\nfunc (fslc *FakeSoftLayerClient) DoRawHttpRequest(path string, requestType string, requestBody *bytes.Buffer) ([]byte, error) {\n\tfslc.DoRawHttpRequestPath = path\n\tfslc.DoRawHttpRequestRequestType = requestType\n\n\tfslc.DoRawHttpRequestResponseCount += 1\n\n\tif fslc.DoRawHttpRequestError != nil {\n\t\treturn []byte{}, fslc.DoRawHttpRequestError\n\t}\n\n\tif fslc.DoRawHttpRequestResponse != nil {\n\t\treturn fslc.DoRawHttpRequestResponse, fslc.DoRawHttpRequestError\n\t} else {\n\t\tfslc.DoRawHttpRequestResponsesIndex = fslc.DoRawHttpRequestResponsesIndex + 1\n\t\treturn fslc.DoRawHttpRequestResponses[fslc.DoRawHttpRequestResponsesIndex-1], fslc.DoRawHttpRequestError\n\t}\n}\n\nfunc (fslc *FakeSoftLayerClient) GenerateRequestBody(templateData interface{}) (*bytes.Buffer, error) {\n\treturn fslc.GenerateRequestBodyBuffer, fslc.GenerateRequestBodyError\n}\n\nfunc (fslc *FakeSoftLayerClient) HasErrors(body map[string]interface{}) error {\n\treturn fslc.HasErrorsError\n}\n\nfunc (fslc *FakeSoftLayerClient) CheckForHttpResponseErrors(data []byte) error {\n\treturn fslc.CheckForHttpResponseError\n}\n\n\/\/Private methods\n\nfunc (fslc *FakeSoftLayerClient) initSoftLayerServices() {\n\tfslc.SoftLayerServices[\"SoftLayer_Account\"] = services.NewSoftLayer_Account_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Virtual_Guest\"] = services.NewSoftLayer_Virtual_Guest_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Virtual_Disk_Image\"] = services.NewSoftLayer_Virtual_Disk_Image_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Security_Ssh_Key\"] = services.NewSoftLayer_Security_Ssh_Key_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Network_Storage\"] = services.NewSoftLayer_Network_Storage_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Network_Storage_Allowed_Host\"] = services.NewSoftLayer_Network_Storage_Allowed_Host_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Product_Order\"] = services.NewSoftLayer_Product_Order_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Product_Package\"] = services.NewSoftLayer_Product_Package_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Billing_Item_Cancellation_Request\"] = services.NewSoftLayer_Billing_Item_Cancellation_Request_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Virtual_Guest_Block_Device_Template_Group\"] = services.NewSoftLayer_Virtual_Guest_Block_Device_Template_Group_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Hardware\"] = services.NewSoftLayer_Hardware_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Dns_Domain\"] = services.NewSoftLayer_Dns_Domain_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Dns_Domain_ResourceRecord\"] = services.NewSoftLayer_Dns_Domain_Record_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Dns_Domain_ResourceRecord_SrvType\"] = services.NewSoftLayer_Dns_Domain_Record_SRV_Service(fslc)\n}\n<commit_msg>Update 
softlayer_client_fake.go<commit_after>package client_fakes\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tservices \"github.com\/maximilien\/softlayer-go\/services\"\n\tsoftlayer \"github.com\/maximilien\/softlayer-go\/softlayer\"\n)\n\nconst (\n\tSOFTLAYER_API_URL = \"api.softlayer.com\/rest\/v3\"\n\tTEMPLATE_ROOT_PATH = \"templates\"\n)\n\ntype FakeSoftLayerClient struct {\n\tUsername string\n\tApiKey string\n\n\tTemplatePath string\n\n\tSoftLayerServices map[string]softlayer.Service\n\n\tDoRawHttpRequestResponseCount int\n\n\tDoRawHttpRequestResponse []byte\n\tDoRawHttpRequestResponses [][]byte\n\tDoRawHttpRequestResponsesIndex int\n\tDoRawHttpRequestError error\n\tDoRawHttpRequestPath string\n\tDoRawHttpRequestRequestType string\n\n\tGenerateRequestBodyBuffer *bytes.Buffer\n\tGenerateRequestBodyError error\n\n\tHasErrorsError, CheckForHttpResponseError error\n}\n\nfunc NewFakeSoftLayerClient(username, apiKey string) *FakeSoftLayerClient {\n\tpwd, _ := os.Getwd()\n\tfslc := &FakeSoftLayerClient{\n\t\tUsername: username,\n\t\tApiKey: apiKey,\n\n\t\tTemplatePath: filepath.Join(pwd, TEMPLATE_ROOT_PATH),\n\n\t\tSoftLayerServices: map[string]softlayer.Service{},\n\n\t\tDoRawHttpRequestResponseCount: 0,\n\n\t\tDoRawHttpRequestResponse: nil,\n\t\tDoRawHttpRequestResponses: [][]byte{},\n\t\tDoRawHttpRequestResponsesIndex: 0,\n\t\tDoRawHttpRequestError: nil,\n\t\tDoRawHttpRequestPath: \"\",\n\t\tDoRawHttpRequestRequestType: \"\",\n\n\t\tGenerateRequestBodyBuffer: new(bytes.Buffer),\n\t\tGenerateRequestBodyError: nil,\n\n\t\tHasErrorsError: nil,\n\t\tCheckForHttpResponseError: nil,\n\t}\n\n\tfslc.initSoftLayerServices()\n\n\treturn fslc\n}\n\n\/\/softlayer.Client interface methods\n\nfunc (fslc *FakeSoftLayerClient) GetService(serviceName string) (softlayer.Service, error) {\n\tslService, ok := fslc.SoftLayerServices[serviceName]\n\tif !ok {\n\t\treturn nil, errors.New(fmt.Sprintf(\"softlayer-go does not support service '%s'\", serviceName))\n\t}\n\n\treturn slService, nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Account_Service() (softlayer.SoftLayer_Account_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Account\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Account_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Virtual_Guest_Service() (softlayer.SoftLayer_Virtual_Guest_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Virtual_Guest\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Virtual_Guest_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Dns_Domain_Service() (softlayer.SoftLayer_Dns_Domain_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Dns_Domain\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Dns_Domain_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Virtual_Disk_Image_Service() (softlayer.SoftLayer_Virtual_Disk_Image_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Virtual_Disk_Image\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Virtual_Disk_Image_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Security_Ssh_Key_Service() (softlayer.SoftLayer_Security_Ssh_Key_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Security_Ssh_Key\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn 
slService.(softlayer.SoftLayer_Security_Ssh_Key_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Network_Storage_Service() (softlayer.SoftLayer_Network_Storage_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Network_Storage\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Network_Storage_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Network_Storage_Allowed_Host_Service() (softlayer.SoftLayer_Network_Storage_Allowed_Host_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Network_Storage_Allowed_Host\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Network_Storage_Allowed_Host_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Product_Order_Service() (softlayer.SoftLayer_Product_Order_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Product_Order\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Product_Order_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Product_Package_Service() (softlayer.SoftLayer_Product_Package_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Product_Package\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Product_Package_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Billing_Item_Cancellation_Request_Service() (softlayer.SoftLayer_Billing_Item_Cancellation_Request_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Billing_Item_Cancellation_Request\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Billing_Item_Cancellation_Request_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Virtual_Guest_Block_Device_Template_Group_Service() (softlayer.SoftLayer_Virtual_Guest_Block_Device_Template_Group_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Virtual_Guest_Block_Device_Template_Group\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Virtual_Guest_Block_Device_Template_Group_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Hardware_Service() (softlayer.SoftLayer_Hardware_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Hardware\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Hardware_Service), nil\n}\n\nfunc (fslc *FakeSoftLayerClient) GetSoftLayer_Dns_Domain_Record_Service() (softlayer.SoftLayer_Dns_Domain_Record_Service, error) {\n\tslService, err := fslc.GetService(\"SoftLayer_Dns_Domain_ResourceRecord\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slService.(softlayer.SoftLayer_Dns_Domain_Record_Service), nil\n}\n\n\/\/Public methods\nfunc (fslc *FakeSoftLayerClient) DoRawHttpRequestWithObjectMask(path string, masks []string, requestType string, requestBody *bytes.Buffer) ([]byte, error) {\n\tfslc.DoRawHttpRequestPath = path\n\tfslc.DoRawHttpRequestRequestType = requestType\n\n\tfslc.DoRawHttpRequestResponseCount += 1\n\n\tif fslc.DoRawHttpRequestError != nil {\n\t\treturn []byte{}, fslc.DoRawHttpRequestError\n\t}\n\n\tif fslc.DoRawHttpRequestResponse != nil {\n\t\treturn fslc.DoRawHttpRequestResponse, fslc.DoRawHttpRequestError\n\t} else {\n\t\tfslc.DoRawHttpRequestResponsesIndex = fslc.DoRawHttpRequestResponsesIndex + 1\n\t\treturn fslc.DoRawHttpRequestResponses[fslc.DoRawHttpRequestResponsesIndex-1], 
fslc.DoRawHttpRequestError\n\t}\n}\n\nfunc (fslc *FakeSoftLayerClient) DoRawHttpRequestWithObjectFilter(path string, filters string, requestType string, requestBody *bytes.Buffer) ([]byte, error) {\n\tfslc.DoRawHttpRequestPath = path\n\tfslc.DoRawHttpRequestRequestType = requestType\n\n\tfslc.DoRawHttpRequestResponseCount += 1\n\n\tif fslc.DoRawHttpRequestError != nil {\n\t\treturn []byte{}, fslc.DoRawHttpRequestError\n\t}\n\n\tif fslc.DoRawHttpRequestResponse != nil {\n\t\treturn fslc.DoRawHttpRequestResponse, fslc.DoRawHttpRequestError\n\t} else {\n\t\tfslc.DoRawHttpRequestResponsesIndex = fslc.DoRawHttpRequestResponsesIndex + 1\n\t\treturn fslc.DoRawHttpRequestResponses[fslc.DoRawHttpRequestResponsesIndex-1], fslc.DoRawHttpRequestError\n\t}\n}\n\nfunc (fslc *FakeSoftLayerClient) DoRawHttpRequestWithObjectFilterAndObjectMask(path string, masks []string, filters string, requestType string, requestBody *bytes.Buffer) ([]byte, error) {\n\tfslc.DoRawHttpRequestPath = path\n\tfslc.DoRawHttpRequestRequestType = requestType\n\n\tfslc.DoRawHttpRequestResponseCount += 1\n\n\tif fslc.DoRawHttpRequestError != nil {\n\t\treturn []byte{}, fslc.DoRawHttpRequestError\n\t}\n\n\tif fslc.DoRawHttpRequestResponse != nil {\n\t\treturn fslc.DoRawHttpRequestResponse, fslc.DoRawHttpRequestError\n\t} else {\n\t\tfslc.DoRawHttpRequestResponsesIndex = fslc.DoRawHttpRequestResponsesIndex + 1\n\t\treturn fslc.DoRawHttpRequestResponses[fslc.DoRawHttpRequestResponsesIndex-1], fslc.DoRawHttpRequestError\n\t}\n}\n\nfunc (fslc *FakeSoftLayerClient) DoRawHttpRequest(path string, requestType string, requestBody *bytes.Buffer) ([]byte, error) {\n\tfslc.DoRawHttpRequestPath = path\n\tfslc.DoRawHttpRequestRequestType = requestType\n\n\tfslc.DoRawHttpRequestResponseCount += 1\n\n\tif fslc.DoRawHttpRequestError != nil {\n\t\treturn []byte{}, fslc.DoRawHttpRequestError\n\t}\n\n\tif fslc.DoRawHttpRequestResponse != nil {\n\t\treturn fslc.DoRawHttpRequestResponse, fslc.DoRawHttpRequestError\n\t} else {\n\t\tfslc.DoRawHttpRequestResponsesIndex = fslc.DoRawHttpRequestResponsesIndex + 1\n\t\treturn fslc.DoRawHttpRequestResponses[fslc.DoRawHttpRequestResponsesIndex-1], fslc.DoRawHttpRequestError\n\t}\n}\n\nfunc (fslc *FakeSoftLayerClient) GenerateRequestBody(templateData interface{}) (*bytes.Buffer, error) {\n\treturn fslc.GenerateRequestBodyBuffer, fslc.GenerateRequestBodyError\n}\n\nfunc (fslc *FakeSoftLayerClient) HasErrors(body map[string]interface{}) error {\n\treturn fslc.HasErrorsError\n}\n\nfunc (fslc *FakeSoftLayerClient) CheckForHttpResponseErrors(data []byte) error {\n\treturn fslc.CheckForHttpResponseError\n}\n\n\/\/Private methods\n\nfunc (fslc *FakeSoftLayerClient) initSoftLayerServices() {\n\tfslc.SoftLayerServices[\"SoftLayer_Account\"] = services.NewSoftLayer_Account_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Virtual_Guest\"] = services.NewSoftLayer_Virtual_Guest_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Virtual_Disk_Image\"] = services.NewSoftLayer_Virtual_Disk_Image_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Security_Ssh_Key\"] = services.NewSoftLayer_Security_Ssh_Key_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Network_Storage\"] = services.NewSoftLayer_Network_Storage_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Network_Storage_Allowed_Host\"] = services.NewSoftLayer_Network_Storage_Allowed_Host_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Product_Order\"] = services.NewSoftLayer_Product_Order_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Product_Package\"] = 
services.NewSoftLayer_Product_Package_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Billing_Item_Cancellation_Request\"] = services.NewSoftLayer_Billing_Item_Cancellation_Request_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Virtual_Guest_Block_Device_Template_Group\"] = services.NewSoftLayer_Virtual_Guest_Block_Device_Template_Group_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Hardware\"] = services.NewSoftLayer_Hardware_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Dns_Domain\"] = services.NewSoftLayer_Dns_Domain_Service(fslc)\n\tfslc.SoftLayerServices[\"SoftLayer_Dns_Domain_ResourceRecord\"] = services.NewSoftLayer_Dns_Domain_Record_Service(fslc)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package main contains a simple command line tool for DistanceMatrix\n\/\/ Directions docs: https:\/\/developers.google.com\/maps\/documentation\/distancematrix\/\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/kr\/pretty\"\n\t\"google.golang.org\/maps\"\n)\n\nvar (\n\tapiKey = flag.String(\"key\", \"\", \"API Key for using Google Maps API.\")\n\torigins = flag.String(\"origins\", \"\", \"One or more addresses and\/or textual latitude\/longitude values, separated with the pipe (|) character, from which to calculate distance and time.\")\n\tdestinations = flag.String(\"destinations\", \"\", \"One or more addresses and\/or textual latitude\/longitude values, separated with the pipe (|) character, to which to calculate distance and time.\")\n\tmode = flag.String(\"mode\", \"\", \"Specifies the mode of transport to use when calculating distance.\")\n\tlanguage = flag.String(\"language\", \"\", \"The language in which to return results.\")\n\tavoid = flag.String(\"avoid\", \"\", \"Introduces restrictions to the route.\")\n\tunits = flag.String(\"units\", \"\", \"Specifies the unit system to use when expressing distance as text.\")\n\tdepartureTime = flag.String(\"departure_time\", \"\", \"The desired time of departure.\")\n\tarrivalTime = flag.String(\"arrival_time\", \"\", \"Specifies the desired time of arrival.\")\n\ttransitMode = flag.String(\"transit_mode\", \"\", \"Specifies one or more preferred modes of transit.\")\n\ttransitRoutingPreference = flag.String(\"transit_routing_preference\", \"\", \"Specifies preferences for transit requests.\")\n)\n\nfunc usageAndExit(msg string) {\n\tfmt.Fprintln(os.Stderr, msg)\n\tfmt.Println(\"Flags:\")\n\tflag.PrintDefaults()\n\tos.Exit(2)\n}\n\nfunc main() {\n\tflag.Parse()\n\tclient := &http.Client{}\n\tif *apiKey == \"\" {\n\t\tusageAndExit(\"Please specify an API Key.\")\n\t}\n\tctx := maps.NewContext(*apiKey, client)\n\n\tr := &maps.DistanceMatrixRequest{\n\t\tLanguage: *language,\n\t\tDepartureTime: *departureTime,\n\t\tArrivalTime: *arrivalTime,\n\t}\n\n\tif *origins != \"\" {\n\t\tr.Origins = 
strings.Split(*origins, \"|\")\n\t}\n\tif *destinations != \"\" {\n\t\tr.Destinations = strings.Split(*destinations, \"|\")\n\t}\n\n\tlookupMode(*mode, r)\n\tlookupAvoid(*avoid, r)\n\tlookupUnits(*units, r)\n\tlookupTransitMode(*transitMode, r)\n\tlookupTransitRoutingPreference(*transitRoutingPreference, r)\n\n\tpretty.Println(r)\n\n\tresp, err := r.Get(ctx)\n\tif err != nil {\n\t\tlog.Fatalf(\"Fatal error: %s\", err)\n\t}\n\n\tpretty.Println(resp)\n}\n\nfunc lookupMode(mode string, r *maps.DistanceMatrixRequest) {\n\tif mode != \"\" {\n\t\tswitch {\n\t\tcase mode == \"driving\":\n\t\t\tr.Mode = maps.TravelModeDriving\n\t\tcase mode == \"walking\":\n\t\t\tr.Mode = maps.TravelModeWalking\n\t\tcase mode == \"bicycling\":\n\t\t\tr.Mode = maps.TravelModeBicycling\n\t\tcase mode == \"transit\":\n\t\t\tr.Mode = maps.TravelModeTransit\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Unknown mode %s\", mode)\n\t\t}\n\t}\n}\n\nfunc lookupAvoid(avoid string, r *maps.DistanceMatrixRequest) {\n\tif avoid != \"\" {\n\t\tswitch {\n\t\tcase avoid == \"tolls\":\n\t\t\tr.Avoid = maps.AvoidTolls\n\t\tcase avoid == \"highways\":\n\t\t\tr.Avoid = maps.AvoidHighways\n\t\tcase avoid == \"ferries\":\n\t\t\tr.Avoid = maps.AvoidFerries\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Unknown avoid restriction %s\", avoid)\n\t\t}\n\t}\n}\n\nfunc lookupUnits(units string, r *maps.DistanceMatrixRequest) {\n\tif units != \"\" {\n\t\tswitch {\n\t\tcase units == \"metric\":\n\t\t\tr.Units = maps.UnitsMetric\n\t\tcase units == \"imperial\":\n\t\t\tr.Units = maps.UnitsImperial\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Unknown units %s\", units)\n\t\t}\n\t}\n}\n\nfunc lookupTransitMode(transitMode string, r *maps.DistanceMatrixRequest) {\n\tif transitMode != \"\" {\n\t\tswitch {\n\t\tcase transitMode == \"bus\":\n\t\t\tr.TransitMode = maps.TransitModeBus\n\t\tcase transitMode == \"subway\":\n\t\t\tr.TransitMode = maps.TransitModeSubway\n\t\tcase transitMode == \"train\":\n\t\t\tr.TransitMode = maps.TransitModeTrain\n\t\tcase transitMode == \"tram\":\n\t\t\tr.TransitMode = maps.TransitModeTram\n\t\tcase transitMode == \"rail\":\n\t\t\tr.TransitMode = maps.TransitModeRail\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Unknown transit_mode %s\", transitMode)\n\t\t}\n\t}\n}\n\nfunc lookupTransitRoutingPreference(transitRoutingPreference string, r *maps.DistanceMatrixRequest) {\n\tif transitRoutingPreference != \"\" {\n\t\tswitch {\n\t\tcase transitRoutingPreference == \"fewer_transfers\":\n\t\t\tr.TransitRoutingPreference = maps.TransitRoutingPreferenceFewerTransfers\n\t\tcase transitRoutingPreference == \"less_walking\":\n\t\t\tr.TransitRoutingPreference = maps.TransitRoutingPreferenceLessWalking\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Unknown transit_routing_preference %s\", transitRoutingPreference)\n\t\t}\n\t}\n}\n<commit_msg>Minor cleanup<commit_after>\/\/ Copyright 2015 Google Inc. 
All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package main contains a simple command line tool for DistanceMatrix\n\/\/ Directions docs: https:\/\/developers.google.com\/maps\/documentation\/distancematrix\/\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/kr\/pretty\"\n\t\"google.golang.org\/maps\"\n)\n\nvar (\n\tapiKey = flag.String(\"key\", \"\", \"API Key for using Google Maps API.\")\n\torigins = flag.String(\"origins\", \"\", \"One or more addresses and\/or textual latitude\/longitude values, separated with the pipe (|) character, from which to calculate distance and time.\")\n\tdestinations = flag.String(\"destinations\", \"\", \"One or more addresses and\/or textual latitude\/longitude values, separated with the pipe (|) character, to which to calculate distance and time.\")\n\tmode = flag.String(\"mode\", \"\", \"Specifies the mode of transport to use when calculating distance.\")\n\tlanguage = flag.String(\"language\", \"\", \"The language in which to return results.\")\n\tavoid = flag.String(\"avoid\", \"\", \"Introduces restrictions to the route.\")\n\tunits = flag.String(\"units\", \"\", \"Specifies the unit system to use when expressing distance as text.\")\n\tdepartureTime = flag.String(\"departure_time\", \"\", \"The desired time of departure.\")\n\tarrivalTime = flag.String(\"arrival_time\", \"\", \"Specifies the desired time of arrival.\")\n\ttransitMode = flag.String(\"transit_mode\", \"\", \"Specifies one or more preferred modes of transit.\")\n\ttransitRoutingPreference = flag.String(\"transit_routing_preference\", \"\", \"Specifies preferences for transit requests.\")\n)\n\nfunc usageAndExit(msg string) {\n\tfmt.Fprintln(os.Stderr, msg)\n\tfmt.Println(\"Flags:\")\n\tflag.PrintDefaults()\n\tos.Exit(2)\n}\n\nfunc main() {\n\tflag.Parse()\n\tclient := &http.Client{}\n\tif *apiKey == \"\" {\n\t\tusageAndExit(\"Please specify an API Key.\")\n\t}\n\tctx := maps.NewContext(*apiKey, client)\n\n\tr := &maps.DistanceMatrixRequest{\n\t\tLanguage: *language,\n\t\tDepartureTime: *departureTime,\n\t\tArrivalTime: *arrivalTime,\n\t}\n\n\tif *origins != \"\" {\n\t\tr.Origins = strings.Split(*origins, \"|\")\n\t}\n\tif *destinations != \"\" {\n\t\tr.Destinations = strings.Split(*destinations, \"|\")\n\t}\n\n\tlookupMode(*mode, r)\n\tlookupAvoid(*avoid, r)\n\tlookupUnits(*units, r)\n\tlookupTransitMode(*transitMode, r)\n\tlookupTransitRoutingPreference(*transitRoutingPreference, r)\n\n\tpretty.Println(r)\n\n\tresp, err := r.Get(ctx)\n\tif err != nil {\n\t\tlog.Fatalf(\"fatal error: %s\", err)\n\t}\n\n\tpretty.Println(resp)\n}\n\nfunc lookupMode(mode string, r *maps.DistanceMatrixRequest) {\n\tif mode != \"\" {\n\t\tswitch {\n\t\tcase mode == \"driving\":\n\t\t\tr.Mode = maps.TravelModeDriving\n\t\tcase mode == \"walking\":\n\t\t\tr.Mode = maps.TravelModeWalking\n\t\tcase mode == \"bicycling\":\n\t\t\tr.Mode = maps.TravelModeBicycling\n\t\tcase 
mode == \"transit\":\n\t\t\tr.Mode = maps.TravelModeTransit\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Unknown mode %s\", mode)\n\t\t}\n\t}\n}\n\nfunc lookupAvoid(avoid string, r *maps.DistanceMatrixRequest) {\n\tif avoid != \"\" {\n\t\tswitch {\n\t\tcase avoid == \"tolls\":\n\t\t\tr.Avoid = maps.AvoidTolls\n\t\tcase avoid == \"highways\":\n\t\t\tr.Avoid = maps.AvoidHighways\n\t\tcase avoid == \"ferries\":\n\t\t\tr.Avoid = maps.AvoidFerries\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Unknown avoid restriction %s\", avoid)\n\t\t}\n\t}\n}\n\nfunc lookupUnits(units string, r *maps.DistanceMatrixRequest) {\n\tif units != \"\" {\n\t\tswitch {\n\t\tcase units == \"metric\":\n\t\t\tr.Units = maps.UnitsMetric\n\t\tcase units == \"imperial\":\n\t\t\tr.Units = maps.UnitsImperial\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Unknown units %s\", units)\n\t\t}\n\t}\n}\n\nfunc lookupTransitMode(transitMode string, r *maps.DistanceMatrixRequest) {\n\tif transitMode != \"\" {\n\t\tswitch {\n\t\tcase transitMode == \"bus\":\n\t\t\tr.TransitMode = maps.TransitModeBus\n\t\tcase transitMode == \"subway\":\n\t\t\tr.TransitMode = maps.TransitModeSubway\n\t\tcase transitMode == \"train\":\n\t\t\tr.TransitMode = maps.TransitModeTrain\n\t\tcase transitMode == \"tram\":\n\t\t\tr.TransitMode = maps.TransitModeTram\n\t\tcase transitMode == \"rail\":\n\t\t\tr.TransitMode = maps.TransitModeRail\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Unknown transit_mode %s\", transitMode)\n\t\t}\n\t}\n}\n\nfunc lookupTransitRoutingPreference(transitRoutingPreference string, r *maps.DistanceMatrixRequest) {\n\tif transitRoutingPreference != \"\" {\n\t\tswitch {\n\t\tcase transitRoutingPreference == \"fewer_transfers\":\n\t\t\tr.TransitRoutingPreference = maps.TransitRoutingPreferenceFewerTransfers\n\t\tcase transitRoutingPreference == \"less_walking\":\n\t\t\tr.TransitRoutingPreference = maps.TransitRoutingPreferenceLessWalking\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Unknown transit_routing_preference %s\", transitRoutingPreference)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Router for Upwork API\n\/\/\n\/\/ Licensed under the Upwork's API Terms of Use;\n\/\/ you may not use this file except in compliance with the Terms.\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\/\/ Author:: Maksym Novozhylov (mnovozhilov@upwork.com)\n\/\/ Copyright:: Copyright 2016(c) Upwork.com\n\/\/ License:: See LICENSE.txt and TOS - https:\/\/developers.upwork.com\/api-tos.html\npackage messages\n\nimport (\n \"net\/http\"\n \"github.com\/upwork\/golang-upwork\/api\"\n)\n\nconst (\n EntryPoint = \"api\"\n)\n\ntype a struct {\n client api.ApiClient\n}\n\n\/\/ Constructor\nfunc New(c api.ApiClient) (a) {\n var r a\n c.SetEntryPoint(EntryPoint)\n r.client = c\n\n return r\n}\n\n\/\/ Retrive rooms information\nfunc (r a) GetRooms(company string) (*http.Response, []byte) {\n return r.client.Get(\"\/messages\/v3\/\" + company + \"\/rooms\", nil)\n}\n\n\/\/ Get a specific room information\nfunc (r a) GetRoomDetails(company string, roomId string, params map[string]string) (*http.Response, []byte) {\n return r.client.Get(\"\/messages\/v3\/\" + company + \"\/rooms\/\" + roomId, params)\n}\n\n\/\/ Get a specific room by offer ID\nfunc (r a) GetRoomByOffer(company string, offerId string, params 
map[string]string) (*http.Response, []byte) {\n return r.client.Get(\"\/messages\/v3\/\" + company + \"\/rooms\/offers\/\" + offerId, params)\n}\n\n\/\/ Get a specific room by application ID\nfunc (r a) GetRoomByApplication(company string, applicationId string, params map[string]string) (*http.Response, []byte) {\n return r.client.Get(\"\/messages\/v3\/\" + company + \"\/rooms\/applications\/\" + applicationId, params)\n}\n\n\/\/ Get a specific room by contract ID\nfunc (r a) GetRoomByContract(company string, contractId string, params map[string]string) (*http.Response, []byte) {\n return r.client.Get(\"\/messages\/v3\/\" + company + \"\/rooms\/contracts\/\" + contractId, params)\n}\n\n\/\/ Create a new room\nfunc (r a) CreateRoom(company string, params map[string]string) (*http.Response, []byte) {\n return r.client.Post(\"\/messages\/v3\/\" + company + \"\/rooms\", params)\n}\n\n\/\/ Send a message to a room\nfunc (r a) SendMessageToRoom(company string, roomId string, params map[string]string) (*http.Response, []byte) {\n return r.client.Post(\"\/messages\/v3\/\" + company + \"\/rooms\/\" + roomId + \"\/stories\", params)\n}\n\n\/\/ Update a room settings\nfunc (r a) UpdateRoomSettings(company string, roomId string, username string, params map[string]string) (*http.Response, []byte) {\n return r.client.Put(\"\/messages\/v3\/\" + company + \"\/rooms\/\" + roomId + \"\/users\/\" + username, params)\n}\n\n\/\/ Update the metadata of a room\nfunc (r a) UpdateRoomMetadata(company string, roomId string, params map[string]string) (*http.Response, []byte) {\n return r.client.Put(\"\/messages\/v3\/\" + company + \"\/rooms\/\" + roomId, params)\n}\n<commit_msg>v1.2.0<commit_after>\/\/ Router for Upwork API\n\/\/\n\/\/ Licensed under the Upwork's API Terms of Use;\n\/\/ you may not use this file except in compliance with the Terms.\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\/\/ Author:: Maksym Novozhylov (mnovozhilov@upwork.com)\n\/\/ Copyright:: Copyright 2016(c) Upwork.com\n\/\/ License:: See LICENSE.txt and TOS - https:\/\/developers.upwork.com\/api-tos.html\npackage messages\n\nimport (\n \"net\/http\"\n \"github.com\/upwork\/golang-upwork\/api\"\n)\n\nconst (\n EntryPoint = \"api\"\n)\n\ntype a struct {\n client api.ApiClient\n}\n\n\/\/ Constructor\nfunc New(c api.ApiClient) (a) {\n var r a\n c.SetEntryPoint(EntryPoint)\n r.client = c\n\n return r\n}\n\n\/\/ Retrieve rooms information\nfunc (r a) GetRooms(company string, params map[string]string) (*http.Response, []byte) {\n return r.client.Get(\"\/messages\/v3\/\" + company + \"\/rooms\", params)\n}\n\n\/\/ Get a specific room information\nfunc (r a) GetRoomDetails(company string, roomId string, params map[string]string) (*http.Response, []byte) {\n return r.client.Get(\"\/messages\/v3\/\" + company + \"\/rooms\/\" + roomId, params)\n}\n\n\/\/ Get a specific room by offer ID\nfunc (r a) GetRoomByOffer(company string, offerId string, params map[string]string) (*http.Response, []byte) {\n return r.client.Get(\"\/messages\/v3\/\" + company + \"\/rooms\/offers\/\" + offerId, params)\n}\n\n\/\/ Get a specific room by application ID\nfunc (r a) GetRoomByApplication(company string, applicationId string, params map[string]string) (*http.Response, 
[]byte) {\n return r.client.Get(\"\/messages\/v3\/\" + company + \"\/rooms\/applications\/\" + applicationId, params)\n}\n\n\/\/ Get a specific room by contract ID\nfunc (r a) GetRoomByContract(company string, contractId string, params map[string]string) (*http.Response, []byte) {\n return r.client.Get(\"\/messages\/v3\/\" + company + \"\/rooms\/contracts\/\" + contractId, params)\n}\n\n\/\/ Create a new room\nfunc (r a) CreateRoom(company string, params map[string]string) (*http.Response, []byte) {\n return r.client.Post(\"\/messages\/v3\/\" + company + \"\/rooms\", params)\n}\n\n\/\/ Send a message to a room\nfunc (r a) SendMessageToRoom(company string, roomId string, params map[string]string) (*http.Response, []byte) {\n return r.client.Post(\"\/messages\/v3\/\" + company + \"\/rooms\/\" + roomId + \"\/stories\", params)\n}\n\n\/\/ Update a room settings\nfunc (r a) UpdateRoomSettings(company string, roomId string, username string, params map[string]string) (*http.Response, []byte) {\n return r.client.Put(\"\/messages\/v3\/\" + company + \"\/rooms\/\" + roomId + \"\/users\/\" + username, params)\n}\n\n\/\/ Update the metadata of a room\nfunc (r a) UpdateRoomMetadata(company string, roomId string, params map[string]string) (*http.Response, []byte) {\n return r.client.Put(\"\/messages\/v3\/\" + company + \"\/rooms\/\" + roomId, params)\n}\n<|endoftext|>"} {"text":"<commit_before>package acceptance_test\n\nimport (\n\t\"cf-pusher\/cf_cli_adapter\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/cf\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n\t\"os\"\n)\n\nvar _ = Describe(\"external connectivity\", func() {\n\tvar (\n\t\tappA string\n\t\torgName string\n\t\tspaceName string\n\t\tappRoute string\n\t\tcli *cf_cli_adapter.Adapter\n\t)\n\n\tBeforeEach(func() {\n\t\tif testConfig.Internetless {\n\t\t\tSkip(\"skipping egress policy tests\")\n\t\t}\n\n\t\tcli = &cf_cli_adapter.Adapter{CfCliPath: \"cf\"}\n\t\tappA = fmt.Sprintf(\"appA-%d\", rand.Int31())\n\n\t\torgName = testConfig.Prefix + \"egress-policy-org\"\n\t\tspaceName = testConfig.Prefix + \"space\"\n\t\tsetupOrgAndSpace(orgName, spaceName)\n\n\t\tBy(\"unbinding all running ASGs\")\n\t\tfor _, sg := range testConfig.DefaultSecurityGroups {\n\t\t\tExpect(cf.Cf(\"unbind-running-security-group\", sg).Wait(Timeout_Short)).To(gexec.Exit(0))\n\t\t}\n\n\t\tBy(\"pushing the test app\")\n\t\tpushProxy(appA)\n\t\tappRoute = fmt.Sprintf(\"http:\/\/%s.%s\/\", appA, config.AppsDomain)\n\t})\n\n\tAfterEach(func() {\n\t\tBy(\"adding back all the original running ASGs\")\n\t\tfor _, sg := range testConfig.DefaultSecurityGroups {\n\t\t\tExpect(cf.Cf(\"bind-running-security-group\", sg).Wait(Timeout_Short)).To(gexec.Exit(0))\n\t\t}\n\n\t\tBy(\"deleting the test org\")\n\t\tExpect(cf.Cf(\"delete-org\", orgName, \"-f\").Wait(Timeout_Push)).To(gexec.Exit(0))\n\t})\n\n\tcheckRequest := func(route string, expectedStatusCode int, expectedResponseSubstring string) error {\n\t\tresp, err := http.Get(route)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\trespBytes, err := ioutil.ReadAll(resp.Body)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\trespBody := string(respBytes)\n\n\t\tif resp.StatusCode != expectedStatusCode {\n\t\t\treturn fmt.Errorf(\"test http get to %s: expected response code %d but got %d. 
response body:\\n%s\", route, expectedStatusCode, resp.StatusCode, respBody)\n\t\t}\n\t\tif !strings.Contains(respBody, expectedResponseSubstring) {\n\t\t\treturn fmt.Errorf(\"test http get to %s: expected response to contain %q but instead saw:\\n%s\", route, expectedResponseSubstring, respBody)\n\t\t}\n\t\treturn nil\n\t}\n\n\tcanProxy := func() error {\n\t\treturn checkRequest(appRoute+\"proxy\/example.com\", 200, \"Example Domain\")\n\t}\n\tcannotProxy := func() error {\n\t\treturn checkRequest(appRoute+\"proxy\/example.com\", 500, \"example.com\")\n\t}\n\n\tDescribe(\"egress policy connectivity\", func() {\n\t\tIt(\"the app can reach the internet when egress policy is present\", func(done Done) {\n\t\t\tBy(\"checking that the app cannot reach the internet using http and dns\")\n\t\t\tEventually(cannotProxy, \"10s\", \"1s\").Should(Succeed())\n\t\t\tConsistently(cannotProxy, \"2s\", \"0.5s\").Should(Succeed())\n\n\t\t\tBy(\"creating egress policy\")\n\t\t\tappAGuid, err := cli.AppGuid(appA)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tcreateEgressPolicy(cli, fmt.Sprintf(testEgressPolicies, appAGuid))\n\n\t\t\tBy(\"checking that the app can use dns and http to reach the internet\")\n\t\t\tEventually(canProxy, \"10s\", \"1s\").Should(Succeed())\n\t\t\tConsistently(canProxy, \"2s\", \"0.5s\").Should(Succeed())\n\n\t\t\tclose(done)\n\t\t}, 180 \/* <-- overall spec timeout in seconds *\/)\n\t})\n})\n\nfunc createEgressPolicy(cli *cf_cli_adapter.Adapter, payload string) {\n\tpayloadFile, err := ioutil.TempFile(\"\", \"\")\n\tExpect(err).NotTo(HaveOccurred())\n\n\t_, err = payloadFile.Write([]byte(payload))\n\tExpect(err).NotTo(HaveOccurred())\n\n\terr = payloadFile.Close()\n\tExpect(err).NotTo(HaveOccurred())\n\n\t_, err = cli.Curl(\"POST\", \"\/networking\/v1\/external\/policies\", payloadFile.Name())\n\tExpect(err).NotTo(HaveOccurred())\n\n\terr = os.Remove(payloadFile.Name())\n\tExpect(err).NotTo(HaveOccurred())\n}\n\nvar testEgressPolicies = `\n\t\"egress_policies\": [\n\t\t{\n\t\t\t\"source\": {\"id\": %q},\n\t\t\t\"destination\": {\n\t\t\t\t\"protocol\": \"tcp\",\n\t\t\t\t\"ips\": [\n\t\t\t\t\t\"start\": \"0.0.0.0\",\n\t\t\t\t\t\"end\": \"255.255.255.255\",\n\t\t\t\t]\n\t\t\t}\n\t\t}\n\t]\n`\n<commit_msg>Update test egress policy json<commit_after>package acceptance_test\n\nimport (\n\t\"cf-pusher\/cf_cli_adapter\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/cf\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. 
\"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n\t\"os\"\n)\n\nvar _ = Describe(\"external connectivity\", func() {\n\tvar (\n\t\tappA string\n\t\torgName string\n\t\tspaceName string\n\t\tappRoute string\n\t\tcli *cf_cli_adapter.Adapter\n\t)\n\n\tBeforeEach(func() {\n\t\tif testConfig.Internetless {\n\t\t\tSkip(\"skipping egress policy tests\")\n\t\t}\n\n\t\tcli = &cf_cli_adapter.Adapter{CfCliPath: \"cf\"}\n\t\tappA = fmt.Sprintf(\"appA-%d\", rand.Int31())\n\n\t\torgName = testConfig.Prefix + \"egress-policy-org\"\n\t\tspaceName = testConfig.Prefix + \"space\"\n\t\tsetupOrgAndSpace(orgName, spaceName)\n\n\t\tBy(\"unbinding all running ASGs\")\n\t\tfor _, sg := range testConfig.DefaultSecurityGroups {\n\t\t\tExpect(cf.Cf(\"unbind-running-security-group\", sg).Wait(Timeout_Short)).To(gexec.Exit(0))\n\t\t}\n\n\t\tBy(\"pushing the test app\")\n\t\tpushProxy(appA)\n\t\tappRoute = fmt.Sprintf(\"http:\/\/%s.%s\/\", appA, config.AppsDomain)\n\t})\n\n\tAfterEach(func() {\n\t\tBy(\"adding back all the original running ASGs\")\n\t\tfor _, sg := range testConfig.DefaultSecurityGroups {\n\t\t\tExpect(cf.Cf(\"bind-running-security-group\", sg).Wait(Timeout_Short)).To(gexec.Exit(0))\n\t\t}\n\n\t\tBy(\"deleting the test org\")\n\t\tExpect(cf.Cf(\"delete-org\", orgName, \"-f\").Wait(Timeout_Push)).To(gexec.Exit(0))\n\t})\n\n\tcheckRequest := func(route string, expectedStatusCode int, expectedResponseSubstring string) error {\n\t\tresp, err := http.Get(route)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\trespBytes, err := ioutil.ReadAll(resp.Body)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\trespBody := string(respBytes)\n\n\t\tif resp.StatusCode != expectedStatusCode {\n\t\t\treturn fmt.Errorf(\"test http get to %s: expected response code %d but got %d. 
response body:\\n%s\", route, expectedStatusCode, resp.StatusCode, respBody)\n\t\t}\n\t\tif !strings.Contains(respBody, expectedResponseSubstring) {\n\t\t\treturn fmt.Errorf(\"test http get to %s: expected response to contain %q but instead saw:\\n%s\", route, expectedResponseSubstring, respBody)\n\t\t}\n\t\treturn nil\n\t}\n\n\tcanProxy := func() error {\n\t\treturn checkRequest(appRoute+\"proxy\/example.com\", 200, \"Example Domain\")\n\t}\n\tcannotProxy := func() error {\n\t\treturn checkRequest(appRoute+\"proxy\/example.com\", 500, \"example.com\")\n\t}\n\n\tDescribe(\"egress policy connectivity\", func() {\n\t\tIt(\"the app can reach the internet when egress policy is present\", func(done Done) {\n\t\t\tBy(\"checking that the app cannot reach the internet using http and dns\")\n\t\t\tEventually(cannotProxy, \"10s\", \"1s\").Should(Succeed())\n\t\t\tConsistently(cannotProxy, \"2s\", \"0.5s\").Should(Succeed())\n\n\t\t\tBy(\"creating egress policy\")\n\t\t\tappAGuid, err := cli.AppGuid(appA)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tcreateEgressPolicy(cli, fmt.Sprintf(testEgressPolicies, appAGuid))\n\n\t\t\tBy(\"checking that the app can use dns and http to reach the internet\")\n\t\t\tEventually(canProxy, \"10s\", \"1s\").Should(Succeed())\n\t\t\tConsistently(canProxy, \"2s\", \"0.5s\").Should(Succeed())\n\n\t\t\tclose(done)\n\t\t}, 180 \/* <-- overall spec timeout in seconds *\/)\n\t})\n})\n\nfunc createEgressPolicy(cli *cf_cli_adapter.Adapter, payload string) {\n\tpayloadFile, err := ioutil.TempFile(\"\", \"\")\n\tExpect(err).NotTo(HaveOccurred())\n\n\t_, err = payloadFile.Write([]byte(payload))\n\tExpect(err).NotTo(HaveOccurred())\n\n\terr = payloadFile.Close()\n\tExpect(err).NotTo(HaveOccurred())\n\n\t_, err = cli.Curl(\"POST\", \"\/networking\/v1\/external\/policies\", payloadFile.Name())\n\tExpect(err).NotTo(HaveOccurred())\n\n\terr = os.Remove(payloadFile.Name())\n\tExpect(err).NotTo(HaveOccurred())\n}\n\nvar testEgressPolicies = `\n{\n \"egress_policies\": [\n {\n \"source\": {\n \"id\": %q\n },\n \"destination\": {\n \"protocol\": \"tcp\",\n \"ips\": [\n {\n \"start\": \"0.0.0.0\",\n \"end\": \"255.255.255.255\"\n }\n ]\n }\n }\n ]\n}`\n<|endoftext|>"} {"text":"<commit_before>package hsup\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"syscall\"\n\t\"time\"\n)\n\nvar ErrNoSlugURL = errors.New(\"no slug specified\")\n\nconst profileRunnerText = `#!\/bin\/bash\nexport PS1='\\[\\033[01;34m\\]\\w\\[\\033[00m\\] \\[\\033[01;32m\\]$ \\[\\033[00m\\]'\n\nif [ -d \/etc\/profile.d ]; then\n for i in \/etc\/profile.d\/*.sh; do\n if [ -r $i ]; then\n . $i\n fi\n done\n unset i\nfi\n\nif [ -d \/app\/.profile.d ]; then\n for i in \/app\/.profile.d\/*.sh; do\n if [ -r $i ]; then\n . 
$i\n fi\n done\n unset i\nfi\n\nrm $0\ncmd=\"$@\" # trick to disable word splitting\nexec bash -c \"$cmd\"\n`\n\ntype profileRunner struct {\n\tfile *os.File\n}\n\nfunc (pr *profileRunner) Init() (err error) {\n\tif pr.file, err = ioutil.TempFile(\"\", \"pr_\"); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err = pr.file.Write([]byte(profileRunnerText)); err != nil {\n\t\treturn err\n\t}\n\n\tfi, err := os.Stat(pr.file.Name())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = os.Chmod(pr.file.Name(), fi.Mode()|0111); err != nil {\n\t\treturn err\n\t}\n\n\treturn pr.file.Close()\n}\n\nfunc (pr *profileRunner) Args(args []string) []string {\n\treturn append([]string{pr.file.Name()}, args...)\n}\n\ntype AbsPathDynoDriver struct{}\n\nfunc (dd *AbsPathDynoDriver) fetch(release *Release) error {\n\tif release.slugURL == \"\" {\n\t\treturn ErrNoSlugURL\n\t}\n\n\tswitch release.Where() {\n\tcase Local:\n\t\t\/\/ No-op: the slug is already available on the file\n\t\t\/\/ system.\n\tcase HTTP:\n\t\tlog.Printf(\"fetching slug URL %q\", release.slugURL)\n\n\t\tresp, err := http.Get(release.slugURL)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tout, err := os.Create(\"\/tmp\/slug.tgz\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer out.Close()\n\n\t\tif _, err := io.Copy(out, resp.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trelease.slugURL = \"\/tmp\/slug.tgz\"\n\t}\n\n\treturn nil\n}\n\nfunc (dd *AbsPathDynoDriver) unpack(release *Release) error {\n\tif release.slugURL == \"\" {\n\t\treturn nil\n\t}\n\n\tif release.Where() != Local {\n\t\tpanic(\"by unpack, expect release slugURL to be \" +\n\t\t\t\"transmogrified to a local path\")\n\t}\n\n\tcmd := exec.Command(\"\/bin\/tar\", \"-C\", \"\/app\", \"--strip-components=2\", \"-zxf\",\n\t\trelease.slugURL)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (dd *AbsPathDynoDriver) Build(release *Release) (err error) {\n\tif release.Where() == HTTP {\n\t\tif err = dd.fetch(release); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err = dd.unpack(release); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (dd *AbsPathDynoDriver) Start(ex *Executor) (err error) {\n\tvar pr profileRunner\n\tif err = pr.Init(); err != nil {\n\t\treturn err\n\t}\n\n\targs := pr.Args(ex.Args)\n\tex.cmd = exec.Command(args[0], args[1:]...)\n\n\tex.cmd.Stdin = os.Stdin\n\tex.cmd.Stdout = os.Stdout\n\tex.cmd.Stderr = os.Stderr\n\n\t\/\/ Tee stdout and stderr to Logplex.\n\tif ex.LogplexURL != nil {\n\t\tvar rStdout, rStderr io.Reader\n\t\tex.logsRelay, err = newRelay(ex.LogplexURL, ex.Name())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trStdout, ex.cmd.Stdout = teePipe(os.Stdout)\n\t\trStderr, ex.cmd.Stderr = teePipe(os.Stderr)\n\n\t\tgo ex.logsRelay.run(rStdout)\n\t\tgo ex.logsRelay.run(rStderr)\n\t}\n\n\tex.cmd.Dir = \"\/app\"\n\n\t\/\/ Fill environment vector from Heroku configuration, with a\n\t\/\/ default $PATH.\n\tex.cmd.Env = []string{\"PATH=\/usr\/local\/sbin:\/usr\/local\/bin:\" +\n\t\t\"\/usr\/sbin:\/usr\/bin:\/sbin:\/bin\",\n\t\t\"HOME=\/app\", \"DYNO=\" + ex.Name(), \"PORT=5000\"}\n\n\tfor k, v := range ex.Release.config {\n\t\tex.cmd.Env = append(ex.cmd.Env, k+\"=\"+v)\n\t}\n\n\tex.cmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tSetpgid: true,\n\t}\n\n\tif err = ex.cmd.Start(); err != nil {\n\t\treturn err\n\t}\n\n\tex.waiting = make(chan struct{})\n\treturn nil\n}\n\nfunc (dd *AbsPathDynoDriver) Wait(ex 
*Executor) (s *ExitStatus) {\n\ts = &ExitStatus{}\n\terr := ex.cmd.Wait()\n\tif err != nil {\n\t\tif eErr, ok := err.(*exec.ExitError); ok {\n\t\t\tif status, ok := eErr.Sys().(syscall.WaitStatus); ok {\n\t\t\t\ts.Code = status.ExitStatus()\n\t\t\t\tif status.Signaled() {\n\t\t\t\t\ts.Code = 128 + int(status.Signal())\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Non ExitErrors are propagated: they are\n\t\t\t\/\/ liable to be errors in starting the\n\t\t\t\/\/ process.\n\t\t\ts.Err = err\n\t\t}\n\t}\n\n\tif ex.logsRelay != nil {\n\t\t\/\/ wait until all buffered logs are delivered\n\t\tex.logsRelay.stop()\n\t}\n\tgo func() {\n\t\tex.waiting <- struct{}{}\n\t}()\n\n\treturn s\n}\n\nfunc (dd *AbsPathDynoDriver) Stop(ex *Executor) error {\n\tp := ex.cmd.Process\n\n\tgroup, err := os.FindProcess(-1 * p.Pid)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Begin graceful shutdown via SIGTERM.\n\tgroup.Signal(syscall.SIGTERM)\n\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(10 * time.Second):\n\t\t\tlog.Println(\"sigkill\", group)\n\t\t\tgroup.Signal(syscall.SIGKILL)\n\t\tcase <-ex.waiting:\n\t\t\tlog.Println(\"waited\", group)\n\t\t\treturn nil\n\t\t}\n\t\tlog.Println(\"spin\", group)\n\t\ttime.Sleep(time.Second)\n\t}\n}\n<commit_msg>Use \"$*\" to run command without word splitting<commit_after>package hsup\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"syscall\"\n\t\"time\"\n)\n\nvar ErrNoSlugURL = errors.New(\"no slug specified\")\n\nconst profileRunnerText = `#!\/bin\/bash\nexport PS1='\\[\\033[01;34m\\]\\w\\[\\033[00m\\] \\[\\033[01;32m\\]$ \\[\\033[00m\\]'\n\nif [ -d \/etc\/profile.d ]; then\n for i in \/etc\/profile.d\/*.sh; do\n if [ -r $i ]; then\n . $i\n fi\n done\n unset i\nfi\n\nif [ -d \/app\/.profile.d ]; then\n for i in \/app\/.profile.d\/*.sh; do\n if [ -r $i ]; then\n . 
$i\n fi\n done\n unset i\nfi\n\nrm $0\nexec bash -c \"$*\"\n`\n\ntype profileRunner struct {\n\tfile *os.File\n}\n\nfunc (pr *profileRunner) Init() (err error) {\n\tif pr.file, err = ioutil.TempFile(\"\", \"pr_\"); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err = pr.file.Write([]byte(profileRunnerText)); err != nil {\n\t\treturn err\n\t}\n\n\tfi, err := os.Stat(pr.file.Name())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = os.Chmod(pr.file.Name(), fi.Mode()|0111); err != nil {\n\t\treturn err\n\t}\n\n\treturn pr.file.Close()\n}\n\nfunc (pr *profileRunner) Args(args []string) []string {\n\treturn append([]string{pr.file.Name()}, args...)\n}\n\ntype AbsPathDynoDriver struct{}\n\nfunc (dd *AbsPathDynoDriver) fetch(release *Release) error {\n\tif release.slugURL == \"\" {\n\t\treturn ErrNoSlugURL\n\t}\n\n\tswitch release.Where() {\n\tcase Local:\n\t\t\/\/ No-op: the slug is already available on the file\n\t\t\/\/ system.\n\tcase HTTP:\n\t\tlog.Printf(\"fetching slug URL %q\", release.slugURL)\n\n\t\tresp, err := http.Get(release.slugURL)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tout, err := os.Create(\"\/tmp\/slug.tgz\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer out.Close()\n\n\t\tif _, err := io.Copy(out, resp.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trelease.slugURL = \"\/tmp\/slug.tgz\"\n\t}\n\n\treturn nil\n}\n\nfunc (dd *AbsPathDynoDriver) unpack(release *Release) error {\n\tif release.slugURL == \"\" {\n\t\treturn nil\n\t}\n\n\tif release.Where() != Local {\n\t\tpanic(\"by unpack, expect release slugURL to be \" +\n\t\t\t\"transmogrified to a local path\")\n\t}\n\n\tcmd := exec.Command(\"\/bin\/tar\", \"-C\", \"\/app\", \"--strip-components=2\", \"-zxf\",\n\t\trelease.slugURL)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (dd *AbsPathDynoDriver) Build(release *Release) (err error) {\n\tif release.Where() == HTTP {\n\t\tif err = dd.fetch(release); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err = dd.unpack(release); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (dd *AbsPathDynoDriver) Start(ex *Executor) (err error) {\n\tvar pr profileRunner\n\tif err = pr.Init(); err != nil {\n\t\treturn err\n\t}\n\n\targs := pr.Args(ex.Args)\n\tex.cmd = exec.Command(args[0], args[1:]...)\n\n\tex.cmd.Stdin = os.Stdin\n\tex.cmd.Stdout = os.Stdout\n\tex.cmd.Stderr = os.Stderr\n\n\t\/\/ Tee stdout and stderr to Logplex.\n\tif ex.LogplexURL != nil {\n\t\tvar rStdout, rStderr io.Reader\n\t\tex.logsRelay, err = newRelay(ex.LogplexURL, ex.Name())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trStdout, ex.cmd.Stdout = teePipe(os.Stdout)\n\t\trStderr, ex.cmd.Stderr = teePipe(os.Stderr)\n\n\t\tgo ex.logsRelay.run(rStdout)\n\t\tgo ex.logsRelay.run(rStderr)\n\t}\n\n\tex.cmd.Dir = \"\/app\"\n\n\t\/\/ Fill environment vector from Heroku configuration, with a\n\t\/\/ default $PATH.\n\tex.cmd.Env = []string{\"PATH=\/usr\/local\/sbin:\/usr\/local\/bin:\" +\n\t\t\"\/usr\/sbin:\/usr\/bin:\/sbin:\/bin\",\n\t\t\"HOME=\/app\", \"DYNO=\" + ex.Name(), \"PORT=5000\"}\n\n\tfor k, v := range ex.Release.config {\n\t\tex.cmd.Env = append(ex.cmd.Env, k+\"=\"+v)\n\t}\n\n\tex.cmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tSetpgid: true,\n\t}\n\n\tif err = ex.cmd.Start(); err != nil {\n\t\treturn err\n\t}\n\n\tex.waiting = make(chan struct{})\n\treturn nil\n}\n\nfunc (dd *AbsPathDynoDriver) Wait(ex *Executor) (s *ExitStatus) {\n\ts = 
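\/* fresh zero-valued status: Code stays 0 unless the Wait below finds otherwise *\/ 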
&ExitStatus{}\n\terr := ex.cmd.Wait()\n\tif err != nil {\n\t\tif eErr, ok := err.(*exec.ExitError); ok {\n\t\t\tif status, ok := eErr.Sys().(syscall.WaitStatus); ok {\n\t\t\t\ts.Code = status.ExitStatus()\n\t\t\t\tif status.Signaled() {\n\t\t\t\t\ts.Code = 128 + int(status.Signal())\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Non ExitErrors are propagated: they are\n\t\t\t\/\/ liable to be errors in starting the\n\t\t\t\/\/ process.\n\t\t\ts.Err = err\n\t\t}\n\t}\n\n\tif ex.logsRelay != nil {\n\t\t\/\/ wait until all buffered logs are delivered\n\t\tex.logsRelay.stop()\n\t}\n\tgo func() {\n\t\tex.waiting <- struct{}{}\n\t}()\n\n\treturn s\n}\n\nfunc (dd *AbsPathDynoDriver) Stop(ex *Executor) error {\n\tp := ex.cmd.Process\n\n\tgroup, err := os.FindProcess(-1 * p.Pid)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Begin graceful shutdown via SIGTERM.\n\tgroup.Signal(syscall.SIGTERM)\n\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(10 * time.Second):\n\t\t\tlog.Println(\"sigkill\", group)\n\t\t\tgroup.Signal(syscall.SIGKILL)\n\t\tcase <-ex.waiting:\n\t\t\tlog.Println(\"waited\", group)\n\t\t\treturn nil\n\t\t}\n\t\tlog.Println(\"spin\", group)\n\t\ttime.Sleep(time.Second)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package guest_fsresize\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/vtolstov\/go-ioctl\"\n)\n\nfunc resizefs(path string) error {\n\tvar err error\n\tvar stdout io.ReadCloser\n\tvar stdin bytes.Buffer\n\n\tpartstart := 0\n\tpartnum := 0\n\tdevice := \"\/tmp\/resize_dev\"\n\tpartition := \"\/tmp\/resize_part\"\n\tactive := false\n\textended := false\n\tparttype := \"Linux\"\n\tdevFs, err := findFs(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdevBlk, err := findBlock(\"\/sys\/block\", devFs)\n\tif err != nil {\n\t\treturn err\n\t}\n\tos.Remove(device)\n\tif err = syscall.Mknod(device, uint32(os.ModeDevice|syscall.S_IFBLK|0600), devBlk.Int()); err != nil {\n\t\treturn err\n\t}\n\tdefer os.Remove(device)\n\t\/\/\tmbr := make([]byte, 446)\n\n\t\/*\n\t\tf, err := os.OpenFile(device, os.O_RDONLY, os.FileMode(0400))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = io.ReadFull(f, mbr)\n\t\tf.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t*\/\n\tcmd := exec.Command(\"fdisk\", \"-l\", \"-u\", device)\n\tstdout, err = cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Printf(\"failed to open %s via fdisk %s 2\\n\", device, err.Error())\n\t\treturn err\n\t}\n\tr := bufio.NewReader(stdout)\n\n\tif err = cmd.Start(); err != nil {\n\t\tlog.Printf(\"failed to open %s via fdisk %s 3\\n\", device, err.Error())\n\t\treturn err\n\t}\n\n\tfor {\n\t\tline, err := r.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif strings.HasPrefix(line, device) {\n\t\t\tpartnum += 1\n\t\t\t\/\/\/test3 * 16384 204799 188416 92M 5 Extended\n\t\t\tps := strings.Fields(line)\n\t\t\tif ps[1] == \"*\" {\n\t\t\t\tactive = true\n\t\t\t\tpartstart, _ = strconv.Atoi(ps[2])\n\t\t\t\tif len(ps) > 7 {\n\t\t\t\t\tparttype = ps[6]\n\t\t\t\t\tif ps[7] == \"Extended\" {\n\t\t\t\t\t\textended = true\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tparttype = ps[5]\n\t\t\t\t\tif ps[6] == \"Extended\" {\n\t\t\t\t\t\textended = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tactive = false\n\t\t\t\tpartstart, _ = strconv.Atoi(ps[1])\n\t\t\t\tif len(ps) > 6 {\n\t\t\t\t\tparttype = ps[5]\n\t\t\t\t\tif ps[6] == \"Extended\" 
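\/* type-name column of the fdisk -l row *\/ 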
{\n\t\t\t\t\t\textended = true\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tparttype = ps[4]\n\t\t\t\t\tif ps[5] == \"Extended\" {\n\t\t\t\t\t\textended = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif err = cmd.Wait(); err != nil || partstart == 0 {\n\t\treturn fmt.Errorf(\"failed to open %s via fdisk 4\\n\", device)\n\t}\n\tif partnum > 1 {\n\t\tstdin.Write([]byte(\"d\\n\" + fmt.Sprintf(\"%d\", partnum) + \"\\n\"))\n\t} else {\n\t\tstdin.Write([]byte(\"d\\n\"))\n\t}\n\tif extended {\n\t\tstdin.Write([]byte(\"n\\nl\\n\" + fmt.Sprintf(\"%d\", partnum) + \"\\n\" + fmt.Sprintf(\"%d\", partstart) + \"\\n\\n\"))\n\t} else {\n\t\tstdin.Write([]byte(\"n\\np\\n\" + fmt.Sprintf(\"%d\", partnum) + \"\\n\" + fmt.Sprintf(\"%d\", partstart) + \"\\n\\n\"))\n\t}\n\tif active {\n\t\tstdin.Write([]byte(\"a\\n\" + fmt.Sprintf(\"%d\", partnum) + \"\\n\"))\n\t}\n\tif partnum > 1 {\n\t\tstdin.Write([]byte(\"t\\n\" + fmt.Sprintf(\"%d\", partnum) + \"\\n\" + parttype + \"\\nw\"))\n\t} else {\n\t\tstdin.Write([]byte(\"t\\n\" + parttype + \"\\nw\"))\n\t}\n\tcmd = exec.Command(\"fdisk\", \"-u\", device)\n\tcmd.Stdin = &stdin\n\tcmd.Run()\n\tstdin.Reset()\n\n\tw, err := os.OpenFile(device, os.O_WRONLY, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/*\n\t\t_, err = w.Write(mbr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = w.Sync()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t*\/\n\tblkerr := ioctl.BlkRRPart(w.Fd())\n\terr = w.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif blkerr != nil {\n\t\targs := []string{}\n\t\tfor _, name := range []string{\"partx\", \"partprobe\", \"kpartx\"} {\n\t\t\tif _, err = exec.LookPath(name); err == nil {\n\t\t\t\tswitch name {\n\t\t\t\tcase \"partx\":\n\t\t\t\t\targs = []string{\"-u\", device}\n\t\t\t\tdefault:\n\t\t\t\t\targs = []string{device}\n\t\t\t\t}\n\t\t\t\tlog.Printf(\"update partition table via %s %s\", name, strings.Join(args, \" \"))\n\t\t\t\tif err = exec.Command(name, args...).Run(); err == nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tos.Remove(partition)\n\tif err = syscall.Mknod(partition, uint32(os.ModeDevice|syscall.S_IFBLK|0600), devFs.Int()); err != nil {\n\t\treturn err\n\t}\n\tdefer os.Remove(partition)\n\tlog.Printf(\"resize filesystem via %s %s\", \"resize2fs\", partition)\n\tbuf, err := exec.Command(\"resize2fs\", partition).CombinedOutput()\n\tif err != nil {\n\t\tlog.Printf(\"resize2fs %s\", buf)\n\t\treturn err\n\t}\n\treturn nil\n}\n\ntype Dev struct {\n\tMajor uint64\n\tMinor uint64\n}\n\nfunc (d *Dev) String() string {\n\treturn fmt.Sprintf(\"%d:%d\", d.Major, d.Minor)\n}\n\nfunc (d *Dev) Int() int {\n\treturn int(d.Major*256 + d.Minor)\n}\n\nfunc findFs() (*Dev, error) {\n\tvar st syscall.Stat_t\n\n\terr := syscall.Stat(\"\/\", &st)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Dev{Major: uint64(st.Dev \/ 256), Minor: uint64(st.Dev % 256)}, nil\n}\n\nfunc findBlock(start string, s *Dev) (*Dev, error) {\n\tvar err error\n\tfis, err := ioutil.ReadDir(start)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, fi := range fis {\n\t\tswitch fi.Name() {\n\t\tcase \"bdi\", \"subsystem\", \"device\", \"trace\":\n\t\t\tcontinue\n\t\t}\n\t\tif _, err := os.Stat(filepath.Join(start, \"dev\")); err == nil {\n\t\t\tif buf, err := ioutil.ReadFile(filepath.Join(start, \"dev\")); err == nil {\n\t\t\t\tdev := strings.TrimSpace(string(buf))\n\t\t\t\tif s.String() == dev {\n\t\t\t\t\tif buf, err = ioutil.ReadFile(filepath.Join(filepath.Dir(start), \"dev\")); err == nil {\n\t\t\t\t\t\tmajorminor := 
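\/* sysfs \"dev\" files hold \"major:minor\" *\/ 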
strings.Split(strings.TrimSpace(string(buf)), \":\")\n\t\t\t\t\t\tmajor, _ := strconv.Atoi(majorminor[0])\n\t\t\t\t\t\tminor, _ := strconv.Atoi(majorminor[1])\n\t\t\t\t\t\treturn &Dev{Major: uint64(major), Minor: uint64(minor)}, nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tdevBlk, err := findBlock(filepath.Join(start, fi.Name()), s)\n\t\tif err == nil {\n\t\t\treturn devBlk, err\n\t\t}\n\t}\n\treturn nil, errors.New(\"failed to find dev\")\n}\n<commit_msg>fix build<commit_after>package guest_fsresize\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/vtolstov\/go-ioctl\"\n)\n\nfunc resizefs(path string) error {\n\tvar err error\n\tvar stdout io.ReadCloser\n\tvar stdin bytes.Buffer\n\n\tpartstart := 0\n\tpartnum := 0\n\tdevice := \"\/tmp\/resize_dev\"\n\tpartition := \"\/tmp\/resize_part\"\n\tactive := false\n\textended := false\n\tparttype := \"Linux\"\n\tdevFs, err := findFs(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdevBlk, err := findBlock(\"\/sys\/block\", devFs)\n\tif err != nil {\n\t\treturn err\n\t}\n\tos.Remove(device)\n\tif err = syscall.Mknod(device, uint32(os.ModeDevice|syscall.S_IFBLK|0600), devBlk.Int()); err != nil {\n\t\treturn err\n\t}\n\tdefer os.Remove(device)\n\t\/\/\tmbr := make([]byte, 446)\n\n\t\/*\n\t\tf, err := os.OpenFile(device, os.O_RDONLY, os.FileMode(0400))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = io.ReadFull(f, mbr)\n\t\tf.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t*\/\n\tcmd := exec.Command(\"fdisk\", \"-l\", \"-u\", device)\n\tstdout, err = cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Printf(\"failed to open %s via fdisk %s 2\\n\", device, err.Error())\n\t\treturn err\n\t}\n\tr := bufio.NewReader(stdout)\n\n\tif err = cmd.Start(); err != nil {\n\t\tlog.Printf(\"failed to open %s via fdisk %s 3\\n\", device, err.Error())\n\t\treturn err\n\t}\n\n\tfor {\n\t\tline, err := r.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif strings.HasPrefix(line, device) {\n\t\t\tpartnum += 1\n\t\t\t\/\/\/test3 * 16384 204799 188416 92M 5 Extended\n\t\t\tps := strings.Fields(line)\n\t\t\tif ps[1] == \"*\" {\n\t\t\t\tactive = true\n\t\t\t\tpartstart, _ = strconv.Atoi(ps[2])\n\t\t\t\tif len(ps) > 7 {\n\t\t\t\t\tparttype = ps[6]\n\t\t\t\t\tif ps[7] == \"Extended\" {\n\t\t\t\t\t\textended = true\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tparttype = ps[5]\n\t\t\t\t\tif ps[6] == \"Extended\" {\n\t\t\t\t\t\textended = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tactive = false\n\t\t\t\tpartstart, _ = strconv.Atoi(ps[1])\n\t\t\t\tif len(ps) > 6 {\n\t\t\t\t\tparttype = ps[5]\n\t\t\t\t\tif ps[6] == \"Extended\" {\n\t\t\t\t\t\textended = true\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tparttype = ps[4]\n\t\t\t\t\tif ps[5] == \"Extended\" {\n\t\t\t\t\t\textended = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif err = cmd.Wait(); err != nil || partstart == 0 {\n\t\treturn fmt.Errorf(\"failed to open %s via fdisk 4\\n\", device)\n\t}\n\tif partnum > 1 {\n\t\tstdin.Write([]byte(\"d\\n\" + fmt.Sprintf(\"%d\", partnum) + \"\\n\"))\n\t} else {\n\t\tstdin.Write([]byte(\"d\\n\"))\n\t}\n\tif extended {\n\t\tstdin.Write([]byte(\"n\\nl\\n\" + fmt.Sprintf(\"%d\", partnum) + \"\\n\" + fmt.Sprintf(\"%d\", partstart) + \"\\n\\n\"))\n\t} else {\n\t\tstdin.Write([]byte(\"n\\np\\n\" + fmt.Sprintf(\"%d\", partnum) + \"\\n\" + fmt.Sprintf(\"%d\", partstart) + 
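\/* trailing blank answer accepts fdisk's default last sector *\/ 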
\"\\n\\n\"))\n\t}\n\tif active {\n\t\tstdin.Write([]byte(\"a\\n\" + fmt.Sprintf(\"%d\", partnum) + \"\\n\"))\n\t}\n\tif partnum > 1 {\n\t\tstdin.Write([]byte(\"t\\n\" + fmt.Sprintf(\"%d\", partnum) + \"\\n\" + parttype + \"\\nw\"))\n\t} else {\n\t\tstdin.Write([]byte(\"t\\n\" + parttype + \"\\nw\"))\n\t}\n\tcmd = exec.Command(\"fdisk\", \"-u\", device)\n\tcmd.Stdin = &stdin\n\tcmd.Run()\n\tstdin.Reset()\n\n\tw, err := os.OpenFile(device, os.O_WRONLY, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/*\n\t\t_, err = w.Write(mbr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = w.Sync()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t*\/\n\tblkerr := ioctl.BlkRRPart(w.Fd())\n\terr = w.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif blkerr != nil {\n\t\targs := []string{}\n\t\tfor _, name := range []string{\"partx\", \"partprobe\", \"kpartx\"} {\n\t\t\tif _, err = exec.LookPath(name); err == nil {\n\t\t\t\tswitch name {\n\t\t\t\tcase \"partx\":\n\t\t\t\t\targs = []string{\"-u\", device}\n\t\t\t\tdefault:\n\t\t\t\t\targs = []string{device}\n\t\t\t\t}\n\t\t\t\tlog.Printf(\"update partition table via %s %s\", name, strings.Join(args, \" \"))\n\t\t\t\tif err = exec.Command(name, args...).Run(); err == nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tos.Remove(partition)\n\tif err = syscall.Mknod(partition, uint32(os.ModeDevice|syscall.S_IFBLK|0600), devFs.Int()); err != nil {\n\t\treturn err\n\t}\n\tdefer os.Remove(partition)\n\tlog.Printf(\"resize filesystem via %s %s\", \"resize2fs\", partition)\n\tbuf, err := exec.Command(\"resize2fs\", partition).CombinedOutput()\n\tif err != nil {\n\t\tlog.Printf(\"resize2fs %s\", buf)\n\t\treturn err\n\t}\n\treturn nil\n}\n\ntype Dev struct {\n\tMajor uint64\n\tMinor uint64\n}\n\nfunc (d *Dev) String() string {\n\treturn fmt.Sprintf(\"%d:%d\", d.Major, d.Minor)\n}\n\nfunc (d *Dev) Int() int {\n\treturn int(d.Major*256 + d.Minor)\n}\n\nfunc findFs(path string) (*Dev, error) {\n\tvar st syscall.Stat_t\n\n\terr := syscall.Stat(path, &st)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Dev{Major: uint64(st.Dev \/ 256), Minor: uint64(st.Dev % 256)}, nil\n}\n\nfunc findBlock(start string, s *Dev) (*Dev, error) {\n\tvar err error\n\tfis, err := ioutil.ReadDir(start)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, fi := range fis {\n\t\tswitch fi.Name() {\n\t\tcase \"bdi\", \"subsystem\", \"device\", \"trace\":\n\t\t\tcontinue\n\t\t}\n\t\tif _, err := os.Stat(filepath.Join(start, \"dev\")); err == nil {\n\t\t\tif buf, err := ioutil.ReadFile(filepath.Join(start, \"dev\")); err == nil {\n\t\t\t\tdev := strings.TrimSpace(string(buf))\n\t\t\t\tif s.String() == dev {\n\t\t\t\t\tif buf, err = ioutil.ReadFile(filepath.Join(filepath.Dir(start), \"dev\")); err == nil {\n\t\t\t\t\t\tmajorminor := strings.Split(strings.TrimSpace(string(buf)), \":\")\n\t\t\t\t\t\tmajor, _ := strconv.Atoi(majorminor[0])\n\t\t\t\t\t\tminor, _ := strconv.Atoi(majorminor[1])\n\t\t\t\t\t\treturn &Dev{Major: uint64(major), Minor: uint64(minor)}, nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tdevBlk, err := findBlock(filepath.Join(start, fi.Name()), s)\n\t\tif err == nil {\n\t\t\treturn devBlk, err\n\t\t}\n\t}\n\treturn nil, errors.New(\"failed to find dev\")\n}\n<|endoftext|>"} {"text":"<commit_before>package selector\n<commit_msg>Added tests for GetSkillsOrder<commit_after>package selector\n\nimport \"testing\"\nimport \"github.com\/AxelUser\/gowork\/utils\"\n\nfunc TestGetSkillsOrder_PassOntology_SkillsAreInSameOrder(t *testing.T) {\n\tontology := 
utils.CreateOntology([]string{\"js\", \"css\", \"html\"}, false, true)\n\n\tskillsInOrder := getSkillsOrder(ontology)\n\n\tfor i := range ontology {\n\t\tif ontology[i].Alias != skillsInOrder[i] {\n\t\t\tt.Errorf(\"Expected skill <%s>, but was <%s>\", ontology[i].Alias, skillsInOrder[i])\n\t\t}\n\t}\n}\n\nfunc TestGetSkillsOrder_PassOntology_SameLength(t *testing.T) {\n\tontology := utils.CreateOntology([]string{\"js\", \"css\", \"html\"}, false, true)\n\n\tskillsInOrder := getSkillsOrder(ontology)\n\n\tif len(ontology) != len(skillsInOrder) {\n\t\tt.Errorf(\"Expected length <%d>, but was <%d>\", len(ontology), len(skillsInOrder))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage autocert\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\n\/\/ ErrCacheMiss is returned when a certificate is not found in cache.\nvar ErrCacheMiss = errors.New(\"acme\/autocert: certificate cache miss\")\n\n\/\/ Cache is used by Manager to store and retrieve previously obtained certificates\n\/\/ and other account data as opaque blobs.\n\/\/\n\/\/ Cache implementations should not rely on the key naming pattern. Keys can\n\/\/ include any printable ASCII characters, except the following: \\\/:*?\"<>|\ntype Cache interface {\n\t\/\/ Get returns a certificate data for the specified key.\n\t\/\/ If there's no such key, Get returns ErrCacheMiss.\n\tGet(ctx context.Context, key string) ([]byte, error)\n\n\t\/\/ Put stores the data in the cache under the specified key.\n\t\/\/ Underlying implementations may use any data storage format,\n\t\/\/ as long as the reverse operation, Get, results in the original data.\n\tPut(ctx context.Context, key string, data []byte) error\n\n\t\/\/ Delete removes a certificate data from the cache under the specified key.\n\t\/\/ If there's no such key in the cache, Delete returns nil.\n\tDelete(ctx context.Context, key string) error\n}\n\n\/\/ DirCache implements Cache using a directory on the local filesystem.\n\/\/ If the directory does not exist, it will be created with 0700 permissions.\ntype DirCache string\n\n\/\/ Get reads a certificate data from the specified file name.\nfunc (d DirCache) Get(ctx context.Context, name string) ([]byte, error) {\n\tname = filepath.Join(string(d), name)\n\tvar (\n\t\tdata []byte\n\t\terr error\n\t\tdone = make(chan struct{})\n\t)\n\tgo func() {\n\t\tdata, err = ioutil.ReadFile(name)\n\t\tclose(done)\n\t}()\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\tcase <-done:\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn nil, ErrCacheMiss\n\t}\n\treturn data, err\n}\n\n\/\/ Put writes the certificate data to the specified file name.\n\/\/ The file will be created with 0600 permissions.\nfunc (d DirCache) Put(ctx context.Context, name string, data []byte) error {\n\tif err := os.MkdirAll(string(d), 0700); err != nil {\n\t\treturn err\n\t}\n\n\tdone := make(chan struct{})\n\tvar err error\n\tgo func() {\n\t\tdefer close(done)\n\t\tvar tmp string\n\t\tif tmp, err = d.writeTempFile(name, data); err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer os.Remove(tmp)\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\t\/\/ Don't overwrite the file if the context was canceled.\n\t\tdefault:\n\t\t\tnewName := filepath.Join(string(d), name)\n\t\t\terr = os.Rename(tmp, newName)\n\t\t}\n\t}()\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tcase 
<-done:\n\t}\n\treturn err\n}\n\n\/\/ Delete removes the specified file name.\nfunc (d DirCache) Delete(ctx context.Context, name string) error {\n\tname = filepath.Join(string(d), name)\n\tvar (\n\t\terr error\n\t\tdone = make(chan struct{})\n\t)\n\tgo func() {\n\t\terr = os.Remove(name)\n\t\tclose(done)\n\t}()\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tcase <-done:\n\t}\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ writeTempFile writes b to a temporary file, closes the file and returns its path.\nfunc (d DirCache) writeTempFile(prefix string, b []byte) (name string, reterr error) {\n\t\/\/ TempFile uses 0600 permissions\n\tf, err := ioutil.TempFile(string(d), prefix)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer func() {\n\t\tif reterr != nil {\n\t\t\tos.Remove(f.Name())\n\t\t}\n\t}()\n\tif _, err := f.Write(b); err != nil {\n\t\tf.Close()\n\t\treturn \"\", err\n\t}\n\treturn f.Name(), f.Close()\n}\n<commit_msg>acme\/autocert: properly clean DirCache paths<commit_after>\/\/ Copyright 2016 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage autocert\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\n\/\/ ErrCacheMiss is returned when a certificate is not found in cache.\nvar ErrCacheMiss = errors.New(\"acme\/autocert: certificate cache miss\")\n\n\/\/ Cache is used by Manager to store and retrieve previously obtained certificates\n\/\/ and other account data as opaque blobs.\n\/\/\n\/\/ Cache implementations should not rely on the key naming pattern. Keys can\n\/\/ include any printable ASCII characters, except the following: \\\/:*?\"<>|\ntype Cache interface {\n\t\/\/ Get returns a certificate data for the specified key.\n\t\/\/ If there's no such key, Get returns ErrCacheMiss.\n\tGet(ctx context.Context, key string) ([]byte, error)\n\n\t\/\/ Put stores the data in the cache under the specified key.\n\t\/\/ Underlying implementations may use any data storage format,\n\t\/\/ as long as the reverse operation, Get, results in the original data.\n\tPut(ctx context.Context, key string, data []byte) error\n\n\t\/\/ Delete removes a certificate data from the cache under the specified key.\n\t\/\/ If there's no such key in the cache, Delete returns nil.\n\tDelete(ctx context.Context, key string) error\n}\n\n\/\/ DirCache implements Cache using a directory on the local filesystem.\n\/\/ If the directory does not exist, it will be created with 0700 permissions.\ntype DirCache string\n\n\/\/ Get reads a certificate data from the specified file name.\nfunc (d DirCache) Get(ctx context.Context, name string) ([]byte, error) {\n\tname = filepath.Join(string(d), filepath.Clean(\"\/\"+name))\n\tvar (\n\t\tdata []byte\n\t\terr error\n\t\tdone = make(chan struct{})\n\t)\n\tgo func() {\n\t\tdata, err = ioutil.ReadFile(name)\n\t\tclose(done)\n\t}()\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\tcase <-done:\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn nil, ErrCacheMiss\n\t}\n\treturn data, err\n}\n\n\/\/ Put writes the certificate data to the specified file name.\n\/\/ The file will be created with 0600 permissions.\nfunc (d DirCache) Put(ctx context.Context, name string, data []byte) error {\n\tif err := os.MkdirAll(string(d), 0700); err != nil {\n\t\treturn err\n\t}\n\n\tdone := make(chan struct{})\n\tvar err error\n\tgo func() {\n\t\tdefer close(done)\n\t\tvar tmp 
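\/* holds the temp file path from writeTempFile *\/ 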
string\n\t\tif tmp, err = d.writeTempFile(name, data); err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer os.Remove(tmp)\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\t\/\/ Don't overwrite the file if the context was canceled.\n\t\tdefault:\n\t\t\tnewName := filepath.Join(string(d), filepath.Clean(\"\/\"+name))\n\t\t\terr = os.Rename(tmp, newName)\n\t\t}\n\t}()\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tcase <-done:\n\t}\n\treturn err\n}\n\n\/\/ Delete removes the specified file name.\nfunc (d DirCache) Delete(ctx context.Context, name string) error {\n\tname = filepath.Join(string(d), filepath.Clean(\"\/\"+name))\n\tvar (\n\t\terr error\n\t\tdone = make(chan struct{})\n\t)\n\tgo func() {\n\t\terr = os.Remove(name)\n\t\tclose(done)\n\t}()\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tcase <-done:\n\t}\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ writeTempFile writes b to a temporary file, closes the file and returns its path.\nfunc (d DirCache) writeTempFile(prefix string, b []byte) (name string, reterr error) {\n\t\/\/ TempFile uses 0600 permissions\n\tf, err := ioutil.TempFile(string(d), prefix)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer func() {\n\t\tif reterr != nil {\n\t\t\tos.Remove(f.Name())\n\t\t}\n\t}()\n\tif _, err := f.Write(b); err != nil {\n\t\tf.Close()\n\t\treturn \"\", err\n\t}\n\treturn f.Name(), f.Close()\n}\n<|endoftext|>"}